From fcb2ae14ac90ec8c7bc903fa9693966b23e10929 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 4 Jun 2018 09:40:00 -0400 Subject: [PATCH 01/74] updated monitoring to include pbench capability, also included a call in librbdfio to start pbench data monitoring/collection --- benchmark/librbdfio.py | 2 ++ monitoring.py | 13 +++++++++++++ 2 files changed, 15 insertions(+) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 7ae3a24e..a50325bf 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -103,6 +103,7 @@ def run(self): self.cluster.dump_config(self.run_dir) monitoring.start(self.run_dir) + monitoring.start_pbench(self.out_dir) time.sleep(5) @@ -123,6 +124,7 @@ def run(self): if 'recovery_test' in self.cluster.config: self.cluster.wait_recovery_done() + monitoring.stop_pbench(self.out_dir) monitoring.stop(self.run_dir) # Finally, get the historic ops diff --git a/monitoring.py b/monitoring.py index f3dd99c4..24c4776a 100644 --- a/monitoring.py +++ b/monitoring.py @@ -24,6 +24,19 @@ def start(directory): # % (blktrace_dir, device, device)) +def start_pbench(directory): + logger.info('Executing Pbench-start-tools') + start_cmd = "/opt/pbench-agent/util-scripts/pbench-start-tools -g default -d %s" % directory + os.system(start_cmd) + +def stop_pbench(directory): + logger.info('Executing Pbench-stop-tools') + stop_cmd = "/opt/pbench-agent/util-scripts/pbench-stop-tools -g default -d %s" % directory + os.system(stop_cmd) + logger.info('Executing Pbench-postprocess-tools') + postprocess_cmd = "/opt/pbench-agent/util-scripts/pbench-postprocess-tools -g default -d %s" % directory + os.system(postprocess_cmd) + def stop(directory=None): nodes = settings.getnodes('clients', 'osds', 'mons', 'rgws') From 68c92fec8b3408bcb383dab4c4b1625c56142b58 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 4 Jun 2018 20:14:09 -0400 Subject: [PATCH 02/74] fixed monitoring.py, included logging --- monitoring.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring.py b/monitoring.py index 24c4776a..4a678b0c 100644 --- a/monitoring.py +++ b/monitoring.py @@ -1,5 +1,6 @@ import common import settings +import logging def start(directory): From 72c262614c3d709da67b3c965d489192dc8c3333 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 4 Jun 2018 20:45:47 -0400 Subject: [PATCH 03/74] delcared logger --- monitoring.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring.py b/monitoring.py index 4a678b0c..6d0c20b2 100644 --- a/monitoring.py +++ b/monitoring.py @@ -2,6 +2,7 @@ import settings import logging +logger = logging.getLogger("cbt") def start(directory): nodes = settings.getnodes('clients', 'osds', 'mons', 'rgws') From 5187b81f581d708be0c74b6079403d7063dccbe6 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 4 Jun 2018 21:02:40 -0400 Subject: [PATCH 04/74] included os --- monitoring.py | 1 + 1 file changed, 1 insertion(+) diff --git a/monitoring.py b/monitoring.py index 6d0c20b2..1efd06eb 100644 --- a/monitoring.py +++ b/monitoring.py @@ -1,6 +1,7 @@ import common import settings import logging +import os logger = logging.getLogger("cbt") From 25bf9041f622152bcec3da34492edf25c4f5d996 Mon Sep 17 00:00:00 2001 From: root Date: Mon, 27 Aug 2018 17:38:52 +0000 Subject: [PATCH 05/74] Updated to call the starting and stopping of pbench collection tools --- benchmark/radosbench.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 15128771..532b1cd5 100644 --- a/benchmark/radosbench.py +++ 
b/benchmark/radosbench.py @@ -121,6 +121,7 @@ def _run(self, mode, run_dir, out_dir): # Run rados bench monitoring.start(run_dir) + monitoring.start_pbench(self.out_dir) logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): @@ -139,6 +140,7 @@ def _run(self, mode, run_dir, out_dir): ps.append(p) for p in ps: p.wait() + monitoring.stop_pbench(self.out_dir) monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. From 7f5136e13dfb07133123f6ee7b6317b5065e98b2 Mon Sep 17 00:00:00 2001 From: root Date: Tue, 28 Aug 2018 01:43:08 +0000 Subject: [PATCH 06/74] changed pbench dir to benchmark dir + mode --- benchmark/radosbench.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 532b1cd5..bb946c69 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -121,7 +121,7 @@ def _run(self, mode, run_dir, out_dir): # Run rados bench monitoring.start(run_dir) - monitoring.start_pbench(self.out_dir) + monitoring.start_pbench(%s/%s % (self.out_dir, mode)) logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): @@ -140,7 +140,7 @@ def _run(self, mode, run_dir, out_dir): ps.append(p) for p in ps: p.wait() - monitoring.stop_pbench(self.out_dir) + monitoring.stop_pbench(%s/%s % (self.out_dir, mode)) monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. From 5850e9c8723caa4d558d75846c2e5945eb738f59 Mon Sep 17 00:00:00 2001 From: acalhoun Date: Tue, 28 Aug 2018 02:12:23 +0000 Subject: [PATCH 07/74] changed pbench dir to benchmark dir + mode --- benchmark/radosbench.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index bb946c69..e849ded7 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -121,7 +121,7 @@ def _run(self, mode, run_dir, out_dir): # Run rados bench monitoring.start(run_dir) - monitoring.start_pbench(%s/%s % (self.out_dir, mode)) + monitoring.start_pbench("%s/%s" % (self.out_dir, mode)) logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): @@ -140,7 +140,7 @@ def _run(self, mode, run_dir, out_dir): ps.append(p) for p in ps: p.wait() - monitoring.stop_pbench(%s/%s % (self.out_dir, mode)) + monitoring.stop_pbench("%s/%s" % (self.out_dir, mode)) monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. 
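Note: the patches above all follow the same bracketing pattern — pbench collection is started right after CBT's own monitoring and stopped (then post-processed) just before it. Below is a minimal sketch of that call order, assuming the `monitoring` module with the `start_pbench`/`stop_pbench` helpers added in PATCH 01; the directory paths and the no-op workload are illustrative placeholders, not values taken from the patches.

    import monitoring  # CBT monitoring module, extended above with the pbench helpers

    # Hypothetical run/archive directories; real values come from the benchmark config.
    run_dir = "/tmp/cbt/radosbench/write"
    out_dir = "/tmp/cbt/archive/radosbench/write"

    def run_workload():
        # Placeholder for the actual workload (rados bench, fio, smallfile, ...).
        pass

    monitoring.start(run_dir)           # existing CBT node monitoring
    monitoring.start_pbench(out_dir)    # pbench-start-tools -g default -d <out_dir>
    run_workload()
    monitoring.stop_pbench(out_dir)     # pbench-stop-tools, then pbench-postprocess-tools
    monitoring.stop(run_dir)            # stop CBT node monitoring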
From 2c1453b097c1b0ded43bb1d571b97e98ffcb1064 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 18 Sep 2018 16:22:52 -0400 Subject: [PATCH 08/74] add pbench start/stop call for idle monitoring --- benchmark/librbdfio.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index a50325bf..09ca4fa8 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -73,9 +73,10 @@ def initialize(self): logger.info('Pausing for 60s for idle monitoring.') monitoring.start("%s/idle_monitoring" % self.run_dir) + monitoring.start_pbench("%s/idle" % self.out_dir) time.sleep(60) monitoring.stop() - + monitoring.stop_pbench("%s/idle" % self.out_dir) common.sync_files('%s/*' % self.run_dir, self.out_dir) self.mkimages() From bb8d37901fd9f97040c023fd92e64eb6e1f3ac80 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 18 Sep 2018 16:23:42 -0400 Subject: [PATCH 09/74] Corrected tabing of pbench calls --- benchmark/radosbench.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index e849ded7..95d445a5 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -121,7 +121,7 @@ def _run(self, mode, run_dir, out_dir): # Run rados bench monitoring.start(run_dir) - monitoring.start_pbench("%s/%s" % (self.out_dir, mode)) + monitoring.start_pbench("%s/%s" % (self.out_dir, mode)) logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): @@ -140,7 +140,7 @@ def _run(self, mode, run_dir, out_dir): ps.append(p) for p in ps: p.wait() - monitoring.stop_pbench("%s/%s" % (self.out_dir, mode)) + monitoring.stop_pbench("%s/%s" % (self.out_dir, mode)) monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. 
From f3a19b1d23cc365417c4b214b0ca19e27c69f50a Mon Sep 17 00:00:00 2001 From: Ben England Date: Fri, 2 Nov 2018 10:02:35 -0400 Subject: [PATCH 10/74] integrate smallfile benchmark with CBT This should make it easier to add smallfile tests to Jenkins CI and to Teuthology --- benchmark/smallfile.py | 163 ++++++++++++++++++++++++++++++++ benchmarkfactory.py | 3 + example/smallfile-test-set.yaml | 45 +++++++++ 3 files changed, 211 insertions(+) create mode 100644 benchmark/smallfile.py create mode 100644 example/smallfile-test-set.yaml diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py new file mode 100644 index 00000000..5b4e3a9f --- /dev/null +++ b/benchmark/smallfile.py @@ -0,0 +1,163 @@ +# Benchmark subclass to invoke smallfile +# this benchmark will iterate over smallfile test parameters +# something that smallfile cannot do today +# +# see examples/smallfile.yaml for how to use it +# +# at present, this does not create the filesystem or mount it, +# all clients and head node must have filesystem mounted +# +# it assumes that all hosts are accessed with the same user account +# so that user@hostname pdsh syntax is not needed +# +# it has only been tested with a single Cephfs mountpoint/host + +import copy +import common +import monitoring +import os +import time +import logging +import settings +import yaml +import json +import subprocess + +from benchmark import Benchmark + +logger = logging.getLogger("cbt") + +# we do this so source of exception is really obvious +class CbtSmfExc(Exception): + pass + +class Smallfile(Benchmark): + + def __init__(self, cluster, config): + super(Smallfile, self).__init__(cluster, config) + self.out_dir = self.archive_dir + self.config = config + mons = settings.getnodes('mons').split(',') + self.any_mon = mons[0] + self.clients = settings.getnodes('clients').split(',') + self.any_client = self.clients[0] + self.head = settings.getnodes('head') + self.cephfs_data_pool_name = config.get('data_pool_name', 'cephfs_data') + self.cleandir() + + + # this function uses "ceph df" output to monitor + # cephfs_data pool object count, when that stops going down + # then the pool is stable and it's ok to start another test + + def get_cephfs_data_objects(self): + (cephdf_out, cephdf_err) = common.pdsh( + self.any_mon, 'ceph -f json df', continue_if_error=False).communicate() + # pdsh prepends JSON output with IP address of host that did the command, + # we have to strip the IP address off before JSON parser will accept it + start_of_json = cephdf_out.index('{') + json_str = cephdf_out[start_of_json:] + cephdf = json.loads(json_str) + cephfs_data_objs = -1 + for p in cephdf['pools']: + if p['name'] == self.cephfs_data_pool_name: + cephfs_data_objs = int(p['stats']['objects']) + break + if cephfs_data_objs == -1: + raise CbtSmfExc('could not find cephfs_data pool in ceph -f json df output') + logger.info('cephfs_data pool object count = %d' % cephfs_data_objs) + return cephfs_data_objs + + def run(self): + super(Smallfile, self).run() + + # someday we might want to allow the option + # to NOT drop cache + self.dropcaches() + # FIXME: if desired, drop cache on OSDs + # FIXME: if desired, drop cache on MDSs + + # dump the cluster config + self.cluster.dump_config(self.run_dir) + + # input YAML parameters for smallfile are subset + # extract parameters that you need + + smfparams = copy.deepcopy(self.config) + del smfparams['benchmark'] + del smfparams['iteration'] + try: + del smfparams['data_pool_name'] + except KeyError: + pass + operation = 
smfparams['operation'] + topdir = smfparams['top'].split(',')[0] + yaml_input_pathname = os.path.join(self.out_dir, 'smfparams.yaml') + with open(yaml_input_pathname, 'w') as yamlf: + yamlf.write(yaml.dump(smfparams, default_flow_style=False)) + + # generate client list + + client_list_path = os.path.join(self.out_dir, 'client.list') + with open(client_list_path, 'w') as client_f: + for c in self.clients: + client_f.write(c + '\n') + + # ensure SMF directory exists + # for shared filesystem, we only need 1 client to + # initialize it + + logger.info('using client %s to initialize shared filesystem' % self.any_client) + common.pdsh(self.any_client, 'mkdir -p -v -m 0777 ' + topdir, continue_if_error=False).communicate() + + # Run the backfill testing thread if requested + if 'recovery_test' in self.cluster.config: + recovery_callback = self.recovery_callback + self.cluster.create_recovery_test(self.run_dir, recovery_callback) + + # Run smallfile + monitoring.start(self.run_dir) + logger.info('Running smallfile test, see %s for parameters' % yaml_input_pathname) + smfcmd = [ 'smallfile_cli.py', + '--host-set', client_list_path, + '--yaml-input-file', yaml_input_pathname, + '--verbose', 'Y', + '--output-json', '%s/smfresult.json' % self.out_dir ] + logger.info('smallfile command: %s' % ' '.join(smfcmd)) + smf_out_path = os.path.join(self.out_dir, 'smf-out.log') + (smf_out_str, smf_err_str) = common.pdsh(self.head, ' '.join(smfcmd), continue_if_error=False).communicate() + with open(smf_out_path, 'w') as smf_outf: + smf_outf.write(smf_out_str + '\n') + logger.info('smallfile result: %s' % smf_out_path) + monitoring.stop(self.run_dir) + + if operation == 'cleanup': + common.pdsh(self.any_client, 'rm -rf ' + topdir, continue_if_error=False).communicate() + common.pdsh(self.any_client, 'mkdir -v -m 0777 ' + topdir, continue_if_error=False).communicate() + # wait until cephfs_data pool stops decreasing + logger.info('wait for cephfs_data pool to empty') + pool_shrinking = True + old_data_objs = self.get_cephfs_data_objects() + while pool_shrinking: + time.sleep(10) + data_objs = self.get_cephfs_data_objects() + if old_data_objs == data_objs: + logger.info('pool stopped shrinking') + pool_shrinking = False + else: + logger.info('pool shrank by %d objects', old_data_objs - data_objs) + old_data_objs = data_objs + + # If we were doing recovery, wait until it's done. 
+ if 'recovery_test' in self.cluster.config: + self.cluster.wait_recovery_done() + + # Finally, get the historic ops + self.cluster.dump_historic_ops(self.run_dir) + common.sync_files(self.run_dir, self.out_dir) + + def recovery_callback(self): + pass + + def __str__(self): + return "%s\n%s\n%s" % (self.run_dir, self.out_dir, super(Smallfile, self).__str__()) diff --git a/benchmarkfactory.py b/benchmarkfactory.py index f357427b..aaa300fc 100644 --- a/benchmarkfactory.py +++ b/benchmarkfactory.py @@ -2,6 +2,7 @@ import itertools import settings +from benchmark.smallfile import Smallfile from benchmark.radosbench import Radosbench from benchmark.rbdfio import RbdFio from benchmark.rawfio import RawFio @@ -63,3 +64,5 @@ def get_object(cluster, benchmark, bconfig): return CephTestRados(cluster, bconfig) if benchmark == 'getput': return Getput(cluster, bconfig) + if benchmark == 'smallfile': + return Smallfile(cluster, bconfig) diff --git a/example/smallfile-test-set.yaml b/example/smallfile-test-set.yaml new file mode 100644 index 00000000..841cdfe9 --- /dev/null +++ b/example/smallfile-test-set.yaml @@ -0,0 +1,45 @@ +# this example runs the smallfile benchmark +# +# we are using a kernel Cephfs shared filesystem +# which must be mounted on head node as well as clients +# +# CBT lets you specify multiple operations and file sizes, +# something that smallfile_cli.py cannot do on its own +# +# the smallfile CBT benchmark not create the filesystem or mount it, +# all clients and head node must have filesystem mounted +# +# it assumes that all hosts are accessed with the same user account +# so that user@hostname pdsh syntax is not needed +# +# ensure that smallfile_cli.py (or softlink) appears in the PATH +# on the 'head' node and that smallfile_remote.py (or softlink) appears +# in PATH on the 'clients' hosts. 
+# +# you should not specify host-set in your smallfile input parameters, +# the smallfile CBT benchmark module will do that for you +# using 'clients' host group +# +# it has only been tested with a single Cephfs mountpoint/host + +cluster: + use_existing: True + head: "localhost" + clients: [ "192.168.121.64", "192.168.121.112", "192.168.121.158" ] + osds: [ "192.168.121.64", "192.168.121.112", "192.168.121.158" ] + mons: [ "192.168.121.64" ] + rebuild_every_test: False + tmp_dir: "/tmp/cbt" + osds_per_node: 1 + pool_profiles: + replicated: + replication: 3 + crush_profile: 1 + iterations: 1 +benchmarks: + smallfile: + operation: [ cleanup, create, read, delete ] + file-size: [ 4, 64 ] + top: /mnt/cephfs/smf + files: 2000 + pause: 2000 From 1b4cd9c965c1354219531fab2e44651c57770836 Mon Sep 17 00:00:00 2001 From: Ben England Date: Mon, 5 Nov 2018 12:13:08 -0500 Subject: [PATCH 11/74] save response times in out_dir subdirectory so we can compute response time percentiles log YAML inputs so user sees which test is running --- benchmark/smallfile.py | 9 +++++++++ example/smallfile-test-set.yaml | 3 +++ 2 files changed, 12 insertions(+) diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py index 5b4e3a9f..1b146e2b 100644 --- a/benchmark/smallfile.py +++ b/benchmark/smallfile.py @@ -120,10 +120,12 @@ def run(self): logger.info('Running smallfile test, see %s for parameters' % yaml_input_pathname) smfcmd = [ 'smallfile_cli.py', '--host-set', client_list_path, + '--response-times', 'Y', '--yaml-input-file', yaml_input_pathname, '--verbose', 'Y', '--output-json', '%s/smfresult.json' % self.out_dir ] logger.info('smallfile command: %s' % ' '.join(smfcmd)) + logger.info('YAML inputs: %s' % yaml.dump(smfparams)) smf_out_path = os.path.join(self.out_dir, 'smf-out.log') (smf_out_str, smf_err_str) = common.pdsh(self.head, ' '.join(smfcmd), continue_if_error=False).communicate() with open(smf_out_path, 'w') as smf_outf: @@ -131,6 +133,13 @@ def run(self): logger.info('smallfile result: %s' % smf_out_path) monitoring.stop(self.run_dir) + # save response times + rsptimes_target_dir = os.path.join(self.out_dir, 'rsptimes') + common.mkdir_p(rsptimes_target_dir) + common.rpdcp(self.any_client, '', + os.path.join(os.path.join(topdir, 'network_shared'), 'rsptimes*csv'), + rsptimes_target_dir) + if operation == 'cleanup': common.pdsh(self.any_client, 'rm -rf ' + topdir, continue_if_error=False).communicate() common.pdsh(self.any_client, 'mkdir -v -m 0777 ' + topdir, continue_if_error=False).communicate() diff --git a/example/smallfile-test-set.yaml b/example/smallfile-test-set.yaml index 841cdfe9..a4abc902 100644 --- a/example/smallfile-test-set.yaml +++ b/example/smallfile-test-set.yaml @@ -20,6 +20,9 @@ # the smallfile CBT benchmark module will do that for you # using 'clients' host group # +# you should not specify "response-times: Y" in your YAML input, +# the smallfile CBT benchmark defaults to this behavior +# # it has only been tested with a single Cephfs mountpoint/host cluster: From 7517ee53711435ed1829d4fbddfb1f37b49f4dc0 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 00:55:23 -0500 Subject: [PATCH 12/74] changed pdsh cmd to ansible cmd --- common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/common.py b/common.py index 73f3a92d..7c2d5a14 100644 --- a/common.py +++ b/common.py @@ -64,7 +64,8 @@ def expanded_node_list(nodes): return node_list def pdsh(nodes, command, continue_if_error=True): - args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), 
'-R', 'ssh', '-w', nodes, command] + #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', command, '-i', nodes] # -S means pdsh fails if any host fails if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) From b4686103847124ee1c05bab16e1ba6afb4463aeb Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 00:57:42 -0500 Subject: [PATCH 13/74] removed if statement for -S option for pdsh --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 7c2d5a14..46b14a73 100644 --- a/common.py +++ b/common.py @@ -67,7 +67,7 @@ def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', command, '-i', nodes] # -S means pdsh fails if any host fails - if not continue_if_error: args.insert(1, '-S') + #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) From e20dd3c86dd186cc23d02a7afdeaadfae3a561fb Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 00:59:40 -0500 Subject: [PATCH 14/74] added all to ansible command --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 46b14a73..6682c338 100644 --- a/common.py +++ b/common.py @@ -65,7 +65,7 @@ def expanded_node_list(nodes): def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', command, '-i', nodes] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', command, '-i', nodes, 'all'] # -S means pdsh fails if any host fails #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) From ee918b294b2f293a081c8eaa805d372a9da5ad50 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 01:09:42 -0500 Subject: [PATCH 15/74] changed rpdcp to ansible fetch command --- common.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/common.py b/common.py index 6682c338..9ed6338f 100644 --- a/common.py +++ b/common.py @@ -80,11 +80,13 @@ def pdcp(nodes, flags, localfile, remotefile): def rpdcp(nodes, flags, remotefile, localfile): - args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] + #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] + args = ['ansible', '-f', '10', '-m', 'fetch', "src=remotefile dest=localfile", '-i', nodes, 'all'] if flags: args += [flags] - return CheckedPopen(args + [remotefile, localfile], - continue_if_error=False) + #return CheckedPopen(args + [remotefile, localfile], + # continue_if_error=False) + return CheckedPopen(args,continue_if_error=continue_if_error) def scp(node, localfile, remotefile): From 28322a04f9692e33da311b2a8737ddbf18ceac85 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 01:14:17 -0500 Subject: [PATCH 16/74] set contine_if_error to false for rpdcp --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 9ed6338f..0a7bce3b 100644 --- a/common.py +++ b/common.py @@ -86,7 +86,7 @@ def rpdcp(nodes, flags, remotefile, localfile): args += [flags] #return 
CheckedPopen(args + [remotefile, localfile], # continue_if_error=False) - return CheckedPopen(args,continue_if_error=continue_if_error) + return CheckedPopen(args,continue_if_error=False) def scp(node, localfile, remotefile): From 18f66c37ca59ca19b711849db3e0244be9519003 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 01:19:27 -0500 Subject: [PATCH 17/74] corrected fetch command and add flat option --- common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/common.py b/common.py index 0a7bce3b..be9c7bf2 100644 --- a/common.py +++ b/common.py @@ -81,9 +81,9 @@ def pdcp(nodes, flags, localfile, remotefile): def rpdcp(nodes, flags, remotefile, localfile): #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] - args = ['ansible', '-f', '10', '-m', 'fetch', "src=remotefile dest=localfile", '-i', nodes, 'all'] - if flags: - args += [flags] + args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] +# if flags: +# args += [flags] #return CheckedPopen(args + [remotefile, localfile], # continue_if_error=False) return CheckedPopen(args,continue_if_error=False) From 20e614c02003eb13c6fe1502d9cd0fed5b731eda Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 10:49:55 -0500 Subject: [PATCH 18/74] temp change ansible adhoc cmd to recur fetch files. --- common.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/common.py b/common.py index be9c7bf2..a75cf2f6 100644 --- a/common.py +++ b/common.py @@ -2,6 +2,7 @@ import logging import os import subprocess +import socket import settings @@ -81,7 +82,9 @@ def pdcp(nodes, flags, localfile, remotefile): def rpdcp(nodes, flags, remotefile, localfile): #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] - args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] + #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] + lhost = socket.gethostname() + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfiles), '-i', nodes, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 4e8403b6279bd06f4e26ca8f513357f731faf930 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 10:52:52 -0500 Subject: [PATCH 19/74] corrected typo --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index a75cf2f6..653a307b 100644 --- a/common.py +++ b/common.py @@ -84,7 +84,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfiles), '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', nodes, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 9fbba17d26481ba723b019a678a150d7947150e0 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 11:07:38 -0500 Subject: [PATCH 20/74] removing * from remotefiles var --- common.py | 1 + 1 file 
changed, 1 insertion(+) diff --git a/common.py b/common.py index 653a307b..7f73d562 100644 --- a/common.py +++ b/common.py @@ -84,6 +84,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() + remotefile = remotefile.replace("*", "") args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', nodes, 'all'] # if flags: # args += [flags] From 405fae2b872d131435b251775c57df54ac4f2837 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 11:15:00 -0500 Subject: [PATCH 21/74] added "" in command field --- common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common.py b/common.py index 7f73d562..983ca879 100644 --- a/common.py +++ b/common.py @@ -66,7 +66,7 @@ def expanded_node_list(nodes): def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', command, '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "\"%s\"" % command, '-i', nodes, 'all'] # -S means pdsh fails if any host fails #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) @@ -85,7 +85,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() remotefile = remotefile.replace("*", "") - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "\"scp -r %s %s:%s\"" % (remotefile, lhost, localfile), '-i', nodes, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 2f6624bb50e17130169ea71d3f0fb9823c7be475 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 11:21:01 -0500 Subject: [PATCH 22/74] correction for nested quotes --- common.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/common.py b/common.py index 983ca879..7e599f24 100644 --- a/common.py +++ b/common.py @@ -66,7 +66,7 @@ def expanded_node_list(nodes): def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "\"%s\"" % command, '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', '''"%s"''' % command, '-i', nodes, 'all'] # -S means pdsh fails if any host fails #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) @@ -85,7 +85,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() remotefile = remotefile.replace("*", "") - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "\"scp -r %s %s:%s\"" % (remotefile, lhost, 
localfile), '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', '''"scp -r %s %s:%s"''' % (remotefile, lhost, localfile), '-i', nodes, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 308a9cb70ee83f2d157a36b90e9b6fc7907a7b32 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 11:22:08 -0500 Subject: [PATCH 23/74] removed nested quote on pdsh --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 7e599f24..2f908adc 100644 --- a/common.py +++ b/common.py @@ -66,7 +66,7 @@ def expanded_node_list(nodes): def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', '''"%s"''' % command, '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "%s" % command, '-i', nodes, 'all'] # -S means pdsh fails if any host fails #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) From 5c4645152727f9e53392aca2ed52a999a4b74e8c Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 11:42:25 -0500 Subject: [PATCH 24/74] removed nested quotes from rpdcp command --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 2f908adc..3fc3e677 100644 --- a/common.py +++ b/common.py @@ -85,7 +85,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() remotefile = remotefile.replace("*", "") - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', '''"scp -r %s %s:%s"''' % (remotefile, lhost, localfile), '-i', nodes, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', nodes, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 21a8e231fca691964d6f29b3ae34f927cf49b4d2 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:03:33 -0500 Subject: [PATCH 25/74] updated get_fqdn_list to work with ansible output --- common.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/common.py b/common.py index 3fc3e677..106eeb18 100644 --- a/common.py +++ b/common.py @@ -108,7 +108,13 @@ def get_fqdn_cmd(): def get_fqdn_list(nodes): stdout, stderr = pdsh(settings.getnodes(nodes), '%s' % get_fqdn_cmd()).communicate() print stdout - ret = [i.split(' ', 1)[1] for i in stdout.splitlines()] + + ret = [] + for line in stdout: + if "CHANGED" not in line: + ret.append(line) + + #ret = [i.split(' ', 1)[1] for i in stdout.splitlines()] print ret return ret From b5a87c22d307c561dc87af30c9c94ea61dea034d Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:09:54 -0500 Subject: [PATCH 26/74] changed stdout to stdout.splitlines --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 106eeb18..c88f67c3 100644 --- a/common.py +++ b/common.py @@ -110,7 +110,7 @@ def get_fqdn_list(nodes): print stdout ret = [] - for line in stdout: + for line in stdout.splitlines(): if "CHANGED" not in line: ret.append(line) From 
9a915cfbbc4960b2ddc67de09d7c33e179f9d150 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:26:29 -0500 Subject: [PATCH 27/74] created function that creates a tmp inventory file --- common.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/common.py b/common.py index c88f67c3..a6df5a30 100644 --- a/common.py +++ b/common.py @@ -49,6 +49,15 @@ def wait(self): # pdsh() calls that require full parallelism (workload generation) # work correctly. +def ansible_hostfile(hosts): + + inventory_file = '/tmp/cbtinventory' + with open(inventory_file, 'w') as f: + for item in hosts: + f.write("%s\n" % item) + + return inventory_file + def expanded_node_list(nodes): # nodes is a comma-separated list for pdsh "-w" parameter # nodes may have some entries with '^' prefix, pdsh syntax meaning @@ -66,7 +75,8 @@ def expanded_node_list(nodes): def pdsh(nodes, command, continue_if_error=True): #args = ['pdsh', '-f', str(len(expanded_node_list(nodes))), '-R', 'ssh', '-w', nodes, command] - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "%s" % command, '-i', nodes, 'all'] + inventory = ansible_hostfile(nodes) + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "%s" % command, '-i', inventory, 'all'] # -S means pdsh fails if any host fails #if not continue_if_error: args.insert(1, '-S') return CheckedPopen(args,continue_if_error=continue_if_error) @@ -85,7 +95,8 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() remotefile = remotefile.replace("*", "") - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', nodes, 'all'] + inventory = ansible_hostfile(nodes) + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 8f332555bb23ee58224a8cfb3ef51ed7865531fe Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:32:30 -0500 Subject: [PATCH 28/74] added debug statment --- common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/common.py b/common.py index a6df5a30..a7cea4e5 100644 --- a/common.py +++ b/common.py @@ -52,6 +52,7 @@ def wait(self): def ansible_hostfile(hosts): inventory_file = '/tmp/cbtinventory' + print hosts with open(inventory_file, 'w') as f: for item in hosts: f.write("%s\n" % item) From 8d7ec5a16065c9d43cabcd053e9051f631163da9 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:40:21 -0500 Subject: [PATCH 29/74] tryed to split hosts --- common.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/common.py b/common.py index a7cea4e5..04360c66 100644 --- a/common.py +++ b/common.py @@ -53,8 +53,10 @@ def ansible_hostfile(hosts): inventory_file = '/tmp/cbtinventory' print hosts + hosts = hosts.split(",") with open(inventory_file, 'w') as f: for item in hosts: + print item f.write("%s\n" % item) return inventory_file From 61f0291a14892a1ebfdad5bd87dc683a1fd23831 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 13:44:41 -0500 Subject: [PATCH 30/74] removed print statments --- common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/common.py b/common.py index 04360c66..67afa07c 100644 --- a/common.py +++ 
b/common.py @@ -52,11 +52,9 @@ def wait(self): def ansible_hostfile(hosts): inventory_file = '/tmp/cbtinventory' - print hosts hosts = hosts.split(",") with open(inventory_file, 'w') as f: for item in hosts: - print item f.write("%s\n" % item) return inventory_file From 24ddffebb19d2030b7f5c22cd9a7e30d3e6c455c Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 14:31:40 -0500 Subject: [PATCH 31/74] updated rbdfio to specify output file, hostname appended after volnum --- benchmark/librbdfio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 09ca4fa8..80423876 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -135,7 +135,7 @@ def run(self): def mkfiocmd(self, volnum): rbdname = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum) - out_file = '%s/output.%d' % (self.run_dir, volnum) + out_file = '%s/output.%d.%s' % (self.run_dir, volnum, common.get_fqdn_cmd()) fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (self.cmd_path_full, self.pool_name, rbdname) fio_cmd += ' --rw=%s' % self.mode From f5b361aa866674ed30dca08a3c687cdb95dfa009 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Tue, 5 Mar 2019 14:36:29 -0500 Subject: [PATCH 32/74] added backtick to output file name --- benchmark/librbdfio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 80423876..5f5469d8 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -135,7 +135,7 @@ def run(self): def mkfiocmd(self, volnum): rbdname = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum) - out_file = '%s/output.%d.%s' % (self.run_dir, volnum, common.get_fqdn_cmd()) + out_file = '%s/output.%d.`%s`' % (self.run_dir, volnum, common.get_fqdn_cmd()) fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (self.cmd_path_full, self.pool_name, rbdname) fio_cmd += ' --rw=%s' % self.mode From 690d4feca3f1d74328c896a6ae5dff4ccf40312d Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Wed, 6 Mar 2019 12:27:56 -0500 Subject: [PATCH 33/74] removed stripping of * on remotefiles of rpdcp --- common.py | 1 - 1 file changed, 1 deletion(-) diff --git a/common.py b/common.py index 67afa07c..04b4c6cc 100644 --- a/common.py +++ b/common.py @@ -95,7 +95,6 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['rpdcp', '-f', '10', '-R', 'ssh', '-w', nodes] #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() - remotefile = remotefile.replace("*", "") inventory = ansible_hostfile(nodes) args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] # if flags: From 0ccdd0e66caf96cd91a3b8c21591ff7a9e1d4e02 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 25 Mar 2019 14:54:33 -0400 Subject: [PATCH 34/74] Added handle to result to fqdn if ip is provided --- benchmark/librbdfio.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 5f5469d8..5c1125be 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -194,6 +194,12 @@ def recovery_callback(self): def parse(self, out_dir): for client in settings.cluster.get('clients'): + try: + socket.inet_aton(client) + client = socket.gethostbyaddr(client) + except: + pass + for i in 
xrange(self.volumes_per_client): found = 0 out_file = '%s/output.%d.%s' % (out_dir, i, client) From 57ce7ac9a89b2541927806a6d5d93731372c1881 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Mon, 25 Mar 2019 15:14:40 -0400 Subject: [PATCH 35/74] added socket module --- benchmark/librbdfio.py | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 5c1125be..1ed04320 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -7,6 +7,7 @@ import threading import logging import json +import socket from cluster.ceph import Ceph from benchmark import Benchmark From f0ef1817aeafc0a3856099c61d83bf3d5e654b6d Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Thu, 11 Apr 2019 10:58:07 -0400 Subject: [PATCH 36/74] changed sscp for rpdcp to rsync to avoid conflicts with coping data --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 04b4c6cc..6fff5538 100644 --- a/common.py +++ b/common.py @@ -96,7 +96,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() inventory = ansible_hostfile(nodes) - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "rsync -avz -e ssh %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 756e83da6ea3b3ee6978b70b4c67f7a7e6e39d65 Mon Sep 17 00:00:00 2001 From: acalhounRH Date: Sat, 20 Apr 2019 01:27:34 -0400 Subject: [PATCH 37/74] reverted back to scp --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 6fff5538..04b4c6cc 100644 --- a/common.py +++ b/common.py @@ -96,7 +96,7 @@ def rpdcp(nodes, flags, remotefile, localfile): #args = ['ansible', '-f', '10', '-m', 'fetch', '-a', "flat==yes src=%s dest=%s" % (remotefile, localfile), '-i', nodes, 'all'] lhost = socket.gethostname() inventory = ansible_hostfile(nodes) - args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "rsync -avz -e ssh %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] + args = ['ansible', '-f', str(len(expanded_node_list(nodes))), '-m', 'shell', '-a', "scp -r %s %s:%s" % (remotefile, lhost, localfile), '-i', inventory, 'all'] # if flags: # args += [flags] #return CheckedPopen(args + [remotefile, localfile], From 34708759c79a495b04cbd13f0b1cdd5b7d81288c Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Wed, 1 May 2019 14:06:33 -0400 Subject: [PATCH 38/74] added gitignore --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 52e4e611..20e51686 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ *.pyc *.pyo +/.project From 7647fcc0830f110a9853fb369024f3ce19f90d93 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Mon, 6 May 2019 11:56:04 -0400 Subject: [PATCH 39/74] updated to append hostname to outfile --- benchmark/librbdfio.py | 2 +- benchmark/radosbench.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index cd3e563c..9e2aabaf 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -143,7 +143,7 @@ def mkfiocmd(self, 
volnum): rbdname = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum) logger.debug('Using rbdname %s', rbdname) - out_file = '%s/output.%d' % (self.run_dir, volnum) + out_file = '%s/output.%d.`%s`' % (self.run_dir, volnum, common.get_fqdn_cmd()) fio_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0' % (self.cmd_path_full, self.pool_name, rbdname) fio_cmd += ' --rw=%s' % self.mode diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 95d445a5..8b33e41a 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -125,7 +125,7 @@ def _run(self, mode, run_dir, out_dir): logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): - out_file = '%s/output.%s' % (run_dir, i) + out_file = '%s/output.%s.`%s`' % (run_dir, i, common.get_fqdn_cmd()) objecter_log = '%s/objecter.%s.log' % (run_dir, i) # default behavior is to use a single storage pool pool_name = self.pool @@ -144,7 +144,7 @@ def _run(self, mode, run_dir, out_dir): monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. - if 'recovery_test' in self.cluster.config: + -if 'recovery_test' in self.cluster.config: self.cluster.wait_recovery_done() # Finally, get the historic ops From 791789356865464c7e75b71733eb8ba8678af9c3 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Mon, 6 May 2019 12:05:55 -0400 Subject: [PATCH 40/74] change continue if error to true --- common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/common.py b/common.py index 04b4c6cc..dd10e43b 100644 --- a/common.py +++ b/common.py @@ -101,7 +101,7 @@ def rpdcp(nodes, flags, remotefile, localfile): # args += [flags] #return CheckedPopen(args + [remotefile, localfile], # continue_if_error=False) - return CheckedPopen(args,continue_if_error=False) + return CheckedPopen(args,continue_if_error=True) def scp(node, localfile, remotefile): From 8eaedfca165dcedcd4a5154ab2b89f769516ec82 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Mon, 6 May 2019 12:10:47 -0400 Subject: [PATCH 41/74] correction to typo --- benchmark/radosbench.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 8b33e41a..621433d0 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -125,7 +125,7 @@ def _run(self, mode, run_dir, out_dir): logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): - out_file = '%s/output.%s.`%s`' % (run_dir, i, common.get_fqdn_cmd()) + out_file = '%s/output.%s.`%s' % (run_dir, i, common.get_fqdn_cmd()) objecter_log = '%s/objecter.%s.log' % (run_dir, i) # default behavior is to use a single storage pool pool_name = self.pool @@ -144,7 +144,7 @@ def _run(self, mode, run_dir, out_dir): monitoring.stop(run_dir) # If we were doing recovery, wait until it's done. 
- -if 'recovery_test' in self.cluster.config: + if 'recovery_test' in self.cluster.config: self.cluster.wait_recovery_done() # Finally, get the historic ops From ff9f82159b71b5152d3f94825d2f0680b07575c1 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Mon, 6 May 2019 12:17:54 -0400 Subject: [PATCH 42/74] correcte missing back tic --- benchmark/radosbench.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 621433d0..505c2d54 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -125,7 +125,7 @@ def _run(self, mode, run_dir, out_dir): logger.info('Running radosbench %s test.' % mode) ps = [] for i in xrange(self.concurrent_procs): - out_file = '%s/output.%s.`%s' % (run_dir, i, common.get_fqdn_cmd()) + out_file = '%s/output.%s.`%s`' % (run_dir, i, common.get_fqdn_cmd()) objecter_log = '%s/objecter.%s.log' % (run_dir, i) # default behavior is to use a single storage pool pool_name = self.pool From 0ee3f02145bb59bd7912943e60502b748422bf37 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Wed, 15 May 2019 09:40:58 -0400 Subject: [PATCH 43/74] Merged smallfile branch --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 20e51686..76dddd2c 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ *.pyc *.pyo /.project +/.pydevproject From babd3decf9582c77361345484f31e89ddc69e609 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Thu, 16 May 2019 15:21:49 -0400 Subject: [PATCH 44/74] added pbench start and stop to smallfile benchmark --- benchmark/smallfile.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py index 1b146e2b..35b1c9fa 100644 --- a/benchmark/smallfile.py +++ b/benchmark/smallfile.py @@ -117,6 +117,7 @@ def run(self): # Run smallfile monitoring.start(self.run_dir) + monitoring.start_pbench(self.out_dir) logger.info('Running smallfile test, see %s for parameters' % yaml_input_pathname) smfcmd = [ 'smallfile_cli.py', '--host-set', client_list_path, @@ -131,8 +132,10 @@ def run(self): with open(smf_out_path, 'w') as smf_outf: smf_outf.write(smf_out_str + '\n') logger.info('smallfile result: %s' % smf_out_path) + monitoring.start_pbench(self.out_dir) monitoring.stop(self.run_dir) + # save response times rsptimes_target_dir = os.path.join(self.out_dir, 'rsptimes') common.mkdir_p(rsptimes_target_dir) From 3c4bdf235519b152dbdff5a221c2811dedfcfd75 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Thu, 16 May 2019 15:26:21 -0400 Subject: [PATCH 45/74] corrected pbench stop command --- benchmark/smallfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py index 35b1c9fa..575af4d4 100644 --- a/benchmark/smallfile.py +++ b/benchmark/smallfile.py @@ -132,7 +132,7 @@ def run(self): with open(smf_out_path, 'w') as smf_outf: smf_outf.write(smf_out_str + '\n') logger.info('smallfile result: %s' % smf_out_path) - monitoring.start_pbench(self.out_dir) + monitoring.stop_pbench(self.out_dir) monitoring.stop(self.run_dir) From 4f008b82ebe7747a5fa96236898192d4276f4b36 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 28 May 2019 15:40:40 -0400 Subject: [PATCH 46/74] changed any_client to head iot reterive rsptime csv files --- benchmark/smallfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py index 575af4d4..82acd966 100644 --- a/benchmark/smallfile.py +++ 
b/benchmark/smallfile.py @@ -139,7 +139,7 @@ def run(self): # save response times rsptimes_target_dir = os.path.join(self.out_dir, 'rsptimes') common.mkdir_p(rsptimes_target_dir) - common.rpdcp(self.any_client, '', + common.rpdcp(self.head, '', os.path.join(os.path.join(topdir, 'network_shared'), 'rsptimes*csv'), rsptimes_target_dir) From 02da502f065ad3bbf67b898deb0bfd2c0bf556b9 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 09:38:25 -0400 Subject: [PATCH 47/74] update except "error" changed , to as --- settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 52ee70e3..83659cee 100644 --- a/settings.py +++ b/settings.py @@ -18,7 +18,7 @@ def initialize(ctx): try: with file(ctx.config_file) as f: map(config.update, yaml.safe_load_all(f)) - except IOError, e: + except IOError as e: raise argparse.ArgumentTypeError(str(e)) cluster = config.get('cluster', {}) From f10a3ac4e8c774674dbfce322cc371278d63a3fe Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:03:17 -0400 Subject: [PATCH 48/74] corrected print statements added () to each print statement --- common.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/common.py b/common.py index dd10e43b..c6e64474 100644 --- a/common.py +++ b/common.py @@ -118,7 +118,7 @@ def get_fqdn_cmd(): def get_fqdn_list(nodes): stdout, stderr = pdsh(settings.getnodes(nodes), '%s' % get_fqdn_cmd()).communicate() - print stdout + print (stdout) ret = [] for line in stdout.splitlines(): @@ -126,11 +126,11 @@ def get_fqdn_list(nodes): ret.append(line) #ret = [i.split(' ', 1)[1] for i in stdout.splitlines()] - print ret + print (ret) return ret def clean_remote_dir (remote_dir): - print "cleaning remote dir %s" % remote_dir + print ("cleaning remote dir %s" % remote_dir) if remote_dir == "/" or not os.path.isabs(remote_dir): raise SystemExit("Cleaning the remote dir doesn't seem safe, bailing.") From 7d9d57e01b0767f2bcf3e95a31603c28e4d99513 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:07:52 -0400 Subject: [PATCH 49/74] added explicit import from benchmark file --- benchmark/smallfile.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/smallfile.py b/benchmark/smallfile.py index 82acd966..a5222d78 100644 --- a/benchmark/smallfile.py +++ b/benchmark/smallfile.py @@ -23,7 +23,7 @@ import json import subprocess -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") From 864d879015175e1d1789045d629b7266af21a882 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:12:17 -0400 Subject: [PATCH 50/74] corrected taberror on line 549 removed 1 tab on line 549 --- cluster/ceph.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/cluster/ceph.py b/cluster/ceph.py index 01390667..8b27b127 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -547,7 +547,7 @@ def make_profiles(self): k = profile.get('erasure_k', 6) m = profile.get('erasure_m', 2) common.pdsh(settings.getnodes('head'), '%s -c %s osd erasure-code-profile set %s crush-failure-domain=osd k=%s m=%s' % (self.ceph_cmd, self.tmp_conf, name, k, m)).communicate() - self.set_ruleset(name) + self.set_ruleset(name) def mkpool(self, name, profile_name, application, base_name=None): pool_profiles = self.config.get('pool_profiles', {'default': {}}) @@ -570,7 +570,7 @@ def mkpool(self, name, profile_name, application, base_name=None): target_max_bytes = 
profile.get('target_max_bytes', None) min_read_recency_for_promote = profile.get('min_read_recency_for_promote', None) min_write_recency_for_promote = profile.get('min_write_recency_for_promote', None) - # Options for prefilling objects prefill_objects = profile.get('prefill_objects', 0) prefill_object_size = profile.get('prefill_object_size', 0) prefill_time = profile.get('prefill_time', 0) From a5dd3ec9226fa5c0688411ff509bb6afeece1a4b Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:14:39 -0400 Subject: [PATCH 51/74] resolved TabError: inconsistent use of tabs and spaces in indentation correction made on line 549 of ceph.py --- cluster/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/ceph.py b/cluster/ceph.py index 8b27b127..b3a88759 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -546,7 +546,7 @@ def make_profiles(self): for name,profile in erasure_profiles.items(): k = profile.get('erasure_k', 6) m = profile.get('erasure_m', 2) - common.pdsh(settings.getnodes('head'), '%s -c %s osd erasure-code-profile set %s crush-failure-domain=osd k=%s m=%s' % (self.ceph_cmd, self.tmp_conf, name, k, m)).communicate() + common.pdsh(settings.getnodes('head'), '%s -c %s osd erasure-code-profile set %s crush-failure-domain=osd k=%s m=%s' % (self.ceph_cmd, self.tmp_conf, name, k, m)).communicate() self.set_ruleset(name) def mkpool(self, name, profile_name, application, base_name=None): From c343f327242c3dae0aa1dfe60bfc3d9fff1d151f Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:17:33 -0400 Subject: [PATCH 52/74] specific cluster in import comand for Cluster --- cluster/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/ceph.py b/cluster/ceph.py index b3a88759..5e3896e9 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -8,7 +8,7 @@ import threading import logging -from cluster import Cluster +from cluster.cluster import Cluster logger = logging.getLogger("cbt") From efdeebaab3cc1ab4635fd7f5bb8097d60b48a7a1 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:20:12 -0400 Subject: [PATCH 53/74] made updates to resovle import error on relative modules --- benchmark/cephtestrados.py | 4 ++-- benchmark/cosbench.py | 2 +- benchmark/getput.py | 2 +- benchmark/kvmrbdfio.py | 2 +- benchmark/librbdfio.py | 2 +- benchmark/nullbench.py | 2 +- benchmark/radosbench.py | 2 +- benchmark/rawfio.py | 2 +- benchmark/rbdfio.py | 2 +- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/benchmark/cephtestrados.py b/benchmark/cephtestrados.py index 88c46496..24a29eab 100644 --- a/benchmark/cephtestrados.py +++ b/benchmark/cephtestrados.py @@ -10,7 +10,7 @@ logger = logging.getLogger('cbt') from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark class CephTestRados(Benchmark): @@ -59,7 +59,7 @@ def addweight(self, weight): def exists(self): if os.path.exists(self.out_dir): - print 'Skipping existing test in %s.' % self.out_dir + print ('Skipping existing test in %s.' 
% self.out_dir) return True return False diff --git a/benchmark/cosbench.py b/benchmark/cosbench.py index 42b94284..7e8734ca 100644 --- a/benchmark/cosbench.py +++ b/benchmark/cosbench.py @@ -11,7 +11,7 @@ import logging from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/getput.py b/benchmark/getput.py index e8a51b86..ec7b08d0 100644 --- a/benchmark/getput.py +++ b/benchmark/getput.py @@ -9,7 +9,7 @@ import re from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/kvmrbdfio.py b/benchmark/kvmrbdfio.py index 829f9b2d..c7724216 100644 --- a/benchmark/kvmrbdfio.py +++ b/benchmark/kvmrbdfio.py @@ -7,7 +7,7 @@ import string import logging -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 9e2aabaf..95d7dcf5 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -10,7 +10,7 @@ import socket from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/nullbench.py b/benchmark/nullbench.py index 083f8ee4..b366bbfd 100644 --- a/benchmark/nullbench.py +++ b/benchmark/nullbench.py @@ -5,7 +5,7 @@ import os from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark class Nullbench(Benchmark): diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py index 505c2d54..9d071462 100644 --- a/benchmark/radosbench.py +++ b/benchmark/radosbench.py @@ -10,7 +10,7 @@ import json from cluster.ceph import Ceph -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/rawfio.py b/benchmark/rawfio.py index c4387a2b..e0286b57 100644 --- a/benchmark/rawfio.py +++ b/benchmark/rawfio.py @@ -7,7 +7,7 @@ import string import logging -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") diff --git a/benchmark/rbdfio.py b/benchmark/rbdfio.py index 5140820f..0fdb8167 100644 --- a/benchmark/rbdfio.py +++ b/benchmark/rbdfio.py @@ -6,7 +6,7 @@ import time import logging -from benchmark import Benchmark +from benchmark.benchmark import Benchmark logger = logging.getLogger("cbt") From 3d23dee1b72f10d0c723d00d09d6062063314c93 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:21:33 -0400 Subject: [PATCH 54/74] corrected tab error on line 53 --- benchmark/librbdfio.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 95d7dcf5..4e445217 100644 --- a/benchmark/librbdfio.py +++ b/benchmark/librbdfio.py @@ -50,7 +50,7 @@ def __init__(self, cluster, config): self.pool_name = config.get("poolname", "cbt-librbdfio") self.rbdname = config.get('rbdname', '') - self.total_procs = self.procs_per_volume * self.volumes_per_client * len(settings.getnodes('clients').split(',')) + self.total_procs = self.procs_per_volume * self.volumes_per_client * len(settings.getnodes('clients').split(',')) self.run_dir = '%s/osd_ra-%08d/op_size-%08d/concurrent_procs-%03d/iodepth-%03d/%s' % (self.run_dir, int(self.osd_ra), int(self.op_size), int(self.total_procs), int(self.iodepth), self.mode) self.out_dir = self.archive_dir From 
959a72430b336d77dc0ccd0e4516b800818ec7c6 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:26:18 -0400 Subject: [PATCH 55/74] changed file() to open() on line 19. --- settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 83659cee..a1085613 100644 --- a/settings.py +++ b/settings.py @@ -16,7 +16,7 @@ def initialize(ctx): config = {} try: - with file(ctx.config_file) as f: + with open(ctx.config_file) as f: map(config.update, yaml.safe_load_all(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From b29fc39f2243a0f1648d2304a3d3df48bf67392a Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:27:25 -0400 Subject: [PATCH 56/74] changed back to file but added io.file() --- settings.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/settings.py b/settings.py index a1085613..740a4e48 100644 --- a/settings.py +++ b/settings.py @@ -3,6 +3,7 @@ import sys import os import logging +import io logger = logging.getLogger("cbt") @@ -16,7 +17,7 @@ def initialize(ctx): config = {} try: - with open(ctx.config_file) as f: + with io.file(ctx.config_file) as f: map(config.update, yaml.safe_load_all(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From dc582f14ad3a4e6b416483b7b294aa25c7a798ba Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:29:22 -0400 Subject: [PATCH 57/74] revert back to open --- settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 740a4e48..50a732a6 100644 --- a/settings.py +++ b/settings.py @@ -17,7 +17,7 @@ def initialize(ctx): config = {} try: - with io.file(ctx.config_file) as f: + with open(ctx.config_file) as f: map(config.update, yaml.safe_load_all(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From d5f9ed1f8b61963c6d96161768dadddb1a4b1e8d Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:36:39 -0400 Subject: [PATCH 58/74] added .read to open --- settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 50a732a6..32737bea 100644 --- a/settings.py +++ b/settings.py @@ -17,7 +17,7 @@ def initialize(ctx): config = {} try: - with open(ctx.config_file) as f: + with open(ctx.config_file).read() as f: map(config.update, yaml.safe_load_all(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From e88b4759cd393671d08d5e3005474caaf738820a Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:40:52 -0400 Subject: [PATCH 59/74] updated the way cbt reads in the configfile into a dict (config) --- settings.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 32737bea..1f764a02 100644 --- a/settings.py +++ b/settings.py @@ -17,7 +17,8 @@ def initialize(ctx): config = {} try: - with open(ctx.config_file).read() as f: + file = open(ctx.config_file) + with file.read() as f: map(config.update, yaml.safe_load_all(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 2ea85c1d5fb178f13959039766262e0de523df16 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:42:07 -0400 Subject: [PATCH 60/74] updated init to map configfile with config dict --- settings.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/settings.py b/settings.py index 1f764a02..466f30e3 100644 --- a/settings.py +++ b/settings.py @@ -18,8 +18,8 @@ def initialize(ctx): 
config = {} try: file = open(ctx.config_file) - with file.read() as f: - map(config.update, yaml.safe_load_all(f)) + #with file.read() as f: + map(config.update, yaml.safe_load_all(file.read())) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 1da2a588eab97264ef7a2040108185011607f63a Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:42:59 -0400 Subject: [PATCH 61/74] added debug print to print out config --- settings.py | 1 + 1 file changed, 1 insertion(+) diff --git a/settings.py b/settings.py index 466f30e3..3145fc21 100644 --- a/settings.py +++ b/settings.py @@ -20,6 +20,7 @@ def initialize(ctx): file = open(ctx.config_file) #with file.read() as f: map(config.update, yaml.safe_load_all(file.read())) + print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 4d9ab44e1fb4e9cddeef6a7af51d93770dde21fd Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:46:55 -0400 Subject: [PATCH 62/74] updated to load yaml file using open --- settings.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/settings.py b/settings.py index 3145fc21..96c9f0de 100644 --- a/settings.py +++ b/settings.py @@ -18,8 +18,8 @@ def initialize(ctx): config = {} try: file = open(ctx.config_file) - #with file.read() as f: - map(config.update, yaml.safe_load_all(file.read())) + with open(ctx.config_file, 'r') as f: + map(config.update, yaml.safe_load(f)) print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From e917fce217c51af4e0acf03f69df0de556dca74f Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:50:29 -0400 Subject: [PATCH 63/74] removed map() when loading config file --- settings.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/settings.py b/settings.py index 96c9f0de..a031c90d 100644 --- a/settings.py +++ b/settings.py @@ -19,7 +19,8 @@ def initialize(ctx): try: file = open(ctx.config_file) with open(ctx.config_file, 'r') as f: - map(config.update, yaml.safe_load(f)) + #map(config.update, yaml.safe_load(f)) + config.update(yaml.safe_load(f)) print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 34815a7b4f0b8ad0587301a0192be08024ed5bcc Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:54:12 -0400 Subject: [PATCH 64/74] updated to load highlevel dict loaded from yaml --- settings.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/settings.py b/settings.py index a031c90d..d343bf2b 100644 --- a/settings.py +++ b/settings.py @@ -19,8 +19,9 @@ def initialize(ctx): try: file = open(ctx.config_file) with open(ctx.config_file, 'r') as f: - #map(config.update, yaml.safe_load(f)) - config.update(yaml.safe_load(f)) + #map(config.update, + for kv in yaml.safe_load(f)): + config.update(kv) print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 73d797d4528af45fcf6cb03282d72eea5956a9fc Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:55:00 -0400 Subject: [PATCH 65/74] removed typo and corrected tap on line 24 --- settings.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/settings.py b/settings.py index d343bf2b..3e9011df 100644 --- a/settings.py +++ b/settings.py @@ -20,8 +20,8 @@ def initialize(ctx): file = open(ctx.config_file) with open(ctx.config_file, 'r') as f: #map(config.update, - for kv in yaml.safe_load(f)): - config.update(kv) + for kv in yaml.safe_load(f): + config.update(kv) print(config) except 
IOError as e: raise argparse.ArgumentTypeError(str(e)) From 32ed3a68526759563aa71c72a32d74e0a0ea20a0 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 10:58:24 -0400 Subject: [PATCH 66/74] corrected way dict update is being performed to retain struct --- settings.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/settings.py b/settings.py index 3e9011df..97c1e2b2 100644 --- a/settings.py +++ b/settings.py @@ -20,8 +20,9 @@ def initialize(ctx): file = open(ctx.config_file) with open(ctx.config_file, 'r') as f: #map(config.update, - for kv in yaml.safe_load(f): - config.update(kv) + dict = yaml.safe_load(f) + for k in dict: + config.update(dict[k]) print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 5c9c4eba6f6d353af53c8c8be154a48b812c27ed Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:00:01 -0400 Subject: [PATCH 67/74] removed loop for each k in dict, saving full dict into config --- settings.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/settings.py b/settings.py index 97c1e2b2..ef066031 100644 --- a/settings.py +++ b/settings.py @@ -21,8 +21,7 @@ def initialize(ctx): with open(ctx.config_file, 'r') as f: #map(config.update, dict = yaml.safe_load(f) - for k in dict: - config.update(dict[k]) + config.update(dict) print(config) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From 4ab8b81d02957fcf500b62e6aca7f2e46ff35432 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:04:18 -0400 Subject: [PATCH 68/74] cleaning up how configuration file is loaded. --- settings.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/settings.py b/settings.py index ef066031..07be9dd4 100644 --- a/settings.py +++ b/settings.py @@ -19,10 +19,7 @@ def initialize(ctx): try: file = open(ctx.config_file) with open(ctx.config_file, 'r') as f: - #map(config.update, - dict = yaml.safe_load(f) - config.update(dict) - print(config) + config.update(yaml.safe_load(f)) except IOError as e: raise argparse.ArgumentTypeError(str(e)) From e36f897581ac6187b7256dcaf03906f7277ef2ca Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:05:25 -0400 Subject: [PATCH 69/74] changed iteritems() to items(), iteritems is removed in python3 --- benchmarkfactory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmarkfactory.py b/benchmarkfactory.py index aaa300fc..f7a35655 100644 --- a/benchmarkfactory.py +++ b/benchmarkfactory.py @@ -14,7 +14,7 @@ from benchmark.getput import Getput def get_all(cluster, iteration): - for benchmark, config in sorted(settings.benchmarks.iteritems()): + for benchmark, config in sorted(settings.benchmarks.items()): default = {"benchmark": benchmark, "iteration": iteration} for current in all_configs(config): @@ -32,7 +32,7 @@ def all_configs(config): cycle_over_names = [] default = {} - for param, value in config.iteritems(): + for param, value in config.items(): if isinstance(value, list): cycle_over_lists.append(value) cycle_over_names.append(param) From 7be67be2be1b87cf7cd4408220969e0f2c6a87d8 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:07:30 -0400 Subject: [PATCH 70/74] changed xrange to range, xrange has been removed in python 3 --- benchmark/librbdfio.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/benchmark/librbdfio.py b/benchmark/librbdfio.py index 4e445217..e6f6f5e8 100644 --- a/benchmark/librbdfio.py +++ 
b/benchmark/librbdfio.py @@ -57,7 +57,7 @@ def __init__(self, cluster, config): self.norandommap = config.get("norandommap", False) # Make the file names string (repeated across volumes) self.names = '' - for proc_num in xrange(self.procs_per_volume): + for proc_num in range(self.procs_per_volume): rbd_name = 'cbt-librbdfio-`%s`-file-%d' % (common.get_fqdn_cmd(), proc_num) self.names += '--name=%s ' % rbd_name @@ -88,7 +88,7 @@ def initialize(self): ps = [] logger.info('Attempting to populating fio files...') if (self.use_existing_volumes == False): - for volnum in xrange(self.volumes_per_client): + for volnum in range(self.volumes_per_client): rbd_name = 'cbt-librbdfio-`%s`-%d' % (common.get_fqdn_cmd(), volnum) pre_cmd = 'sudo %s --ioengine=rbd --clientname=admin --pool=%s --rbdname=%s --invalidate=0 --rw=write --numjobs=%s --bs=4M --size %dM %s --output-format=%s > /dev/null' % (self.cmd_path, self.pool_name, rbd_name, self.numjobs, self.vol_size, self.names, self.fio_out_format) p = common.pdsh(settings.getnodes('clients'), pre_cmd) @@ -118,7 +118,7 @@ def run(self): logger.info('Running rbd fio %s test.', self.mode) ps = [] - for i in xrange(self.volumes_per_client): + for i in range(self.volumes_per_client): fio_cmd = self.mkfiocmd(i) p = common.pdsh(settings.getnodes('clients'), fio_cmd) ps.append(p) @@ -195,7 +195,7 @@ def mkimages(self): self.cluster.rmpool(self.data_pool, self.data_pool_profile) self.cluster.mkpool(self.data_pool, self.data_pool_profile, 'rbd') for node in common.get_fqdn_list('clients'): - for volnum in xrange(0, self.volumes_per_client): + for volnum in range(0, self.volumes_per_client): node = node.rpartition("@")[2] self.cluster.mkimage('cbt-librbdfio-%s-%d' % (node,volnum), self.vol_size, self.pool_name, self.data_pool, self.vol_object_size) monitoring.stop() @@ -211,7 +211,7 @@ def parse(self, out_dir): except: pass - for i in xrange(self.volumes_per_client): + for i in range(self.volumes_per_client): found = 0 out_file = '%s/output.%d.%s' % (out_dir, i, client) json_out_file = '%s/json_output.%d.%s' % (out_dir, i, client) From 90af4f7313f0e18895110fb5c4b26fbaea84ee40 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:14:42 -0400 Subject: [PATCH 71/74] decoded stdout in order to turn into a str and eval against check_list --- cluster/ceph.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cluster/ceph.py b/cluster/ceph.py index 5e3896e9..c1ec187e 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -464,6 +464,7 @@ def check_health(self, check_list=None, logfile=None): check_list = ["degraded", "peering", "recovery_wait", "stuck", "inactive", "unclean", "recovery", "stale"] while True: stdout, stderr = common.pdsh(settings.getnodes('head'), '%s -c %s health %s' % (self.ceph_cmd, self.tmp_conf, logline)).communicate() + stdout.decode if check_list and not any(x in stdout for x in check_list): break if "HEALTH_OK" in stdout: From a9f6e19654cb1c437782b21a850c7e87c7535bb4 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:15:07 -0400 Subject: [PATCH 72/74] saved stdout to stdout.decode --- cluster/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/ceph.py b/cluster/ceph.py index c1ec187e..eba791d9 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -464,7 +464,7 @@ def check_health(self, check_list=None, logfile=None): check_list = ["degraded", "peering", "recovery_wait", "stuck", "inactive", "unclean", "recovery", "stale"] while True: stdout, stderr = 
common.pdsh(settings.getnodes('head'), '%s -c %s health %s' % (self.ceph_cmd, self.tmp_conf, logline)).communicate() - stdout.decode + stdout = stdout.decode if check_list and not any(x in stdout for x in check_list): break if "HEALTH_OK" in stdout: From 0cd999e80bcdd6294f79c3d9f9ae3447c132c23b Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:18:30 -0400 Subject: [PATCH 73/74] corrected line 467 to decode stdout and save return value as stdout --- cluster/ceph.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cluster/ceph.py b/cluster/ceph.py index eba791d9..157d5873 100644 --- a/cluster/ceph.py +++ b/cluster/ceph.py @@ -464,7 +464,7 @@ def check_health(self, check_list=None, logfile=None): check_list = ["degraded", "peering", "recovery_wait", "stuck", "inactive", "unclean", "recovery", "stale"] while True: stdout, stderr = common.pdsh(settings.getnodes('head'), '%s -c %s health %s' % (self.ceph_cmd, self.tmp_conf, logline)).communicate() - stdout = stdout.decode + stdout = stdout.decode() if check_list and not any(x in stdout for x in check_list): break if "HEALTH_OK" in stdout: From 5d840a511e12f6fec82026aa21f64efe94e444e8 Mon Sep 17 00:00:00 2001 From: Alexander Calhoun Date: Tue, 11 Jun 2019 11:29:41 -0400 Subject: [PATCH 74/74] updated to save stdout as str with the use of decode --- common.py | 1 + 1 file changed, 1 insertion(+) diff --git a/common.py b/common.py index c6e64474..9bb752a8 100644 --- a/common.py +++ b/common.py @@ -118,6 +118,7 @@ def get_fqdn_cmd(): def get_fqdn_list(nodes): stdout, stderr = pdsh(settings.getnodes(nodes), '%s' % get_fqdn_cmd()).communicate() + stdout = stdout.decode() print (stdout) ret = []
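
Note on the recurring fix in patches 71 through 74 above: under Python 3, Popen.communicate() returns bytes, so the pdsh output has to be decoded to str before substring checks such as "HEALTH_OK" in stdout. The following is a minimal standalone sketch of that pattern only; it is not part of the patch series, and the helper name run_remote is hypothetical.

    # Minimal sketch, not CBT code: shows why the bytes -> str decode is needed
    # before membership tests on command output in Python 3.
    import subprocess

    def run_remote(cmd):
        # Hypothetical stand-in for the pdsh wrapper used by the patches above.
        proc = subprocess.Popen(cmd, shell=True,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        stdout, stderr = proc.communicate()      # bytes on Python 3
        return stdout.decode(), stderr.decode()  # same fix as PATCH 73/74

    if __name__ == "__main__":
        out, _ = run_remote("echo HEALTH_OK")
        # With raw bytes this check would raise:
        # TypeError: a bytes-like object is required, not 'str'
        print("HEALTH_OK" in out)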