diff --git a/benchmark/radosbench.py b/benchmark/radosbench.py
index 15128771..62023a5b 100644
--- a/benchmark/radosbench.py
+++ b/benchmark/radosbench.py
@@ -131,7 +131,7 @@ def _run(self, mode, run_dir, out_dir):
                 run_name = '--run-name %s`%s`-%s'%(self.object_set_id, common.get_fqdn_cmd(), i)
             if self.pool_per_proc: # support previous behavior of 1 storage pool per rados process
-                pool_name = 'rados-bench-``-%s'% (common.get_fqdn_cmd(), i)
+                pool_name = 'rados-bench-`%s`-%s'% (common.get_fqdn_cmd(), i)
                 run_name = ''
             rados_bench_cmd = '%s -c %s -p %s bench %s %s %s %s %s %s %s --no-cleanup 2> %s > %s' % \
                 (self.cmd_path_full, self.tmp_conf, pool_name, op_size_str, self.time, mode, concurrent_ops_str, max_objects_str, write_omap_str, run_name, objecter_log, out_file)
diff --git a/cluster/ceph.py b/cluster/ceph.py
index 9e988224..09811c18 100644
--- a/cluster/ceph.py
+++ b/cluster/ceph.py
@@ -435,7 +435,7 @@ def check_health(self, check_list=None, logfile=None):
            # Match any of these things to continue checking health
            check_list = ["degraded", "peering", "recovery_wait", "stuck", "inactive", "unclean", "recovery", "stale"]
        while True:
-            stdout, stderr = common.pdsh(settings.getnodes('head'), '%s -c %s health %s' % (self.ceph_cmd, self.tmp_conf, logline)).communicate()
+            stdout, stderr = common.pdsh(settings.getnodes('head'), '%s -c %s status %s' % (self.ceph_cmd, self.tmp_conf, logline)).communicate()
            if check_list and not any(x in stdout for x in check_list):
                break
            if "HEALTH_OK" in stdout: