From 6e046dfc90e0a119ceb13935dc6d15efb2845184 Mon Sep 17 00:00:00 2001
From: Patrick Donnelly
Date: Mon, 18 Dec 2017 21:29:11 -0800
Subject: [PATCH] qa: check pool full flags

Cluster-wide flag removed in b4ca5ae462c6f12ca48b787529938862646282cd.

Fixes: http://tracker.ceph.com/issues/22475
Signed-off-by: Patrick Donnelly
---
 qa/tasks/cephfs/cephfs_test_case.py | 6 ------
 qa/tasks/cephfs/filesystem.py       | 7 +++----
 qa/tasks/cephfs/test_full.py        | 4 ++--
 3 files changed, 5 insertions(+), 12 deletions(-)

diff --git a/qa/tasks/cephfs/cephfs_test_case.py b/qa/tasks/cephfs/cephfs_test_case.py
index e6261ecccf294..acf1fdcfa1f0f 100644
--- a/qa/tasks/cephfs/cephfs_test_case.py
+++ b/qa/tasks/cephfs/cephfs_test_case.py
@@ -111,12 +111,6 @@ def setUp(self):
         self.fs = None # is now invalid!
         self.recovery_fs = None
 
-        # In case the previous filesystem had filled up the RADOS cluster, wait for that
-        # flag to pass.
-        osd_mon_report_interval_max = int(self.mds_cluster.get_config("osd_mon_report_interval_max", service_type='osd'))
-        self.wait_until_true(lambda: not self.mds_cluster.is_full(),
-                             timeout=osd_mon_report_interval_max * 5)
-
         # In case anything is in the OSD blacklist list, clear it out. This is to avoid
         # the OSD map changing in the background (due to blacklist expiry) while tests run.
         try:
diff --git a/qa/tasks/cephfs/filesystem.py b/qa/tasks/cephfs/filesystem.py
index 6dfaa8712a18b..b372004490d38 100644
--- a/qa/tasks/cephfs/filesystem.py
+++ b/qa/tasks/cephfs/filesystem.py
@@ -357,10 +357,6 @@ def clear_firewall(self):
     def get_mds_info(self, mds_id):
         return FSStatus(self.mon_manager).get_mds(mds_id)
 
-    def is_full(self):
-        flags = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['flags']
-        return 'full' in flags
-
     def is_pool_full(self, pool_name):
         pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
         for pool in pools:
@@ -1217,3 +1213,6 @@ def data_scan(self, args, quiet=False, worker_count=1):
             return workers[0].value
         else:
             return None
+
+    def is_full(self):
+        return self.is_pool_full(self.get_data_pool_name())
diff --git a/qa/tasks/cephfs/test_full.py b/qa/tasks/cephfs/test_full.py
index e69ccb373b982..867ac0ef30e1e 100644
--- a/qa/tasks/cephfs/test_full.py
+++ b/qa/tasks/cephfs/test_full.py
@@ -387,12 +387,12 @@ def setUp(self):
                                             "max_bytes", "{0}".format(self.pool_capacity))
 
     def is_full(self):
-        return self.fs.is_pool_full(self.fs.get_data_pool_name())
+        return self.fs.is_full()
 
 
 class TestClusterFull(FullnessTestCase):
     """
-    Test cluster-wide fullness, which indicates that an OSD has become too full
+    Test data pool fullness, which indicates that an OSD has become too full
    """
     pool_capacity = None
     REQUIRE_MEMSTORE = True
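
For context, the relocated is_full() delegates to the pool-level check, which amounts to reading per-pool flags out of the OSD map rather than the cluster-wide 'full' flag this patch stops relying on. Below is a minimal standalone sketch of that idea; it is not part of the patch, the helper name pool_is_full is hypothetical, and the assumption that `ceph osd dump --format=json` reports each pool's flags as a comma-separated 'flags_names' string is illustrative.

    # Standalone sketch (hypothetical helper, not from the patch): check a
    # pool's 'full' flag via the Ceph CLI instead of the removed cluster flag.
    import json
    import subprocess

    def pool_is_full(pool_name):
        # Assumes 'ceph osd dump --format=json' includes a 'pools' array whose
        # entries carry 'pool_name' and a comma-separated 'flags_names' string.
        osd_map = json.loads(subprocess.check_output(
            ["ceph", "osd", "dump", "--format=json"]))
        for pool in osd_map["pools"]:
            if pool["pool_name"] == pool_name:
                return "full" in pool["flags_names"].split(",")
        raise RuntimeError("pool '{0}' not found in OSD map".format(pool_name))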