
Merge tag 'v13.0.1'
liewegas committed Jan 3, 2018
2 parents a5fd0e7 + 9cce242 commit 819a357
Showing 7 changed files with 16 additions and 20 deletions.
2 changes: 1 addition & 1 deletion CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 2.8.12)

 project(ceph CXX C ASM)
-set(VERSION 12.1.2)
+set(VERSION 13.0.1)

 if(POLICY CMP0046)
   # Tweak policies (this one disables "missing" dependency warning)
6 changes: 0 additions & 6 deletions qa/tasks/cephfs/cephfs_test_case.py
@@ -111,12 +111,6 @@ def setUp(self):
         self.fs = None # is now invalid!
         self.recovery_fs = None

-        # In case the previous filesystem had filled up the RADOS cluster, wait for that
-        # flag to pass.
-        osd_mon_report_interval_max = int(self.mds_cluster.get_config("osd_mon_report_interval_max", service_type='osd'))
-        self.wait_until_true(lambda: not self.mds_cluster.is_full(),
-                             timeout=osd_mon_report_interval_max * 5)
-
         # In case anything is in the OSD blacklist list, clear it out. This is to avoid
         # the OSD map changing in the background (due to blacklist expiry) while tests run.
         try:
7 changes: 3 additions & 4 deletions qa/tasks/cephfs/filesystem.py
@@ -365,10 +365,6 @@ def clear_firewall(self):
     def get_mds_info(self, mds_id):
         return FSStatus(self.mon_manager).get_mds(mds_id)

-    def is_full(self):
-        flags = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['flags']
-        return 'full' in flags
-
     def is_pool_full(self, pool_name):
         pools = json.loads(self.mon_manager.raw_cluster_cmd("osd", "dump", "--format=json-pretty"))['pools']
         for pool in pools:
@@ -1232,3 +1228,6 @@ def data_scan(self, args, quiet=False, worker_count=1):
            return workers[0].value
        else:
            return None
+
+    def is_full(self):
+        return self.is_pool_full(self.get_data_pool_name())
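
The relocated is_full() above simply asks whether the data pool carries the "full" flag. The hunk truncates the body of is_pool_full(), so the following is a rough sketch of the check it delegates to, reconstructed from the partial context above rather than copied from the tree (treat the exact body as an assumption):

    def is_pool_full(self, pool_name):
        # Sketch: look the named pool up in the OSD map dump and report
        # whether its flags include "full".
        pools = json.loads(self.mon_manager.raw_cluster_cmd(
            "osd", "dump", "--format=json-pretty"))['pools']
        for pool in pools:
            if pool['pool_name'] == pool_name:
                return 'full' in pool['flags_names'].split(",")
        raise RuntimeError("Pool '{0}' not found".format(pool_name))

This assumes the surrounding class context in filesystem.py (the json import and self.mon_manager) that the hunks above already show.
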
4 changes: 2 additions & 2 deletions qa/tasks/cephfs/test_full.py
@@ -387,12 +387,12 @@ def setUp(self):
            "max_bytes", "{0}".format(self.pool_capacity))

     def is_full(self):
-        return self.fs.is_pool_full(self.fs.get_data_pool_name())
+        return self.fs.is_full()


 class TestClusterFull(FullnessTestCase):
     """
-    Test cluster-wide fullness, which indicates that an OSD has become too full
+    Test data pool fullness, which indicates that an OSD has become too full
     """
     pool_capacity = None
     REQUIRE_MEMSTORE = True
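
The max_bytes quota applied in setUp() above is what eventually trips the per-pool flag that the relocated is_full() reports: once writes exceed the quota, the monitors flag the pool as full in the OSD map, which is the state this test polls for. A standalone sketch of that flow from the CLI side (the pool name, quota size, and the ceph() wrapper are illustrative assumptions, not part of this change):

    import json
    import subprocess
    import time

    def ceph(*args):
        # Thin stand-in for raw_cluster_cmd(): shell out to the ceph CLI.
        return subprocess.check_output(("ceph",) + args).decode()

    # Cap the data pool, mirroring the max_bytes quota applied in setUp().
    ceph("osd", "pool", "set-quota", "cephfs_data", "max_bytes", str(64 * 1024 * 1024))

    def data_pool_is_full():
        # The pool's flags_names in `osd dump` should include "full" once
        # subsequent writes have exceeded the quota.
        dump = json.loads(ceph("osd", "dump", "--format=json"))
        return any(p["pool_name"] == "cephfs_data" and
                   "full" in p["flags_names"].split(",")
                   for p in dump["pools"])

    # Poll until the monitors flag the pool (assumes something is writing to it).
    while not data_pool_is_full():
        time.sleep(5)
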
12 changes: 6 additions & 6 deletions src/mds/MDSRank.cc
@@ -55,7 +55,7 @@ MDSRank::MDSRank(
     Context *suicide_hook_)
   :
     whoami(whoami_), incarnation(0),
-    mds_lock(mds_lock_), clog(clog_), timer(timer_),
+    mds_lock(mds_lock_), cct(msgr->cct), clog(clog_), timer(timer_),
     mdsmap(mdsmap_),
     objecter(new Objecter(g_ceph_context, msgr, monc_, nullptr, 0, 0)),
     server(NULL), mdcache(NULL), locker(NULL), mdlog(NULL),
@@ -97,7 +97,7 @@ MDSRank::MDSRank(

   objecter->unset_honor_osdmap_full();

-  finisher = new Finisher(msgr->cct);
+  finisher = new Finisher(cct);

   mdcache = new MDCache(this, purge_queue);
   mdlog = new MDLog(this);
@@ -112,10 +112,10 @@ MDSRank::MDSRank(
   server = new Server(this);
   locker = new Locker(this, mdcache);

-  op_tracker.set_complaint_and_threshold(msgr->cct->_conf->mds_op_complaint_time,
-                                         msgr->cct->_conf->mds_op_log_threshold);
-  op_tracker.set_history_size_and_duration(msgr->cct->_conf->mds_op_history_size,
-                                           msgr->cct->_conf->mds_op_history_duration);
+  op_tracker.set_complaint_and_threshold(cct->_conf->mds_op_complaint_time,
+                                         cct->_conf->mds_op_log_threshold);
+  op_tracker.set_history_size_and_duration(cct->_conf->mds_op_history_size,
+                                           cct->_conf->mds_op_history_duration);
 }

 MDSRank::~MDSRank()
2 changes: 2 additions & 0 deletions src/mds/MDSRank.h
@@ -136,6 +136,8 @@ class MDSRank {
   // a separate lock here in future potentially.
   Mutex &mds_lock;

+  class CephContext *cct;
+
   bool is_daemon_stopping() const;

   // Reference to global cluster log client, just to avoid initialising
3 changes: 2 additions & 1 deletion src/mds/Server.cc
@@ -1739,7 +1739,8 @@ void Server::handle_osd_map()
    * using osdmap_full_flag(), because we want to know "is the flag set"
    * rather than "does the flag apply to us?" */
   mds->objecter->with_osdmap([this](const OSDMap& o) {
-      is_full = o.test_flag(CEPH_OSDMAP_FULL);
+      auto pi = o.get_pg_pool(mds->mdsmap->get_metadata_pool());
+      is_full = pi && pi->has_flag(pg_pool_t::FLAG_FULL);
       dout(7) << __func__ << ": full = " << is_full << " epoch = "
               << o.get_epoch() << dendl;
     });
