librbd: fix recursive locking issues
Signed-off-by: Jason Dillaman <[email protected]>
Jason Dillaman committed Jun 4, 2015
1 parent d6b733d commit 1b57cc1
Showing 5 changed files with 48 additions and 48 deletions.
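
The recurring pattern in this commit is to take the snap/parent read locks only inside a short nested scope, so they are released again before any other lock (for example copyup_list_lock) is acquired, instead of being held for the whole function. A minimal standalone sketch of that pattern, using standard C++ primitives rather than Ceph's RWLock/Mutex wrappers; ImageState, has_parent and send_copyup_like are illustrative stand-ins, not librbd API:

#include <mutex>
#include <shared_mutex>

// Hypothetical stand-in for ImageCtx; the real class has many more members.
struct ImageState {
  std::shared_mutex snap_lock;
  std::shared_mutex parent_lock;
  std::mutex copyup_list_lock;
  bool has_parent = true;   // placeholder for compute_parent_extents()
};

void send_copyup_like(ImageState &ictx) {
  {
    // Read locks live only inside this block and are released at its end.
    std::shared_lock<std::shared_mutex> snap_locker(ictx.snap_lock);
    std::shared_lock<std::shared_mutex> parent_locker(ictx.parent_lock);
    if (!ictx.has_parent) {
      return;
    }
  }

  // snap_lock and parent_lock are no longer held here, so acquiring
  // copyup_list_lock can never nest underneath them.
  std::lock_guard<std::mutex> copyup_locker(ictx.copyup_list_lock);
  // ... look up or queue the copy-up request ...
}
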
33 changes: 20 additions & 13 deletions src/librbd/AioRequest.cc
@@ -237,20 +237,23 @@ namespace librbd {
 
   void AioRead::send_copyup()
   {
-    RWLock::RLocker snap_locker(m_ictx->snap_lock);
-    RWLock::RLocker parent_locker(m_ictx->parent_lock);
+    {
+      RWLock::RLocker snap_locker(m_ictx->snap_lock);
+      RWLock::RLocker parent_locker(m_ictx->parent_lock);
+      if (!compute_parent_extents()) {
+        return;
+      }
+    }
 
     Mutex::Locker copyup_locker(m_ictx->copyup_list_lock);
     map<uint64_t, CopyupRequest*>::iterator it =
       m_ictx->copyup_list.find(m_object_no);
     if (it == m_ictx->copyup_list.end()) {
-      if (compute_parent_extents()) {
-        // create and kick off a CopyupRequest
-        CopyupRequest *new_req = new CopyupRequest(m_ictx, m_oid,
-                                                   m_object_no,
-                                                   m_parent_extents);
-        m_ictx->copyup_list[m_object_no] = new_req;
-        new_req->queue_send();
-      }
+      // create and kick off a CopyupRequest
+      CopyupRequest *new_req = new CopyupRequest(m_ictx, m_oid, m_object_no,
+                                                 m_parent_extents);
+      m_ictx->copyup_list[m_object_no] = new_req;
+      new_req->queue_send();
     }
   }

@@ -321,11 +324,15 @@ namespace librbd {
       ldout(m_ictx->cct, 20) << "WRITE_CHECK_GUARD" << dendl;
 
       if (r == -ENOENT) {
-        RWLock::RLocker l(m_ictx->snap_lock);
-        RWLock::RLocker l2(m_ictx->parent_lock);
+        bool has_parent;
+        {
+          RWLock::RLocker snap_locker(m_ictx->snap_lock);
+          RWLock::RLocker parent_locker(m_ictx->parent_lock);
+          has_parent = compute_parent_extents();
+        }
 
         // If parent still exists, overlap might also have changed.
-        if (compute_parent_extents()) {
+        if (has_parent) {
           send_copyup();
         } else {
           // parent may have disappeared -- send original write again
8 changes: 4 additions & 4 deletions src/librbd/CopyupRequest.cc
@@ -263,16 +263,16 @@ class UpdateObjectMap : public C_AsyncObjectThrottle {
 
 bool CopyupRequest::send_object_map() {
   {
-    RWLock::RLocker l(m_ictx->owner_lock);
+    RWLock::RLocker owner_locker(m_ictx->owner_lock);
+    RWLock::RLocker snap_locker(m_ictx->snap_lock);
     if (m_ictx->object_map.enabled()) {
       if (!m_ictx->image_watcher->is_lock_owner()) {
-        ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
-                               << dendl;
+        ldout(m_ictx->cct, 20) << "exclusive lock not held for copyup request"
+                               << dendl;
         assert(m_pending_requests.empty());
         return true;
       }
 
-      RWLock::RLocker snap_locker(m_ictx->snap_lock);
       RWLock::WLocker object_map_locker(m_ictx->object_map_lock);
       if (m_ictx->object_map[m_object_no] != OBJECT_EXISTS ||
           !m_ictx->snaps.empty()) {
22 changes: 9 additions & 13 deletions src/librbd/ImageCtx.cc
@@ -667,6 +667,8 @@ class ThreadPoolSingleton : public ThreadPool {
 
   void ImageCtx::shutdown_cache() {
     flush_async_operations();
+
+    RWLock::RLocker owner_locker(owner_lock);
     invalidate_cache(true);
     object_cacher->stop();
   }
@@ -789,21 +791,15 @@ class ThreadPoolSingleton : public ThreadPool {
   }
 
   void ImageCtx::flush_async_operations(Context *on_finish) {
-    bool complete = false;
-    {
-      Mutex::Locker l(async_ops_lock);
-      if (async_ops.empty()) {
-        complete = true;
-      } else {
-        ldout(cct, 20) << "flush async operations: " << on_finish << " "
-                       << "count=" << async_ops.size() << dendl;
-        async_ops.front()->add_flush_context(on_finish);
-      }
+    Mutex::Locker l(async_ops_lock);
+    if (async_ops.empty()) {
+      op_work_queue->queue(on_finish, 0);
+      return;
     }
 
-    if (complete) {
-      on_finish->complete(0);
-    }
+    ldout(cct, 20) << "flush async operations: " << on_finish << " "
+                   << "count=" << async_ops.size() << dendl;
+    async_ops.front()->add_flush_context(on_finish);
   }
 
   void ImageCtx::cancel_async_requests() {
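
In the ImageCtx::flush_async_operations() change above, a flush with no in-flight operations now queues the completion on op_work_queue instead of completing it inline in the calling thread, presumably so the callback never runs while the caller is still inside librbd lock scopes. A standalone sketch of that idea; Completion, WorkQueue, AsyncOps and flush() are illustrative stand-ins for librbd's Context/ContextWQ machinery, not its real API:

#include <functional>
#include <mutex>
#include <queue>
#include <utility>

using Completion = std::function<void(int)>;

// Hypothetical stand-in for a context work queue: callbacks pushed here are
// run later by a worker thread, not by the caller.
struct WorkQueue {
  std::mutex lock;
  std::queue<std::pair<Completion, int>> items;

  void queue(Completion c, int r) {
    std::lock_guard<std::mutex> l(lock);
    items.push({std::move(c), r});
  }
};

struct AsyncOps {
  std::mutex ops_lock;
  int pending = 0;
  WorkQueue *work_queue = nullptr;

  void flush(Completion on_finish) {
    std::lock_guard<std::mutex> l(ops_lock);
    if (pending == 0) {
      // Defer the callback to the work queue rather than invoking it here,
      // so it cannot re-enter this object (or the caller's locks) directly.
      work_queue->queue(std::move(on_finish), 0);
      return;
    }
    // ... otherwise attach on_finish to the oldest in-flight operation ...
  }
};
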
4 changes: 3 additions & 1 deletion src/librbd/ImageWatcher.cc
@@ -3,6 +3,7 @@
 #include "librbd/ImageWatcher.h"
 #include "librbd/AioCompletion.h"
 #include "librbd/ImageCtx.h"
+#include "librbd/internal.h"
 #include "librbd/ObjectMap.h"
 #include "librbd/TaskFinisher.h"
 #include "cls/lock/cls_lock_client.h"
@@ -299,9 +300,10 @@ int ImageWatcher::lock() {
   ldout(m_image_ctx.cct, 10) << "acquired exclusive lock" << dendl;
   m_lock_owner_state = LOCK_OWNER_STATE_LOCKED;
 
+  ClientId owner_client_id = get_client_id();
   {
     Mutex::Locker l(m_owner_client_id_lock);
-    m_owner_client_id = get_client_id();
+    m_owner_client_id = owner_client_id;
   }
 
   if (m_image_ctx.object_map.enabled()) {
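
In the ImageWatcher::lock() change above, get_client_id() is now evaluated before m_owner_client_id_lock is taken, and only the plain assignment happens under that lock. Keeping the critical section to a single store means no other code (and no other lock) can be reached while m_owner_client_id_lock is held. A standalone sketch of that shape; Watcher, current_client_id() and owner_id are illustrative names, not the librbd ones:

#include <mutex>
#include <string>

struct Watcher {
  std::mutex registration_lock;   // taken internally by current_client_id()
  std::mutex owner_id_lock;
  std::string owner_id;

  std::string current_client_id() {
    std::lock_guard<std::mutex> l(registration_lock);
    return "client.1234";
  }

  void acquired_exclusive_lock() {
    // Compute the value first, outside owner_id_lock, so the two mutexes
    // are never held at the same time.
    std::string id = current_client_id();
    {
      std::lock_guard<std::mutex> l(owner_id_lock);
      owner_id = id;
    }
  }
};
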
29 changes: 12 additions & 17 deletions src/librbd/internal.cc
@@ -809,6 +809,7 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
         return r;
       }
     } else {
+      RWLock::RLocker owner_lock(ictx->owner_lock);
       r = snap_remove_helper(ictx, NULL, snap_name);
       if (r < 0) {
         return r;
@@ -823,10 +824,9 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
 
   int snap_remove_helper(ImageCtx *ictx, Context *ctx, const char *snap_name)
   {
+    assert(ictx->owner_lock.is_locked());
     {
       RWLock::RLocker snap_locker(ictx->snap_lock);
       if ((ictx->features & RBD_FEATURE_FAST_DIFF) != 0) {
-        assert(ictx->owner_lock.is_locked());
         assert(!ictx->image_watcher->is_lock_supported() ||
                ictx->image_watcher->is_lock_owner());
       }
@@ -1966,9 +1966,7 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
     }
     assert(watchers.size() == 1);
 
-    ictx->md_lock.get_read();
     trim_image(ictx, 0, prog_ctx);
-    ictx->md_lock.put_read();
 
     ictx->parent_lock.get_read();
     // struct assignment
@@ -2475,14 +2473,13 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
     if (r < 0)
       return r;
 
-    RWLock::RLocker l(ictx->owner_lock);
+    RWLock::RLocker owner_locker(ictx->owner_lock);
     snap_t snap_id;
     uint64_t new_size;
     {
-      RWLock::WLocker l2(ictx->md_lock);
       {
         // need to drop snap_lock before invalidating cache
-        RWLock::RLocker l3(ictx->snap_lock);
+        RWLock::RLocker snap_locker(ictx->snap_lock);
         if (!ictx->snap_exists) {
           return -ENOENT;
         }
@@ -2514,6 +2511,7 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
     // need to flush any pending writes before resizing and rolling back -
     // writes might create new snapshots. Rolling back will replace
     // the current version, so we have to invalidate that too.
+    RWLock::WLocker md_locker(ictx->md_lock);
    ictx->flush_async_operations();
     r = ictx->invalidate_cache();
     if (r < 0) {
@@ -2988,21 +2986,18 @@ int invoke_async_request(ImageCtx *ictx, const std::string& request_type,
     CephContext *cct = ictx->cct;
     ldout(cct, 20) << "async_rebuild_object_map " << ictx << dendl;
 
+    if (ictx->read_only) {
+      return -EROFS;
+    }
+    if (!ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
+      return -EINVAL;
+    }
+
     int r = ictx_check(ictx);
     if (r < 0) {
       return r;
     }
 
-    {
-      RWLock::RLocker snap_locker(ictx->snap_lock);
-      if (ictx->read_only) {
-        return -EROFS;
-      }
-      if (!ictx->test_features(RBD_FEATURE_OBJECT_MAP)) {
-        return -EINVAL;
-      }
-    }
-
     RebuildObjectMapRequest *req = new RebuildObjectMapRequest(*ictx, ctx,
                                                                prog_ctx);
     req->send();
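
The async_rebuild_object_map() hunk above drops an explicit snap_lock reader around the read_only/test_features() checks. If a helper such as ImageCtx::test_features() acquires snap_lock itself (which the removal of the surrounding snap_locker suggests), calling it with snap_lock already held is exactly the recursive locking the commit title refers to. A minimal illustration, with std::shared_mutex standing in for Ceph's RWLock and Image/test_feature() as hypothetical names:

#include <shared_mutex>

struct Image {
  mutable std::shared_mutex snap_lock;
  unsigned long long features = 0;

  // Self-locking helper: takes snap_lock on every call.
  bool test_feature(unsigned long long bit) const {
    std::shared_lock<std::shared_mutex> l(snap_lock);
    return (features & bit) != 0;
  }
};

void checks_fixed(const Image &img) {
  // Safe: the helper is called while snap_lock is not already held.
  bool ok = img.test_feature(1ULL << 3);
  (void)ok;
}

void checks_broken(const Image &img) {
  std::shared_lock<std::shared_mutex> outer(img.snap_lock);
  // Recursive acquisition: the same thread locks snap_lock again inside
  // test_feature() while still holding it -- undefined behaviour for
  // std::shared_mutex and the kind of nesting a lock-dependency checker flags.
  bool ok = img.test_feature(1ULL << 3);
  (void)ok;
}
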
