diff --git a/src/common/mutex_debug.h b/src/common/mutex_debug.h
index 4a0c2cbb447cd..c1a4ff2a43501 100644
--- a/src/common/mutex_debug.h
+++ b/src/common/mutex_debug.h
@@ -15,6 +15,7 @@
 #ifndef CEPH_COMMON_MUTEX_DEBUG_H
 #define CEPH_COMMON_MUTEX_DEBUG_H
 
+#include <atomic>
 #include <system_error>
 #include <thread>
 
@@ -38,7 +39,7 @@ class mutex_debugging_base
   bool lockdep;     // track this mutex using lockdep_*
   bool backtrace;   // gather backtrace on lock acquisition
 
-  int nlock = 0;
+  std::atomic<int> nlock = 0;
   std::thread::id locked_by = {};
 
   bool _enable_lockdep() const {
@@ -57,10 +58,10 @@ class mutex_debugging_base
     return (nlock > 0);
   }
   bool is_locked_by_me() const {
-    return nlock > 0 && locked_by == std::this_thread::get_id();
+    return nlock.load(std::memory_order_acquire) > 0 && locked_by == std::this_thread::get_id();
   }
   operator bool() const {
-    return nlock > 0 && locked_by == std::this_thread::get_id();
+    return is_locked_by_me();
   }
 };
 
@@ -152,17 +153,19 @@ class mutex_debug_impl : public mutex_debugging_base
     if (!recursive)
       ceph_assert(nlock == 0);
     locked_by = std::this_thread::get_id();
-    nlock++;
+    nlock.fetch_add(1, std::memory_order_release);
   }
 
   void _pre_unlock() {
-    ceph_assert(nlock > 0);
-    --nlock;
+    if (recursive) {
+      ceph_assert(nlock > 0);
+    } else {
+      ceph_assert(nlock == 1);
+    }
     ceph_assert(locked_by == std::this_thread::get_id());
-    if (!recursive)
-      ceph_assert(nlock == 0);
-    if (nlock == 0)
+    if (nlock == 1)
       locked_by = std::thread::id();
+    nlock.fetch_sub(1, std::memory_order_release);
   }
 
   bool try_lock(bool no_lockdep = false) {