From 54edc7955b0593423d2c03ca308b5e5fcdc528a3 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Thu, 23 Nov 2023 20:35:41 +0100
Subject: [PATCH 1/4] librados: clarify get_pool_is_selfmanaged_snaps_mode()
 semantics

Refer to the commit message of 8a9769a1d5fd ("librados: add
get_pool_is_selfmanaged_snaps_mode() function").

Signed-off-by: Ilya Dryomov
---
 src/include/rados/librados.hpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/include/rados/librados.hpp b/src/include/rados/librados.hpp
index cb8261af12d24..f2f9e47a2ad77 100644
--- a/src/include/rados/librados.hpp
+++ b/src/include/rados/librados.hpp
@@ -1477,7 +1477,7 @@ inline namespace v14_2_0 {
     int get_pool_stats(std::list<std::string>& v,
                        std::string& category,
                        std::map<std::string, stats_map>& stats);
-    /// check if pool has selfmanaged snaps
+    /// check if pool has or had selfmanaged snaps
     bool get_pool_is_selfmanaged_snaps_mode(const std::string& poolname);
 
     int cluster_stat(cluster_stat_t& result);

From 0999e63bfbbee46b8e19c3f05881eee64dba8b5e Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Wed, 22 Nov 2023 14:39:13 +0100
Subject: [PATCH 2/4] librados: make querying pools for selfmanaged snaps
 reliable

If get_pool_is_selfmanaged_snaps_mode() is invoked on a fresh RADOS
client instance that still lacks an osdmap, it returns false, the same
answer as for "this pool is not in selfmanaged snaps mode".  The same
happens if the pool in question doesn't exist, since the signature
doesn't allow returning an error.

The motivation for this API was to prevent users from running
"rados cppool" on a pool with unmanaged snapshots and then deleting the
original in the belief that they have a full copy.  Unfortunately, it's
exactly "rados cppool" that fell into this trap: no warning is printed
and the --yes-i-really-mean-it flag isn't enforced.

Fixes: https://tracker.ceph.com/issues/63607
Signed-off-by: Ilya Dryomov
---
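Not part of the patch: a minimal sketch of the calling convention the new
method is meant to support.  The client setup, pool name, and error handling
below are illustrative; the negative-errno / 0 / 1 return values match the
RadosClient change in this patch.

    #include <rados/librados.hpp>

    #include <cstring>
    #include <iostream>

    int main() {
      librados::Rados cluster;
      if (cluster.init(nullptr) < 0 || cluster.conf_read_file(nullptr) < 0 ||
          cluster.connect() < 0) {
        std::cerr << "failed to connect to the cluster" << std::endl;
        return 1;
      }

      // "rbd" is an illustrative pool name
      int r = cluster.pool_is_in_selfmanaged_snaps_mode("rbd");
      if (r < 0) {
        // e.g. -ENOENT if the pool doesn't exist (-ENOTCONN would mean the
        // client was never connected)
        std::cerr << "query failed: " << std::strerror(-r) << std::endl;
      } else if (r > 0) {
        std::cout << "pool has (or had) selfmanaged snaps" << std::endl;
      } else {
        std::cout << "pool never had selfmanaged snaps" << std::endl;
      }

      cluster.shutdown();
      return r < 0 ? 1 : 0;
    }

Note that the deprecated boolean wrapper kept in librados_cxx.cc maps every
error to false, which is the false-negative behaviour called out in the
release note.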
 PendingReleaseNotes                |  3 ++
 src/include/rados/librados.hpp     |  5 ++-
 src/librados/RadosClient.cc        | 18 ++++++---
 src/librados/RadosClient.h         |  2 +-
 src/librados/librados_cxx.cc       |  9 ++++-
 src/test/librados/snapshots_cxx.cc | 60 ++++++++++++++++++++++++++----
 src/tools/rados/rados.cc           | 14 ++++++-
 7 files changed, 93 insertions(+), 18 deletions(-)

diff --git a/PendingReleaseNotes b/PendingReleaseNotes
index 9b3dfede8bbd7..91d29a890f071 100644
--- a/PendingReleaseNotes
+++ b/PendingReleaseNotes
@@ -77,6 +77,9 @@ CephFS: Disallow delegating preallocated inode ranges to clients. Config
   cannot repair the dates of existing object locks. Such objects can be
   identified with a HeadObject request based on the
   x-amz-object-lock-retain-until-date response header.
+* RADOS: The `get_pool_is_selfmanaged_snaps_mode` C++ API has been deprecated
+  because it is prone to false-negative results.  Its safer replacement is
+  `pool_is_in_selfmanaged_snaps_mode`.
 
 >=18.0.0
 
diff --git a/src/include/rados/librados.hpp b/src/include/rados/librados.hpp
index f2f9e47a2ad77..2cd418627be95 100644
--- a/src/include/rados/librados.hpp
+++ b/src/include/rados/librados.hpp
@@ -1477,8 +1477,11 @@ inline namespace v14_2_0 {
     int get_pool_stats(std::list<std::string>& v,
                        std::string& category,
                        std::map<std::string, stats_map>& stats);
+
     /// check if pool has or had selfmanaged snaps
-    bool get_pool_is_selfmanaged_snaps_mode(const std::string& poolname);
+    bool get_pool_is_selfmanaged_snaps_mode(const std::string& poolname)
+      __attribute__ ((deprecated));
+    int pool_is_in_selfmanaged_snaps_mode(const std::string& poolname);
 
     int cluster_stat(cluster_stat_t& result);
     int cluster_fsid(std::string *fsid);
diff --git a/src/librados/RadosClient.cc b/src/librados/RadosClient.cc
index 9abd923f95b03..db9143e2d5b8b 100644
--- a/src/librados/RadosClient.cc
+++ b/src/librados/RadosClient.cc
@@ -631,16 +631,22 @@ int librados::RadosClient::get_pool_stats(std::list<std::string>& pools,
   return 0;
 }
 
-bool librados::RadosClient::get_pool_is_selfmanaged_snaps_mode(
+int librados::RadosClient::pool_is_in_selfmanaged_snaps_mode(
   const std::string& pool)
 {
-  bool ret = false;
-  objecter->with_osdmap([&](const OSDMap& osdmap) {
+  int r = wait_for_osdmap();
+  if (r < 0) {
+    return r;
+  }
+
+  return objecter->with_osdmap([&pool](const OSDMap& osdmap) {
     int64_t poolid = osdmap.lookup_pg_pool_name(pool);
-    if (poolid >= 0)
-      ret = osdmap.get_pg_pool(poolid)->is_unmanaged_snaps_mode();
+    if (poolid < 0) {
+      return -ENOENT;
+    }
+    return static_cast<int>(
+        osdmap.get_pg_pool(poolid)->is_unmanaged_snaps_mode());
   });
-  return ret;
 }
 
 int librados::RadosClient::get_fs_stats(ceph_statfs& stats)
diff --git a/src/librados/RadosClient.h b/src/librados/RadosClient.h
index 052249a76db36..a93f8185108dc 100644
--- a/src/librados/RadosClient.h
+++ b/src/librados/RadosClient.h
@@ -131,7 +131,7 @@ class librados::RadosClient : public Dispatcher,
   int get_pool_stats(std::list<std::string>& ls, std::map<std::string,::pool_stat_t> *result,
                      bool *per_pool);
   int get_fs_stats(ceph_statfs& result);
-  bool get_pool_is_selfmanaged_snaps_mode(const std::string& pool);
+  int pool_is_in_selfmanaged_snaps_mode(const std::string& pool);
 
   /* -1 was set as the default value and monitor will
      pickup the right crush rule with below order:
diff --git a/src/librados/librados_cxx.cc b/src/librados/librados_cxx.cc
index d20c67556c03e..926ddf86dab44 100644
--- a/src/librados/librados_cxx.cc
+++ b/src/librados/librados_cxx.cc
@@ -2719,9 +2719,16 @@ int librados::Rados::get_pool_stats(std::list<std::string>& v,
   return -EOPNOTSUPP;
 }
 
+// deprecated, use pool_is_in_selfmanaged_snaps_mode() instead
 bool librados::Rados::get_pool_is_selfmanaged_snaps_mode(const std::string& pool)
 {
-  return client->get_pool_is_selfmanaged_snaps_mode(pool);
+  // errors are ignored, prone to false negative results
+  return client->pool_is_in_selfmanaged_snaps_mode(pool) > 0;
+}
+
+int librados::Rados::pool_is_in_selfmanaged_snaps_mode(const std::string& pool)
+{
+  return client->pool_is_in_selfmanaged_snaps_mode(pool);
 }
 
 int librados::Rados::cluster_stat(cluster_stat_t& result)
diff --git a/src/test/librados/snapshots_cxx.cc b/src/test/librados/snapshots_cxx.cc
index 8098b2cb7817e..95dbe5da0125a 100644
--- a/src/test/librados/snapshots_cxx.cc
+++ b/src/test/librados/snapshots_cxx.cc
@@ -25,9 +25,9 @@ TEST_F(LibRadosSnapshotsPP, SnapListPP) {
   bufferlist bl1;
   bl1.append(buf, sizeof(buf));
   ASSERT_EQ(0, ioctx.write("foo", bl1, sizeof(buf), 0));
-  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(0, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   ASSERT_EQ(0, ioctx.snap_create("snap1"));
-  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(0, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   std::vector<snap_t> snaps;
   EXPECT_EQ(0, ioctx.snap_list(&snaps));
   EXPECT_EQ(1U, snaps.size());
@@ -35,7 +35,7 @@ TEST_F(LibRadosSnapshotsPP, SnapListPP) {
   EXPECT_EQ(0, ioctx.snap_lookup("snap1", &rid));
   EXPECT_EQ(rid, snaps[0]);
   EXPECT_EQ(0, ioctx.snap_remove("snap1"));
-  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(0, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
 }
 
 TEST_F(LibRadosSnapshotsPP, SnapRemovePP) {
@@ -109,9 +109,9 @@ TEST_F(LibRadosSnapshotsPP, SnapCreateRemovePP) {
 TEST_F(LibRadosSnapshotsSelfManagedPP, SnapPP) {
   std::vector<uint64_t> my_snaps;
   my_snaps.push_back(-2);
-  ASSERT_FALSE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(0, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
-  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(1, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   ::std::reverse(my_snaps.begin(), my_snaps.end());
   ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
   ::std::reverse(my_snaps.begin(), my_snaps.end());
@@ -148,7 +148,7 @@ TEST_F(LibRadosSnapshotsSelfManagedPP, SnapPP) {
   ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(my_snaps.back()));
   my_snaps.pop_back();
   ioctx.snap_set_read(LIBRADOS_SNAP_HEAD);
-  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(1, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   ASSERT_EQ(0, ioctx.remove("foo"));
 }
 
@@ -509,7 +509,7 @@ TEST_F(LibRadosSnapshotsSelfManagedPP, ReusePurgedSnap) {
   std::vector<uint64_t> my_snaps;
   my_snaps.push_back(-2);
   ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&my_snaps.back()));
-  ASSERT_TRUE(cluster.get_pool_is_selfmanaged_snaps_mode(pool_name));
+  ASSERT_EQ(1, cluster.pool_is_in_selfmanaged_snaps_mode(pool_name));
   ::std::reverse(my_snaps.begin(), my_snaps.end());
   ASSERT_EQ(0, ioctx.selfmanaged_snap_set_write_ctx(my_snaps[0], my_snaps));
   ::std::reverse(my_snaps.begin(), my_snaps.end());
@@ -548,6 +548,52 @@ TEST_F(LibRadosSnapshotsSelfManagedPP, ReusePurgedSnap) {
   //sleep(600);
 }
 
+TEST(LibRadosPoolIsInSelfmanagedSnapsMode, NotConnected) {
+  librados::Rados cluster;
+  ASSERT_EQ(0, cluster.init(nullptr));
+
+  EXPECT_EQ(-ENOTCONN, cluster.pool_is_in_selfmanaged_snaps_mode("foo"));
+}
+
+TEST(LibRadosPoolIsInSelfmanagedSnapsMode, FreshInstance) {
+  librados::Rados cluster1;
+  std::string pool_name = get_temp_pool_name();
+  ASSERT_EQ("", create_one_pool_pp(pool_name, cluster1));
+  EXPECT_EQ(0, cluster1.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  {
+    librados::Rados cluster2;
+    ASSERT_EQ("", connect_cluster_pp(cluster2));
+    EXPECT_EQ(0, cluster2.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  }
+
+  librados::IoCtx ioctx;
+  cluster1.ioctx_create(pool_name.c_str(), ioctx);
+  uint64_t snap_id;
+  ASSERT_EQ(0, ioctx.selfmanaged_snap_create(&snap_id));
+  EXPECT_EQ(1, cluster1.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  {
+    librados::Rados cluster2;
+    ASSERT_EQ("", connect_cluster_pp(cluster2));
+    EXPECT_EQ(1, cluster2.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  }
+
+  ASSERT_EQ(0, ioctx.selfmanaged_snap_remove(snap_id));
+  EXPECT_EQ(1, cluster1.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  {
+    librados::Rados cluster2;
+    ASSERT_EQ("", connect_cluster_pp(cluster2));
+    EXPECT_EQ(1, cluster2.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  }
+
+  ASSERT_EQ(0, cluster1.pool_delete(pool_name.c_str()));
+  EXPECT_EQ(-ENOENT, cluster1.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  {
+    librados::Rados cluster2;
+    ASSERT_EQ("", connect_cluster_pp(cluster2));
+    EXPECT_EQ(-ENOENT, cluster2.pool_is_in_selfmanaged_snaps_mode(pool_name));
+  }
+}
+
 // EC testing
 TEST_F(LibRadosSnapshotsECPP, SnapListPP) {
   SKIP_IF_CRIMSON();
diff --git a/src/tools/rados/rados.cc b/src/tools/rados/rados.cc
index b8cf5e4d1dc36..db3aa543fb563 100644
--- a/src/tools/rados/rados.cc
+++ b/src/tools/rados/rados.cc
@@ -3120,7 +3120,12 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
     cerr << "WARNING: pool copy does not preserve user_version, which some "
          << "apps may rely on." << std::endl;
 
-    if (rados.get_pool_is_selfmanaged_snaps_mode(src_pool)) {
+    ret = rados.pool_is_in_selfmanaged_snaps_mode(src_pool);
+    if (ret < 0) {
+      cerr << "failed to query pool " << src_pool << " for selfmanaged snaps: "
+           << cpp_strerror(ret) << std::endl;
+      return 1;
+    } else if (ret > 0) {
       cerr << "WARNING: pool " << src_pool
            << " has selfmanaged snaps, which are not preserved\n"
            << "  by the cppool operation. This will break any snapshot user." << std::endl;
@@ -3213,7 +3218,12 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
       return 1;
     }
 
-    if (rados.get_pool_is_selfmanaged_snaps_mode(pool_name)) {
+    ret = rados.pool_is_in_selfmanaged_snaps_mode(pool_name);
+    if (ret < 0) {
+      cerr << "failed to query pool " << pool_name << " for selfmanaged snaps: "
+           << cpp_strerror(ret) << std::endl;
+      return 1;
+    } else if (ret > 0) {
       cerr << "can't create snapshot: pool " << pool_name
            << " is in selfmanaged snaps mode" << std::endl;
       return 1;

From 2b135a2eb60e0dd5ab22d213d3d2435608c6a9d3 Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Thu, 23 Nov 2023 20:24:24 +0100
Subject: [PATCH 3/4] qa: make sure "rados cppool" requires
 --yes-i-really-mean-it for RBD

Safeguards in the rados CLI tool aren't really the subject of this
test, but the check fits here nicely.

Signed-off-by: Ilya Dryomov
---
 qa/workunits/mon/rbd_snaps_ops.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/qa/workunits/mon/rbd_snaps_ops.sh b/qa/workunits/mon/rbd_snaps_ops.sh
index eb88565eab9c2..0e5b16b7b80b4 100755
--- a/qa/workunits/mon/rbd_snaps_ops.sh
+++ b/qa/workunits/mon/rbd_snaps_ops.sh
@@ -36,6 +36,7 @@ expect 'rbd --pool=test snap ls image' 0
 expect 'rbd --pool=test snap rm image@snapshot' 0
 
 expect 'ceph osd pool mksnap test snapshot' 22
+expect 'rados -p test mksnap snapshot' 1
 
 expect 'ceph osd pool delete test test --yes-i-really-really-mean-it' 0
 
@@ -52,6 +53,8 @@ expect 'rbd --pool test-foo snap create image@snapshot' 0
 ceph osd pool delete test-bar test-bar --yes-i-really-really-mean-it || true
 expect 'ceph osd pool create test-bar 8' 0
 expect 'ceph osd pool application enable test-bar rbd'
+# "rados cppool" without --yes-i-really-mean-it should fail
+expect 'rados cppool test-foo test-bar' 1
 expect 'rados cppool test-foo test-bar --yes-i-really-mean-it' 0
 expect 'rbd --pool test-bar snap rm image@snapshot' 95
 expect 'ceph osd pool delete test-foo test-foo --yes-i-really-really-mean-it' 0

From de6635582b7615eb54800a616711073a3c7b0dbd Mon Sep 17 00:00:00 2001
From: Ilya Dryomov
Date: Fri, 24 Nov 2023 19:53:48 +0100
Subject: [PATCH 4/4] tools/rados: just return instead of exit(1) in "rados
 cppool" handler

Otherwise an occasional segfault occurs.  This instance was missed in
commit 2c149262888c ("tools/rados: always call rados.shutdown() before
exit()").

Signed-off-by: Ilya Dryomov
---
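Not part of the patch: a sketch of why returning matters here, with made-up
function names.  exit() terminates the process without reaching the shutdown
call, which is the likely source of the occasional segfault mentioned above.

    #include <rados/librados.hpp>

    // Stand-in for rados_tool_common(); illustrative only.
    static int tool_common(librados::Rados& rados) {
      (void)rados;  // real command handling elided
      // On an error path, exit(1) here would skip rados.shutdown() in main()
      // (and any destructors), leaving librados threads running at process
      // teardown.  Returning lets the normal cleanup path run.
      return 1;
    }

    int main() {
      librados::Rados rados;
      if (rados.init(nullptr) < 0) {
        return 1;
      }
      // conf_read_file()/connect() and argument parsing elided
      int r = tool_common(rados);
      rados.shutdown();  // reached on every path because tool_common() returns
      return r;
    }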
 src/tools/rados/rados.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/tools/rados/rados.cc b/src/tools/rados/rados.cc
index db3aa543fb563..4a38d33abd2d8 100644
--- a/src/tools/rados/rados.cc
+++ b/src/tools/rados/rados.cc
@@ -3133,7 +3133,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
       cerr << "  If you insist on making a broken copy, you can pass\n"
            << "  --yes-i-really-mean-it to proceed anyway."
            << std::endl;
-      exit(1);
+      return 1;
     }
   }
 
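For call sites migrating off the boolean API deprecated in PATCH 2/4, a sketch
of the replacement pattern; the helper name and its policy are illustrative:

    #include <rados/librados.hpp>

    #include <string>

    // Returns true only when the pool is known to have never had selfmanaged
    // snaps; errors are treated as "unsafe" instead of being swallowed.
    bool safe_to_copy(librados::Rados& rados, const std::string& pool_name) {
      // Deprecated: rados.get_pool_is_selfmanaged_snaps_mode(pool_name)
      // collapses errors (no osdmap, nonexistent pool) into false.
      int r = rados.pool_is_in_selfmanaged_snaps_mode(pool_name);
      if (r < 0) {
        return false;  // couldn't determine, e.g. -ENOENT
      }
      return r == 0;   // 1 means the pool has or had selfmanaged snaps
    }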