crimson/.../snaptrim_event: convert SnapTrimEvent::start to a coroutine
Signed-off-by: Samuel Just <[email protected]>
athanatos committed May 28, 2024
1 parent 08f2aaf commit 4c5c4f2
Showing 1 changed file with 74 additions and 78 deletions.
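
For orientation: the change applies the standard Seastar conversion from then-style continuation chains to co_await. Below is a minimal before/after sketch of that pattern in plain Seastar; crimson's interruptor/then_interruptible wrappers are omitted, and step_one/step_two are hypothetical stand-ins.

#include <seastar/core/coroutine.hh>
#include <seastar/core/future.hh>

seastar::future<int> step_one();     // hypothetical async steps
seastar::future<int> step_two(int);

// Before: continuation chaining; intermediate state lives in lambda captures.
seastar::future<int> chained() {
  return step_one().then([] (int v) {
    return step_two(v);
  });
}

// After: a coroutine; intermediate values live in ordinary locals, and
// scope-based cleanup (e.g. seastar::defer) works across suspension points.
seastar::future<int> as_coroutine() {
  int v = co_await step_one();
  co_return co_await step_two(v);
}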
152 changes: 74 additions & 78 deletions src/crimson/osd/osd_operations/snaptrim_event.cc
@@ -1,10 +1,12 @@
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#include "crimson/common/coroutine.h"
#include "crimson/osd/osd_operations/snaptrim_event.h"
#include "crimson/osd/ops_executer.h"
#include "crimson/osd/pg.h"
#include <seastar/core/sleep.hh>
#include <seastar/util/defer.hh>

namespace {
seastar::logger& logger() {
@@ -63,30 +65,37 @@ CommonPGPipeline& SnapTrimEvent::client_pp()
SnapTrimEvent::snap_trim_event_ret_t
SnapTrimEvent::start()
{
auto exit_handle = seastar::defer([this] {
logger().debug("{}: exit", *this);
handle.exit();
});

ShardServices &shard_services = pg->get_shard_services();
return enter_stage<interruptor>(
client_pp().wait_for_active
).then_interruptible([this] {
return with_blocking_event<PGActivationBlocker::BlockingEvent,
interruptor>([this] (auto&& trigger) {
co_await enter_stage<interruptor>(client_pp().wait_for_active);

co_await with_blocking_event<PGActivationBlocker::BlockingEvent, interruptor>(
[this] (auto&& trigger) {
return pg->wait_for_active_blocker.wait(std::move(trigger));
});
}).then_interruptible([this] {
return enter_stage<interruptor>(
client_pp().recover_missing);
}).then_interruptible([] {
//return do_recover_missing(pg, get_target_oid());
return seastar::now();
}).then_interruptible([this] {
return enter_stage<interruptor>(
client_pp().get_obc);
}).then_interruptible([this] {
return pg->background_process_lock.lock_with_op(*this);
}).then_interruptible([this] {
return enter_stage<interruptor>(

co_await enter_stage<interruptor>(
client_pp().recover_missing);

// co_await do_recover_missing(pg, get_target_oid());

co_await enter_stage<interruptor>(
client_pp().get_obc);

{
co_await pg->background_process_lock.lock_with_op(*this);
auto unlocker = seastar::defer([this] {
pg->background_process_lock.unlock();
});

co_await enter_stage<interruptor>(
client_pp().process);
}).then_interruptible([&shard_services, this] {
return interruptor::async([this] {

auto to_trim_fut = interruptor::async([this] {
using crimson::common::local_conf;
const auto max =
local_conf().get_val<uint64_t>("osd_pg_max_concurrent_snap_trims");
@@ -100,65 +109,52 @@ SnapTrimEvent::start()
}
logger().debug("{}: async almost done line {}", *this, __LINE__);
return std::move(*to_trim);
}).then_interruptible([&shard_services, this] (const auto& to_trim) {
if (to_trim.empty()) {
// the legit ENOENT -> done
logger().debug("{}: to_trim is empty! Stopping iteration", *this);
pg->background_process_lock.unlock();
return snap_trim_iertr::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::yes);
}
return [&shard_services, this](const auto &to_trim) {
for (const auto& object : to_trim) {
logger().debug("{}: trimming {}", *this, object);
subop_blocker.emplace_back(
shard_services.start_operation_may_interrupt<
interruptor, SnapTrimObjSubEvent>(
pg,
object,
snapid));
}
return interruptor::now();
}(to_trim).then_interruptible([this] {
return enter_stage<interruptor>(wait_subop);
}).then_interruptible([this] {
logger().debug("{}: awaiting completion", *this);
return subop_blocker.interruptible_wait_completion();
}).finally([this] {
pg->background_process_lock.unlock();
}).si_then([this] {
if (!needs_pause) {
return interruptor::now();
}
// let operators know we're waiting
return enter_stage<interruptor>(
wait_trim_timer
).then_interruptible([this] {
using crimson::common::local_conf;
const auto time_to_sleep =
local_conf().template get_val<double>("osd_snap_trim_sleep");
logger().debug("{}: time_to_sleep {}", *this, time_to_sleep);
// TODO: this logic should be more sophisticated and distinguish
// between SSDs, HDDs and the hybrid case
return seastar::sleep(
std::chrono::milliseconds(std::lround(time_to_sleep * 1000)));
});
}).si_then([this] {
logger().debug("{}: all completed", *this);
return snap_trim_iertr::make_ready_future<seastar::stop_iteration>(
seastar::stop_iteration::no);
});
}).si_then([this](auto stop) {
return handle.complete().then([stop] {
return snap_trim_iertr::make_ready_future<seastar::stop_iteration>(stop);
});
});
}).finally([this] {
// This SnapTrimEvent op lifetime is maintained within
// PerShardState::start_operation() implementation.
logger().debug("{}: exit", *this);
handle.exit();
});
auto to_trim = co_await std::move(to_trim_fut);

if (to_trim.empty()) {
// the legit ENOENT -> done
logger().debug("{}: to_trim is empty! Stopping iteration", *this);
co_return seastar::stop_iteration::yes;
}
for (const auto& object : to_trim) {
logger().debug("{}: trimming {}", *this, object);
subop_blocker.emplace_back(
shard_services.start_operation_may_interrupt<
interruptor, SnapTrimObjSubEvent>(
pg,
object,
snapid));
}

co_await enter_stage<interruptor>(wait_subop);

logger().debug("{}: awaiting completion", *this);
co_await subop_blocker.interruptible_wait_completion();
}

if (needs_pause) {
// let operators know we're waiting
co_await enter_stage<interruptor>(
wait_trim_timer
);

using crimson::common::local_conf;
const auto time_to_sleep =
local_conf().template get_val<double>("osd_snap_trim_sleep");
logger().debug("{}: time_to_sleep {}", *this, time_to_sleep);
// TODO: this logic should be more sophisticated and distinguish
// between SSDs, HDDs and the hybrid case
co_await interruptor::make_interruptible(
seastar::sleep(
std::chrono::milliseconds(std::lround(time_to_sleep * 1000))));
}

logger().debug("{}: completed", *this);
co_await interruptor::make_interruptible(handle.complete());

logger().debug("{}: all completed", *this);
co_return seastar::stop_iteration::no;
}
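
The scoped block above pairs lock_with_op with a seastar::defer guard so background_process_lock is released on every exit path, replacing the explicit unlock() and finally() calls scattered through the old continuation chain. A reduced sketch of the pattern, with demo_lock as a hypothetical stand-in for the PG lock:

#include <seastar/core/coroutine.hh>
#include <seastar/core/future.hh>
#include <seastar/util/defer.hh>

// Hypothetical async lock standing in for pg->background_process_lock.
struct demo_lock {
  seastar::future<> lock();
  void unlock() noexcept;
};

seastar::future<> run_under_lock(demo_lock &l) {
  co_await l.lock();
  // The guard's destructor fires when the scope is left, whether by
  // normal flow or by an exception thrown after this point.
  auto unlocker = seastar::defer([&l] () noexcept { l.unlock(); });
  // ... trimming work under the lock, possibly with further co_awaits ...
  co_return;
}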


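start() resolves to seastar::stop_iteration, which suggests a caller drives repeated trim passes with a repeat-style loop until a pass returns stop_iteration::yes (the empty to_trim case above). A minimal analogue in plain Seastar, with trim_pass as a hypothetical stand-in for one start() invocation:

#include <seastar/core/coroutine.hh>
#include <seastar/core/loop.hh>

// Hypothetical stand-in for a single trim pass such as SnapTrimEvent::start().
seastar::future<seastar::stop_iteration> trim_pass();

seastar::future<> trim_until_done() {
  // seastar::repeat re-invokes the body until it yields stop_iteration::yes.
  co_await seastar::repeat([] {
    return trim_pass();
  });
}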
