diff --git a/src/common/obj_bencher.cc b/src/common/obj_bencher.cc
index 47b9a7244921b..9b0bb79fc308f 100644
--- a/src/common/obj_bencher.cc
+++ b/src/common/obj_bencher.cc
@@ -239,27 +239,31 @@ int ObjBencher::aio_bench(
   uint64_t op_size, uint64_t object_size,
   unsigned max_objects,
   bool cleanup, bool hints,
-  const std::string& run_name, bool no_verify) {
+  const std::string& run_name, bool reuse_bench, bool no_verify) {
 
   if (concurrentios <= 0)
     return -EINVAL;
 
   int num_objects = 0;
   int r = 0;
-  int prevPid = 0;
+  int prev_pid = 0;
   std::chrono::duration<double> timePassed;
 
   // default metadata object is used if user does not specify one
   const std::string run_name_meta = (run_name.empty() ? BENCH_LASTRUN_METADATA : run_name);
 
   //get data from previous write run, if available
-  if (operation != OP_WRITE) {
+  if (operation != OP_WRITE || reuse_bench) {
     uint64_t prev_op_size, prev_object_size;
     r = fetch_bench_metadata(run_name_meta, &prev_op_size, &prev_object_size,
-                             &num_objects, &prevPid);
+                             &num_objects, &prev_pid);
     if (r < 0) {
-      if (r == -ENOENT)
-        cerr << "Must write data before running a read benchmark!" << std::endl;
+      if (r == -ENOENT) {
+        if (reuse_bench)
+          cerr << "Must write data before using reuse_bench for a write benchmark!" << std::endl;
+        else
+          cerr << "Must write data before running a read benchmark!" << std::endl;
+      }
       return r;
     }
     object_size = prev_object_size;
@@ -289,21 +293,21 @@ int ObjBencher::aio_bench(
   formatter->open_object_section("bench");
 
   if (OP_WRITE == operation) {
-    r = write_bench(secondsToRun, concurrentios, run_name_meta, max_objects);
+    r = write_bench(secondsToRun, concurrentios, run_name_meta, max_objects, prev_pid);
     if (r != 0) goto out;
   }
   else if (OP_SEQ_READ == operation) {
-    r = seq_read_bench(secondsToRun, num_objects, concurrentios, prevPid, no_verify);
+    r = seq_read_bench(secondsToRun, num_objects, concurrentios, prev_pid, no_verify);
     if (r != 0) goto out;
   }
   else if (OP_RAND_READ == operation) {
-    r = rand_read_bench(secondsToRun, num_objects, concurrentios, prevPid, no_verify);
+    r = rand_read_bench(secondsToRun, num_objects, concurrentios, prev_pid, no_verify);
     if (r != 0) goto out;
   }
 
   if (OP_WRITE == operation && cleanup) {
     r = fetch_bench_metadata(run_name_meta, &op_size, &object_size,
-                             &num_objects, &prevPid);
+                             &num_objects, &prev_pid);
     if (r < 0) {
       if (r == -ENOENT)
         cerr << "Should never happen: bench metadata missing for current run!" << std::endl;
@@ -313,7 +317,7 @@ int ObjBencher::aio_bench(
     data.start_time = mono_clock::now();
     out(cout) << "Cleaning up (deleting benchmark objects)" << std::endl;
 
-    r = clean_up(num_objects, prevPid, concurrentios);
+    r = clean_up(num_objects, prev_pid, concurrentios);
     if (r != 0) goto out;
 
     timePassed = mono_clock::now() - data.start_time;
@@ -377,7 +381,7 @@ int ObjBencher::fetch_bench_metadata(const std::string& metadata_file,
 
 int ObjBencher::write_bench(int secondsToRun,
                             int concurrentios, const string& run_name_meta,
-                            unsigned max_objects) {
+                            unsigned max_objects, int prev_pid) {
   if (concurrentios <= 0)
     return -EINVAL;
 
@@ -397,7 +401,7 @@ int ObjBencher::write_bench(int secondsToRun,
   }
 
   bufferlist* newContents = 0;
-  std::string prefix = generate_object_prefix();
+  std::string prefix = prev_pid ? generate_object_prefix(prev_pid) : generate_object_prefix();
   if (!formatter)
     out(cout) << "Object prefix: " << prefix << std::endl;
   else
@@ -627,7 +631,7 @@ int ObjBencher::write_bench(int secondsToRun,
   encode(data.object_size, b_write);
   num_objects = (data.finished + writes_per_object - 1) / writes_per_object;
   encode(num_objects, b_write);
-  encode(getpid(), b_write);
+  encode(prev_pid ? prev_pid : getpid(), b_write);
   encode(data.op_size, b_write);
 
   // persist meta-data for further cleanup or read
diff --git a/src/common/obj_bencher.h b/src/common/obj_bencher.h
index 0d4a5c11c910a..8e41fb5bc1fbb 100644
--- a/src/common/obj_bencher.h
+++ b/src/common/obj_bencher.h
@@ -75,9 +75,9 @@ class ObjBencher {
   struct bench_data data;
 
   int fetch_bench_metadata(const std::string& metadata_file, uint64_t* op_size,
-                           uint64_t* object_size, int* num_objects, int* prevPid);
+                           uint64_t* object_size, int* num_objects, int* prev_pid);
 
-  int write_bench(int secondsToRun, int concurrentios, const string& run_name_meta, unsigned max_objects);
+  int write_bench(int secondsToRun, int concurrentios, const string& run_name_meta, unsigned max_objects, int prev_pid);
   int seq_read_bench(int secondsToRun, int num_objects, int concurrentios, int writePid, bool no_verify=false);
   int rand_read_bench(int secondsToRun, int num_objects, int concurrentios, int writePid, bool no_verify=false);
 
@@ -112,7 +112,7 @@ class ObjBencher {
   int aio_bench(
     int operation, int secondsToRun,
     int concurrentios, uint64_t op_size, uint64_t object_size, unsigned max_objects,
-    bool cleanup, bool hints, const std::string& run_name, bool no_verify=false);
+    bool cleanup, bool hints, const std::string& run_name, bool reuse_bench, bool no_verify=false);
 
   int clean_up(const std::string& prefix, int concurrentios, const std::string& run_name);
   void set_show_time(bool dt) {
diff --git a/src/tools/rados/rados.cc b/src/tools/rados/rados.cc
index f879970d4b942..f3aa10834cd96 100644
--- a/src/tools/rados/rados.cc
+++ b/src/tools/rados/rados.cc
@@ -103,7 +103,7 @@ void usage(ostream& out)
 "   rollback <obj-name> <snap-name>  roll back object to snap <snap-name>\n"
 "\n"
 "   listsnaps <obj-name>             list the snapshots of this object\n"
-"   bench <seconds> write|seq|rand [-t concurrent_operations] [--no-cleanup] [--run-name run_name] [--no-hints]\n"
+"   bench <seconds> write|seq|rand [-t concurrent_operations] [--no-cleanup] [--run-name run_name] [--no-hints] [--reuse-bench]\n"
 "                                    default is 16 concurrent IOs and 4 MB ops\n"
 "                                    default is to clean up after write benchmark\n"
 "                                    default run-name is 'benchmark_last_metadata'\n"
@@ -1842,6 +1842,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
   int bench_write_dest = 0;
   bool cleanup = true;
   bool hints = true; // for rados bench
+  bool reuse_bench = false;
   bool no_verify = false;
   bool use_striper = false;
   bool with_clones = false;
@@ -2028,6 +2029,10 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
   if (i != opts.end()) {
     hints = false;
   }
+  i = opts.find("reuse-bench");
+  if (i != opts.end()) {
+    reuse_bench = true;
+  }
   i = opts.find("pretty-format");
   if (i != opts.end()) {
     pretty_format = true;
   }
@@ -3224,7 +3229,7 @@ static int rados_tool_common(const std::map < std::string, std::string > &opts,
       cout << "hints = " << (int)hints << std::endl;
     ret = bencher.aio_bench(operation, seconds,
                             concurrent_ios, op_size, object_size,
-                            max_objects, cleanup, hints, run_name, no_verify);
+                            max_objects, cleanup, hints, run_name, reuse_bench, no_verify);
     if (ret != 0)
       cerr << "error during benchmark: " << cpp_strerror(ret) << std::endl;
     if (formatter && output)
@@ -3915,6 +3920,8 @@ int main(int argc, const char **argv)
       opts["no-cleanup"] = "true";
     } else if (ceph_argparse_flag(args, i, "--no-hints", (char*)NULL)) {
      opts["no-hints"] = "true";
+    } else if (ceph_argparse_flag(args, i, "--reuse-bench", (char*)NULL)) {
+      opts["reuse-bench"] = "true";
     } else if (ceph_argparse_flag(args, i, "--no-verify", (char*)NULL)) {
       opts["no-verify"] = "true";
     } else if (ceph_argparse_witharg(args, i, &val, "--run-name", (char*)NULL)) {
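
A rough sketch of how the new flag could be exercised (the pool name "testpool" and run name "reuse_meta" below are placeholders, not part of this patch): the first write run must leave its objects and metadata behind, after which a second write run with --reuse-bench reads the recorded pid back via fetch_bench_metadata and reuses the same object prefix instead of generating a fresh one.

    # hypothetical invocation, assuming a pool named "testpool"
    rados bench -p testpool 60 write --no-cleanup --run-name reuse_meta   # seed objects and bench metadata
    rados bench -p testpool 60 write --reuse-bench --run-name reuse_meta  # rewrite the same objects

If no prior write metadata exists for the run name, the second command should abort with the new error message ("Must write data before using reuse_bench for a write benchmark!") rather than silently writing a new object set.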