Apply formatter to the 45 most recent commits. (facebook#5827)
Summary:
Some recent commits might not have passed through the formatter, so I ran the formatting script over the files touched by the 45 most recent commits. The script hangs when given more commits than that, so I stopped there.
Pull Request resolved: facebook#5827

Test Plan: Run all existing tests.

Differential Revision: D17483727

fbshipit-source-id: af23113ee63015d8a43d89a3bc2c1056189afe8f
siying authored and facebook-github-bot committed Sep 19, 2019
1 parent 6ec6a4a commit c06b54d
Showing 26 changed files with 270 additions and 279 deletions.
2 changes: 1 addition & 1 deletion db/arena_wrapped_db_iter.cc
@@ -19,7 +19,7 @@
 namespace rocksdb {
 
 Status ArenaWrappedDBIter::GetProperty(std::string prop_name,
-                                      std::string* prop) {
+                                       std::string* prop) {
   if (prop_name == "rocksdb.iterator.super-version-number") {
     // First try to pass the value returned from inner iterator.
     if (!db_iter_->GetProperty(prop_name, prop).ok()) {
2 changes: 1 addition & 1 deletion db/compaction/compaction_iterator.cc
@@ -230,7 +230,7 @@ void CompactionIterator::NextFromInput() {
   valid_ = false;
 
   while (!valid_ && input_->Valid() && !IsPausingManualCompaction() &&
-        !IsShuttingDown()) {
+         !IsShuttingDown()) {
     key_ = input_->key();
     value_ = input_->value();
     iter_stats_.num_input_records++;
50 changes: 25 additions & 25 deletions db/compaction/compaction_iterator.h
@@ -59,32 +59,32 @@ class CompactionIterator {
     const Compaction* compaction_;
   };
 
-  CompactionIterator(InternalIterator* input, const Comparator* cmp,
-                     MergeHelper* merge_helper, SequenceNumber last_sequence,
-                     std::vector<SequenceNumber>* snapshots,
-                     SequenceNumber earliest_write_conflict_snapshot,
-                     const SnapshotChecker* snapshot_checker, Env* env,
-                     bool report_detailed_time, bool expect_valid_internal_key,
-                     CompactionRangeDelAggregator* range_del_agg,
-                     const Compaction* compaction = nullptr,
-                     const CompactionFilter* compaction_filter = nullptr,
-                     const std::atomic<bool>* shutting_down = nullptr,
-                     const SequenceNumber preserve_deletes_seqnum = 0,
-                     const std::atomic<bool>* manual_compaction_paused = nullptr);
+  CompactionIterator(
+      InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
+      SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
+      SequenceNumber earliest_write_conflict_snapshot,
+      const SnapshotChecker* snapshot_checker, Env* env,
+      bool report_detailed_time, bool expect_valid_internal_key,
+      CompactionRangeDelAggregator* range_del_agg,
+      const Compaction* compaction = nullptr,
+      const CompactionFilter* compaction_filter = nullptr,
+      const std::atomic<bool>* shutting_down = nullptr,
+      const SequenceNumber preserve_deletes_seqnum = 0,
+      const std::atomic<bool>* manual_compaction_paused = nullptr);
 
   // Constructor with custom CompactionProxy, used for tests.
-  CompactionIterator(InternalIterator* input, const Comparator* cmp,
-                     MergeHelper* merge_helper, SequenceNumber last_sequence,
-                     std::vector<SequenceNumber>* snapshots,
-                     SequenceNumber earliest_write_conflict_snapshot,
-                     const SnapshotChecker* snapshot_checker, Env* env,
-                     bool report_detailed_time, bool expect_valid_internal_key,
-                     CompactionRangeDelAggregator* range_del_agg,
-                     std::unique_ptr<CompactionProxy> compaction,
-                     const CompactionFilter* compaction_filter = nullptr,
-                     const std::atomic<bool>* shutting_down = nullptr,
-                     const SequenceNumber preserve_deletes_seqnum = 0,
-                     const std::atomic<bool>* manual_compaction_paused = nullptr);
+  CompactionIterator(
+      InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper,
+      SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots,
+      SequenceNumber earliest_write_conflict_snapshot,
+      const SnapshotChecker* snapshot_checker, Env* env,
+      bool report_detailed_time, bool expect_valid_internal_key,
+      CompactionRangeDelAggregator* range_del_agg,
+      std::unique_ptr<CompactionProxy> compaction,
+      const CompactionFilter* compaction_filter = nullptr,
+      const std::atomic<bool>* shutting_down = nullptr,
+      const SequenceNumber preserve_deletes_seqnum = 0,
+      const std::atomic<bool>* manual_compaction_paused = nullptr);
 
   ~CompactionIterator();
 
@@ -231,7 +231,7 @@ class CompactionIterator {
   bool IsPausingManualCompaction() {
     // This is a best-effort facility, so memory_order_relaxed is sufficient.
    return manual_compaction_paused_ &&
-          manual_compaction_paused_->load(std::memory_order_relaxed);
+           manual_compaction_paused_->load(std::memory_order_relaxed);
  }
};
}  // namespace rocksdb
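
The comment inside IsPausingManualCompaction above is the interesting part of this hunk: the pause flag is purely advisory, so a reader only needs to observe the store eventually and no other data is published through the flag, which is why memory_order_relaxed suffices. A minimal standalone sketch of the same pattern, with hypothetical names rather than RocksDB code:

#include <atomic>

// Advisory pause flag. No other memory is synchronized through it, and a
// slightly stale read is harmless, so relaxed loads and stores are enough.
std::atomic<bool> paused{false};

bool IsPaused() { return paused.load(std::memory_order_relaxed); }

void RequestPause() { paused.store(true, std::memory_order_relaxed); }

void DoWork() {
  // Check the flag between units of work; observing a pause request a few
  // iterations late is acceptable for a best-effort facility.
  while (!IsPaused()) {
    // ... process one unit of work ...
  }
}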
14 changes: 8 additions & 6 deletions db/compaction/compaction_job.cc
@@ -870,9 +870,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
       db_options_.statistics.get());
 
   TEST_SYNC_POINT("CompactionJob::Run():Inprogress");
-  TEST_SYNC_POINT_CALLBACK("CompactionJob::Run():PausingManualCompaction:1",
-                           reinterpret_cast<void *>(
-                               const_cast<std::atomic<bool> *>(manual_compaction_paused_)));
+  TEST_SYNC_POINT_CALLBACK(
+      "CompactionJob::Run():PausingManualCompaction:1",
+      reinterpret_cast<void*>(
+          const_cast<std::atomic<bool>*>(manual_compaction_paused_)));
 
   Slice* start = sub_compact->start;
   Slice* end = sub_compact->end;
@@ -954,9 +955,10 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
       input_status = input->status();
       output_file_ended = true;
     }
-    TEST_SYNC_POINT_CALLBACK("CompactionJob::Run():PausingManualCompaction:2",
-                             reinterpret_cast<void *>(
-                                 const_cast<std::atomic<bool> *>(manual_compaction_paused_)));
+    TEST_SYNC_POINT_CALLBACK(
+        "CompactionJob::Run():PausingManualCompaction:2",
+        reinterpret_cast<void*>(
+            const_cast<std::atomic<bool>*>(manual_compaction_paused_)));
     c_iter->Next();
     if (c_iter->status().IsManualCompactionPaused()) {
       break;
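
The TEST_SYNC_POINT_CALLBACK hooks above are the producer side of the PausingManualCompaction tests in db_test2.cc (visible later in this diff): a test registers a callback by the sync-point name, and the void* argument is the manual_compaction_paused_ flag, cast so the test can flip it mid-compaction. A sketch of a consuming test; the registration pattern mirrors db_test2.cc below, while the callback body is illustrative rather than the actual test code:

// Inside a test body, with RocksDB's sync-point test utility available:
rocksdb::SyncPoint::GetInstance()->SetCallBack(
    "CompactionJob::Run():PausingManualCompaction:1", [&](void* arg) {
      // arg is the manual_compaction_paused_ flag forwarded above; setting
      // it from inside the hook pauses the compaction at this exact point.
      auto* paused = reinterpret_cast<std::atomic<bool>*>(arg);
      paused->store(true, std::memory_order_release);
    });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();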
32 changes: 17 additions & 15 deletions db/compaction/compaction_job.h
@@ -62,21 +62,23 @@ class VersionSet;
 // if needed.
 class CompactionJob {
  public:
-  CompactionJob(
-      int job_id, Compaction* compaction, const ImmutableDBOptions& db_options,
-      const EnvOptions env_options, VersionSet* versions,
-      const std::atomic<bool>* shutting_down,
-      const SequenceNumber preserve_deletes_seqnum, LogBuffer* log_buffer,
-      Directory* db_directory, Directory* output_directory, Statistics* stats,
-      InstrumentedMutex* db_mutex, ErrorHandler* db_error_handler,
-      std::vector<SequenceNumber> existing_snapshots,
-      SequenceNumber earliest_write_conflict_snapshot,
-      const SnapshotChecker* snapshot_checker,
-      std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
-      bool paranoid_file_checks, bool measure_io_stats,
-      const std::string& dbname, CompactionJobStats* compaction_job_stats,
-      Env::Priority thread_pri,
-      const std::atomic<bool>* manual_compaction_paused = nullptr);
+  CompactionJob(int job_id, Compaction* compaction,
+                const ImmutableDBOptions& db_options,
+                const EnvOptions env_options, VersionSet* versions,
+                const std::atomic<bool>* shutting_down,
+                const SequenceNumber preserve_deletes_seqnum,
+                LogBuffer* log_buffer, Directory* db_directory,
+                Directory* output_directory, Statistics* stats,
+                InstrumentedMutex* db_mutex, ErrorHandler* db_error_handler,
+                std::vector<SequenceNumber> existing_snapshots,
+                SequenceNumber earliest_write_conflict_snapshot,
+                const SnapshotChecker* snapshot_checker,
+                std::shared_ptr<Cache> table_cache, EventLogger* event_logger,
+                bool paranoid_file_checks, bool measure_io_stats,
+                const std::string& dbname,
+                CompactionJobStats* compaction_job_stats,
+                Env::Priority thread_pri,
+                const std::atomic<bool>* manual_compaction_paused = nullptr);
 
   ~CompactionJob();
 
17 changes: 7 additions & 10 deletions db/db_impl/db_impl_compaction_flush.cc
@@ -983,8 +983,7 @@ Status DBImpl::CompactFilesImpl(
       snapshot_checker, table_cache_, &event_logger_,
       c->mutable_cf_options()->paranoid_file_checks,
       c->mutable_cf_options()->report_bg_io_stats, dbname_,
-      &compaction_job_stats, Env::Priority::USER,
-      &manual_compaction_paused_);
+      &compaction_job_stats, Env::Priority::USER, &manual_compaction_paused_);
 
   // Creating a compaction influences the compaction score because the score
   // takes running compactions into account (by skipping files that are already
@@ -2313,8 +2312,7 @@ void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
       env_->SleepForMicroseconds(10000);  // prevent hot loop
       mutex_.Lock();
     } else if (!s.ok() && !s.IsShutdownInProgress() &&
-               !s.IsManualCompactionPaused() &&
-               !s.IsColumnFamilyDropped()) {
+               !s.IsManualCompactionPaused() && !s.IsColumnFamilyDropped()) {
       // Wait a little bit before retrying background compaction in
       // case this is an environmental problem and we do not want to
       // chew up resources for failed compactions for the duration of
@@ -2332,11 +2330,10 @@ void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
       env_->SleepForMicroseconds(1000000);
       mutex_.Lock();
     } else if (s.IsManualCompactionPaused()) {
-      ManualCompactionState *m = prepicked_compaction->manual_compaction_state;
+      ManualCompactionState* m = prepicked_compaction->manual_compaction_state;
       assert(m);
       ROCKS_LOG_BUFFER(&log_buffer, "[%s] [JOB %d] Manual compaction paused",
-                       m->cfd->GetName().c_str(),
-                       job_context.job_id);
+                       m->cfd->GetName().c_str(), job_context.job_id);
     }
 
     ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
@@ -2345,8 +2342,8 @@ void DBImpl::BackgroundCallCompaction(PrepickedCompaction* prepicked_compaction,
     // have created (they might not be all recorded in job_context in case of a
    // failure). Thus, we force full scan in FindObsoleteFiles()
    FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress() &&
-                      !s.IsManualCompactionPaused() &&
-                      !s.IsColumnFamilyDropped());
+                                        !s.IsManualCompactionPaused() &&
+                                        !s.IsColumnFamilyDropped());
    TEST_SYNC_POINT("DBImpl::BackgroundCallCompaction:FoundObsoleteFiles");
 
    // delete unnecessary files if any, this is done outside the mutex
@@ -2430,7 +2427,7 @@ Status DBImpl::BackgroundCompaction(bool* made_progress,
     if (shutting_down_.load(std::memory_order_acquire)) {
       status = Status::ShutdownInProgress();
     } else if (is_manual &&
-              manual_compaction_paused_.load(std::memory_order_acquire)) {
+               manual_compaction_paused_.load(std::memory_order_acquire)) {
       status = Status::Incomplete(Status::SubCode::kManualCompactionPaused);
     }
   } else {
2 changes: 1 addition & 1 deletion db/db_impl/db_impl_readonly.cc
@@ -3,8 +3,8 @@
 //  COPYING file in the root directory) and Apache 2.0 License
 //  (found in the LICENSE.Apache file in the root directory).
 
-#include "db/arena_wrapped_db_iter.h"
 #include "db/db_impl/db_impl_readonly.h"
+#include "db/arena_wrapped_db_iter.h"
 
 #include "db/compacted_db_impl.h"
 #include "db/db_impl/db_impl.h"
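
The reorder above reflects the include-ordering convention that clang-format can enforce (assuming include sorting is enabled in the project's configuration): a .cc file includes its own header first, which forces that header to compile standalone instead of silently relying on whatever happened to be included before it. A generic sketch of the convention, with illustrative paths that are not from this repository:

// foo/bar.cc
#include "foo/bar.h"  // own header first: surfaces missing includes in foo/bar.h

#include <memory>  // then standard library headers
#include <string>

#include "foo/baz.h"  // then other project headers, sorted within the block
#include "util/helper.h"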
9 changes: 3 additions & 6 deletions db/db_iter.h
@@ -53,7 +53,7 @@ namespace rocksdb {
 // combines multiple entries for the same userkey found in the DB
 // representation into a single entry while accounting for sequence
 // numbers, deletion markers, overwrites, etc.
-class DBIter final: public Iterator {
+class DBIter final : public Iterator {
  public:
   // The following is grossly complicated. TODO: clean it up
   // Which direction is the iterator currently moving?
@@ -66,10 +66,7 @@ class DBIter final: public Iterator {
   // this->key().
   // (2) When moving backwards, the internal iterator is positioned
   // just before all entries whose user key == this->key().
-  enum Direction {
-    kForward,
-    kReverse
-  };
+  enum Direction { kForward, kReverse };
 
   // LocalStatistics contain Statistics counters that will be aggregated per
   // each iterator instance and then will be sent to the global statistics when
@@ -148,7 +145,7 @@ class DBIter final: public Iterator {
   bool Valid() const override { return valid_; }
   Slice key() const override {
     assert(valid_);
-    if(start_seqnum_ > 0) {
+    if (start_seqnum_ > 0) {
       return saved_key_.GetInternalKey();
     } else {
       return saved_key_.GetUserKey();
16 changes: 7 additions & 9 deletions db/db_test2.cc
@@ -2458,8 +2458,7 @@ TEST_F(DBTest2, PausingManualCompaction1) {
 
   manual_compactions_paused = 0;
   // Now make sure CompactFiles also not run
-  dbfull()->CompactFiles(rocksdb::CompactionOptions(),
-                         files_before_compact, 0);
+  dbfull()->CompactFiles(rocksdb::CompactionOptions(), files_before_compact, 0);
   // Wait for manual compaction to get scheduled and finish
   dbfull()->TEST_WaitForCompact(true);
 
@@ -2510,14 +2509,14 @@ TEST_F(DBTest2, PausingManualCompaction3) {
   Random rnd(301);
   auto generate_files = [&]() {
     for (int i = 0; i < options.num_levels; i++) {
-      for (int j = 0; j < options.num_levels-i+1; j++) {
+      for (int j = 0; j < options.num_levels - i + 1; j++) {
         for (int k = 0; k < 1000; k++) {
           ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50)));
         }
         Flush();
       }
 
-      for (int l = 1; l < options.num_levels-i; l++) {
+      for (int l = 1; l < options.num_levels - i; l++) {
         MoveFilesToLevel(l);
       }
     }
@@ -2530,9 +2529,8 @@ TEST_F(DBTest2, PausingManualCompaction3) {
 #endif  // !ROCKSDB_LITE
   int run_manual_compactions = 0;
   rocksdb::SyncPoint::GetInstance()->SetCallBack(
-      "CompactionJob::Run():PausingManualCompaction:1", [&](void* /*arg*/) {
-        run_manual_compactions++;
-      });
+      "CompactionJob::Run():PausingManualCompaction:1",
+      [&](void* /*arg*/) { run_manual_compactions++; });
   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
 
   dbfull()->DisableManualCompaction();
@@ -2565,14 +2563,14 @@ TEST_F(DBTest2, PausingManualCompaction4) {
   Random rnd(301);
   auto generate_files = [&]() {
     for (int i = 0; i < options.num_levels; i++) {
-      for (int j = 0; j < options.num_levels-i+1; j++) {
+      for (int j = 0; j < options.num_levels - i + 1; j++) {
         for (int k = 0; k < 1000; k++) {
           ASSERT_OK(Put(Key(k + j * 1000), RandomString(&rnd, 50)));
         }
         Flush();
       }
 
-      for (int l = 1; l < options.num_levels-i; l++) {
+      for (int l = 1; l < options.num_levels - i; l++) {
         MoveFilesToLevel(l);
       }
     }
42 changes: 19 additions & 23 deletions java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -646,8 +646,8 @@ private void run() throws RocksDBException {
           currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
       break;
     case "fillbatch":
-      tasks.add(new WriteSequentialTask(
-          currentTaskId++, randSeed_, num_, num_, writeOpt, 1000));
+      tasks.add(
+          new WriteSequentialTask(currentTaskId++, randSeed_, num_, num_, writeOpt, 1000));
       break;
     case "fillrandom":
       tasks.add(new WriteRandomTask(
@@ -901,27 +901,23 @@ public static void main(String[] args) throws Exception {
   }
 
   private enum Flag {
-    benchmarks(
-        Arrays.asList(
-            "fillseq",
-            "readrandom",
-            "fillrandom"),
-        "Comma-separated list of operations to run in the specified order\n" +
-        "\tActual benchmarks:\n" +
-        "\t\tfillseq -- write N values in sequential key order in async mode.\n" +
-        "\t\tfillrandom -- write N values in random key order in async mode.\n" +
-        "\t\tfillbatch -- write N/1000 batch where each batch has 1000 values\n" +
-        "\t\t in sequential key order in sync mode.\n" +
-        "\t\tfillsync -- write N/100 values in random key order in sync mode.\n" +
-        "\t\tfill100K -- write N/1000 100K values in random order in async mode.\n" +
-        "\t\treadseq -- read N times sequentially.\n" +
-        "\t\treadrandom -- read N times in random order.\n" +
-        "\t\treadhot -- read N times in random order from 1% section of DB.\n" +
-        "\t\treadwhilewriting -- measure the read performance of multiple readers\n" +
-        "\t\t with a bg single writer. The write rate of the bg\n" +
-        "\t\t is capped by --writes_per_second.\n" +
-        "\tMeta Operations:\n" +
-        "\t\tdelete -- delete DB") {
+    benchmarks(Arrays.asList("fillseq", "readrandom", "fillrandom"),
+        "Comma-separated list of operations to run in the specified order\n"
+            + "\tActual benchmarks:\n"
+            + "\t\tfillseq -- write N values in sequential key order in async mode.\n"
+            + "\t\tfillrandom -- write N values in random key order in async mode.\n"
+            + "\t\tfillbatch -- write N/1000 batch where each batch has 1000 values\n"
+            + "\t\t in sequential key order in sync mode.\n"
+            + "\t\tfillsync -- write N/100 values in random key order in sync mode.\n"
+            + "\t\tfill100K -- write N/1000 100K values in random order in async mode.\n"
+            + "\t\treadseq -- read N times sequentially.\n"
+            + "\t\treadrandom -- read N times in random order.\n"
+            + "\t\treadhot -- read N times in random order from 1% section of DB.\n"
+            + "\t\treadwhilewriting -- measure the read performance of multiple readers\n"
+            + "\t\t with a bg single writer. The write rate of the bg\n"
+            + "\t\t is capped by --writes_per_second.\n"
+            + "\tMeta Operations:\n"
+            + "\t\tdelete -- delete DB") {
       @Override public Object parseValue(String value) {
         return new ArrayList<String>(Arrays.asList(value.split(",")));
       }
[The remaining 16 changed files in this commit are not shown here.]