diff --git a/db/db_impl/db_impl.cc b/db/db_impl/db_impl.cc index 03fa01da5b8..47f79bd01f5 100644 --- a/db/db_impl/db_impl.cc +++ b/db/db_impl/db_impl.cc @@ -1224,7 +1224,7 @@ Status DBImpl::SetDBOptions( file_options_for_compaction_ = fs_->OptimizeForCompactionTableWrite( file_options_for_compaction_, immutable_db_options_); versions_->ChangeFileOptions(mutable_db_options_); - //TODO(xiez): clarify why apply optimize for read to write options + // TODO(xiez): clarify why apply optimize for read to write options file_options_for_compaction_ = fs_->OptimizeForCompactionTableRead( file_options_for_compaction_, immutable_db_options_); file_options_for_compaction_.compaction_readahead_size = @@ -1677,7 +1677,7 @@ InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options, IterState* cleanup = new IterState(this, &mutex_, super_version, read_options.background_purge_on_iterator_cleanup || - immutable_db_options_.avoid_unnecessary_blocking_io); + immutable_db_options_.avoid_unnecessary_blocking_io); internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr); return internal_iter; @@ -2048,8 +2048,8 @@ std::vector DBImpl::MultiGet( std::string* timestamp = timestamps ? &(*timestamps)[keys_read] : nullptr; LookupKey lkey(keys[keys_read], consistent_seqnum, read_options.timestamp); - auto cfh = - static_cast_with_check(column_family[keys_read]); + auto cfh = static_cast_with_check( + column_family[keys_read]); SequenceNumber max_covering_tombstone_seq = 0; auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID()); assert(mgd_iter != multiget_cf_data.end()); @@ -3502,8 +3502,7 @@ SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) { void DBImpl::CleanupSuperVersion(SuperVersion* sv) { // Release SuperVersion if (sv->Unref()) { - bool defer_purge = - immutable_db_options().avoid_unnecessary_blocking_io; + bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io; { InstrumentedMutexLock l(&mutex_); sv->Cleanup(); @@ -4274,6 +4273,10 @@ Status DBImpl::WriteOptionsFile(bool need_mutex_lock, if (s.ok()) { s = RenameTempFileToOptionsFile(file_name); + } else { + // do not accumulate failed files + // (could be hundreds with bad options code) + env_->DeleteFile(file_name).PermitUncheckedError(); } // restore lock if (!need_mutex_lock) { @@ -5130,8 +5133,7 @@ Status DBImpl::VerifyChecksumInternal(const ReadOptions& read_options, } } - bool defer_purge = - immutable_db_options().avoid_unnecessary_blocking_io; + bool defer_purge = immutable_db_options().avoid_unnecessary_blocking_io; { InstrumentedMutexLock l(&mutex_); for (auto sv : sv_list) { diff --git a/db/db_test_util.cc b/db/db_test_util.cc index 2cae1b6effc..70b19532200 100644 --- a/db/db_test_util.cc +++ b/db/db_test_util.cc @@ -564,6 +564,14 @@ Options DBTestBase::GetOptions( options.unordered_write = false; break; } + case kPreloadWithoutPinning: { + options.file_preload = FilePreload::kFilePreloadWithoutPinning; + break; + } + case kPreloadDisabled: { + options.file_preload = FilePreload::kFilePreloadDisabled; + break; + } default: break; diff --git a/db/db_test_util.h b/db/db_test_util.h index affa2ba3825..3e65784e195 100644 --- a/db/db_test_util.h +++ b/db/db_test_util.h @@ -874,6 +874,8 @@ class DBTestBase : public testing::Test { kPartitionedFilterWithNewTableReaderForCompactions, kUniversalSubcompactions, kUnorderedWrite, + kPreloadWithoutPinning, + kPreloadDisabled, // This must be the last line kEnd, }; diff --git a/db/version_builder.cc b/db/version_builder.cc index 
e76985687cc..b9e32a4949f 100644 --- a/db/version_builder.cc +++ b/db/version_builder.cc @@ -1146,6 +1146,11 @@ class VersionBuilder::Rep { bool always_load = (table_cache_capacity == TableCache::kInfiniteCapacity); size_t max_load = port::kMaxSizet; + if (FilePreload::kFilePreloadDisabled == ioptions_->file_preload) { + max_load = 0; + always_load = true; + } + if (!always_load) { // If it is initial loading and not set to always loading all the // files, we only load up to kInitialLoadLimit files, to limit the @@ -1173,27 +1178,12 @@ class VersionBuilder::Rep { } } - // + std::atomic next_file_meta_idx(0); std::vector> files_meta; std::vector statuses; - for (int level = 0; level < num_levels_; level++) { - for (auto& file_meta_pair : levels_[level].added_files) { - auto* file_meta = file_meta_pair.second; - // If the file has been opened before, just skip it. - if (!file_meta->table_reader_handle) { - files_meta.emplace_back(file_meta, level); - statuses.emplace_back(Status::OK()); - } - if (files_meta.size() >= max_load) { - break; - } - } - if (files_meta.size() >= max_load) { - break; - } - } - std::atomic next_file_meta_idx(0); + // function called by multiple threads via loop + // that follows when preloading active std::function load_handlers_func([&]() { while (true) { size_t file_idx = next_file_meta_idx.fetch_add(1); @@ -1211,30 +1201,64 @@ class VersionBuilder::Rep { internal_stats->GetFileReadHist(level), false, level, prefetch_index_and_filter_in_cache, max_file_size_for_l0_meta_pin, file_meta->temperature); + + // The code is attempting two things: + // 1. preload / warm the table cache with new file objects + // 2. create higher performance via a cache lookup avoidance + // The issue is that number 2 creates permanent objects in the + // table cache which over time are no longer useful. The + // kFilePreloadWithoutPinning option keeps #1 and disables #2. if (file_meta->table_reader_handle != nullptr) { - // Load table_reader - file_meta->fd.table_reader = table_cache_->GetTableReaderFromHandle( - file_meta->table_reader_handle); + if (ioptions_->file_preload == kFilePreloadWithPinning) { + file_meta->fd.table_reader = table_cache_->GetTableReaderFromHandle( + file_meta->table_reader_handle); + } else { // kFilePreloadWithoutPinning + table_cache_->ReleaseHandle(file_meta->table_reader_handle); + file_meta->table_reader_handle = nullptr; + } } } }); - std::vector threads; - for (int i = 1; i < max_threads; i++) { - threads.emplace_back(load_handlers_func); - } - load_handlers_func(); - for (auto& t : threads) { - t.join(); - } Status ret; - for (const auto& s : statuses) { - if (!s.ok()) { - if (ret.ok()) { - ret = s; + // Threaded preloading + if (max_load > 0) { + // + for (int level = 0; level < num_levels_; level++) { + for (auto& file_meta_pair : levels_[level].added_files) { + auto* file_meta = file_meta_pair.second; + // If the file has been opened before, just skip it. 
+ if (!file_meta->table_reader_handle) { + files_meta.emplace_back(file_meta, level); + statuses.emplace_back(Status::OK()); + } + if (files_meta.size() >= max_load) { + break; + } + } + if (files_meta.size() >= max_load) { + break; + } + } + + std::vector threads; + for (int i = 1; i < max_threads; i++) { + threads.emplace_back(load_handlers_func); + } + load_handlers_func(); + for (auto& t : threads) { + t.join(); + } + + for (const auto& s : statuses) { + if (!s.ok()) { + if (ret.ok()) { + ret = s; + } } } } + return ret; } }; diff --git a/db/version_builder_test.cc b/db/version_builder_test.cc index 82eb25684f7..1feb34308de 100644 --- a/db/version_builder_test.cc +++ b/db/version_builder_test.cc @@ -9,6 +9,7 @@ #include #include +#include "db/db_test_util.h" #include "db/version_edit.h" #include "db/version_set.h" #include "rocksdb/advanced_options.h" @@ -1683,10 +1684,10 @@ TEST_F(VersionBuilderTest, CheckConsistencyForFileDeletedTwice) { UpdateVersionStorageInfo(&new_vstorage); VersionBuilder version_builder2(env_options, &ioptions_, table_cache, - &new_vstorage, version_set); + &new_vstorage, version_set); VersionStorageInfo new_vstorage2(&icmp_, ucmp_, options_.num_levels, - kCompactionStyleLevel, nullptr, - true /* force_consistency_checks */); + kCompactionStyleLevel, nullptr, + true /* force_consistency_checks */); ASSERT_NOK(version_builder2.Apply(&version_edit)); UnrefFilesInVersion(&new_vstorage); @@ -1703,10 +1704,8 @@ TEST_F(VersionBuilderTest, EstimatedActiveKeys) { for (uint32_t i = 0; i < kNumFiles; ++i) { Add(static_cast(i / kFilesPerLevel), i + 1, ToString((i + 100) * 1000).c_str(), - ToString((i + 100) * 1000 + 999).c_str(), - 100U, 0, 100, 100, - kEntriesPerFile, kDeletionsPerFile, - (i < kTotalSamples)); + ToString((i + 100) * 1000 + 999).c_str(), 100U, 0, 100, 100, + kEntriesPerFile, kDeletionsPerFile, (i < kTotalSamples)); } // minus 2X for the number of deletion entries because: // 1x for deletion entry does not count as a data entry. 
@@ -1715,6 +1714,94 @@ TEST_F(VersionBuilderTest, EstimatedActiveKeys) {
             (kEntriesPerFile - 2 * kDeletionsPerFile) * kNumFiles);
 }
 
+class FilePreloadTest : public DBTestBase {
+ public:
+  FilePreloadTest() : DBTestBase("/file_preload_test", /*env_do_fsync=*/true) {}
+};
+
+TEST_F(FilePreloadTest, PreloadCaching) {
+  // create a DB with 3 files
+  ASSERT_OK(Put("key", "val"));
+  ASSERT_OK(Flush());
+  ASSERT_OK(Put("key2", "val2"));
+  ASSERT_OK(Flush());
+  ASSERT_OK(Put("key3", "val3"));
+  ASSERT_OK(Flush());
+
+  DBImpl* db_impl = dbfull();
+  Cache* table_cache = db_impl->TEST_table_cache();
+
+  ASSERT_EQ(table_cache->GetUsage(), 3) << "with preload: failed";
+  table_cache->EraseUnRefEntries();
+  ASSERT_EQ(table_cache->GetUsage(), 3) << "with pinning: failed";
+
+  Options new_options = GetOptions(kPreloadWithoutPinning);
+  Reopen(new_options);
+  db_impl = dbfull();
+  table_cache = db_impl->TEST_table_cache();
+
+  ASSERT_EQ(table_cache->GetUsage(), 3) << "without preload: failed";
+  table_cache->EraseUnRefEntries();
+  ASSERT_EQ(table_cache->GetUsage(), 0) << "without pinning: should not happen";
+
+  new_options = GetOptions(kPreloadDisabled);
+  Reopen(new_options);
+  db_impl = dbfull();
+  table_cache = db_impl->TEST_table_cache();
+
+  ASSERT_EQ(table_cache->GetUsage(), 0)
+      << "disabled preload: should not happen";
+  table_cache->EraseUnRefEntries();
+  ASSERT_EQ(table_cache->GetUsage(), 0)
+      << "disabled pinning: should not happen";
+}
+
+#ifndef ROCKSDB_LITE
+// lite does not support GetColumnFamilyMetaData()
+
+TEST_F(FilePreloadTest, PreloadCorruption) {
+  // create a DB with 3 files
+  ASSERT_OK(Put("key", "val"));
+  ASSERT_OK(Flush());
+  ASSERT_OK(Put("key2", "val2"));
+  ASSERT_OK(Flush());
+  ASSERT_OK(Put("key3", "val3"));
+  ASSERT_OK(Flush());
+
+  DBImpl* db_impl = dbfull();
+  Cache* table_cache = db_impl->TEST_table_cache();
+
+  ASSERT_EQ(table_cache->GetUsage(), 3);
+  table_cache->EraseUnRefEntries();
+  ASSERT_EQ(table_cache->GetUsage(), 3);
+
+  Options new_options = GetOptions(kDefault);
+  ASSERT_OK(TryReopen(new_options));
+  db_impl = dbfull();
+  table_cache = db_impl->TEST_table_cache();
+
+  ASSERT_EQ(table_cache->GetUsage(), 3);
+  table_cache->EraseUnRefEntries();
+  ASSERT_EQ(table_cache->GetUsage(), 3);
+
+  // find the name of an sst file to corrupt
+  // keep the file size unchanged so paranoid_checks does not fail on size
+  ColumnFamilyMetaData meta;
+  db_->GetColumnFamilyMetaData(&meta);
+  // name starts with slash
+  std::string fail_file =
+      meta.levels[0].files[0].db_path + meta.levels[0].files[0].name;
+  std::string garbage(meta.levels[0].files[0].size, '@');
+  ASSERT_OK(WriteStringToFile(db_->GetEnv(), garbage, fail_file, true));
+
+  ASSERT_NOK(TryReopen(new_options))
+      << "reopen should fail with corrupted .sst";
+
+  new_options = GetOptions(kPreloadDisabled);
+  ASSERT_OK(TryReopen(new_options))
+      << "reopen should succeed with preload disabled";
+}
+#endif
 }  // namespace ROCKSDB_NAMESPACE
 
 int main(int argc, char** argv) {
diff --git a/db/version_edit_handler.cc b/db/version_edit_handler.cc
index 208527fb84e..411db228fad 100644
--- a/db/version_edit_handler.cc
+++ b/db/version_edit_handler.cc
@@ -534,7 +534,8 @@ Status VersionEditHandler::MaybeCreateVersion(const VersionEdit& /*edit*/,
       // Install new version
       v->PrepareAppend(
           *cfd->GetLatestMutableCFOptions(),
-          !(version_set_->db_options_->skip_stats_update_on_db_open));
+          !(version_set_->db_options_->skip_stats_update_on_db_open ||
+            kFilePreloadDisabled == cfd->ioptions()->file_preload));
       version_set_->AppendVersion(cfd, v);
     } else {
       delete v;
@@ -808,7 +809,8 @@ Status VersionEditHandlerPointInTime::MaybeCreateVersion(
   if (s.ok()) {
     version->PrepareAppend(
         *cfd->GetLatestMutableCFOptions(),
-        !version_set_->db_options_->skip_stats_update_on_db_open);
+        !(version_set_->db_options_->skip_stats_update_on_db_open ||
+          kFilePreloadDisabled == cfd->ioptions()->file_preload));
     auto v_iter = versions_.find(cfd->GetID());
     if (v_iter != versions_.end()) {
       delete v_iter->second;
diff --git a/include/rocksdb/advanced_options.h b/include/rocksdb/advanced_options.h
index fd080b6f19b..5ae5962dee6 100644
--- a/include/rocksdb/advanced_options.h
+++ b/include/rocksdb/advanced_options.h
@@ -56,6 +56,25 @@ enum CompactionPri : char {
   kMinOverlappingRatio = 0x3,
 };
 
+// RocksDB uses the first 25% of max_open_files for precaching during
+// start-up and after compactions. The files precached in this fashion
+// provide faster access. However, these files are also never released.
+// Scenarios with large bloom filters that are not held in cache, or where
+// the user manually lowers max_open_files at runtime, might want to
+// disable this behavior.
+enum FilePreload : char {
+  // RocksDB uses the first 25% of max_open_files for precaching during
+  // start-up and after compactions. The files precached in this fashion
+  // provide faster access. However, these files are also never released.
+  kFilePreloadWithPinning = 0x0,
+  // RocksDB uses the first 25% of max_open_files for precaching during
+  // start-up and after compactions. No pinning within the cache, so access
+  // has one additional layer of indirection, but the cache space can be freed.
+  kFilePreloadWithoutPinning = 0x1,
+  // RocksDB does not open existing table files during start-up.
+  kFilePreloadDisabled = 0x2,
+};
+
 struct CompactionOptionsFIFO {
   // once the total sum of table files reaches this, we will delete the oldest
   // table file
@@ -748,6 +767,13 @@ struct AdvancedColumnFamilyOptions {
   // Default: true
   bool force_consistency_checks = true;
 
+  // RocksDB can preload and optionally pin table files within the table
+  // cache at start-up and after compactions. The files precached in this
+  // fashion provide faster access. However, with kFilePreloadWithPinning
+  // they are also never released from the table cache.
+  // Default: kFilePreloadWithPinning
+  FilePreload file_preload = kFilePreloadWithPinning;
+
   // Measure IO stats in compactions and flushes, if true.
   //
   // Default: false
diff --git a/include/rocksdb/utilities/ldb_cmd.h b/include/rocksdb/utilities/ldb_cmd.h
index e900abefee5..70a1acd274e 100644
--- a/include/rocksdb/utilities/ldb_cmd.h
+++ b/include/rocksdb/utilities/ldb_cmd.h
@@ -61,6 +61,7 @@ class LDBCommand {
   static const std::string ARG_CREATE_IF_MISSING;
   static const std::string ARG_NO_VALUE;
   static const std::string ARG_DISABLE_CONSISTENCY_CHECKS;
+  static const std::string ARG_FILE_PRELOAD;
 
   struct ParsedParams {
     std::string cmd;
@@ -173,6 +174,9 @@ class LDBCommand {
   // The value passed to options.force_consistency_checks.
   bool force_consistency_checks_;
 
+  // The value passed to options.file_preload.
+ FilePreload file_preload_; + bool create_if_missing_; /** diff --git a/include/rocksdb/utilities/options_type.h b/include/rocksdb/utilities/options_type.h index 33292b3b4fe..82397bfb5e2 100644 --- a/include/rocksdb/utilities/options_type.h +++ b/include/rocksdb/utilities/options_type.h @@ -593,6 +593,8 @@ class OptionTypeInfo { bool IsStruct() const { return (type_ == OptionType::kStruct); } + bool IsVector() const { return (type_ == OptionType::kVector); } + bool IsConfigurable() const { return (type_ == OptionType::kConfigurable || type_ == OptionType::kCustomizable); diff --git a/monitoring/iostats_context.cc b/monitoring/iostats_context.cc index b86951b4bfe..9cc95dd24ce 100644 --- a/monitoring/iostats_context.cc +++ b/monitoring/iostats_context.cc @@ -20,9 +20,7 @@ __thread IOStatsContext iostats_context; "No thread-local support. Disable iostats context with -DNIOSTATS_CONTEXT." #endif -IOStatsContext* get_iostats_context() { - return &iostats_context; -} +IOStatsContext* get_iostats_context() { return &iostats_context; } void IOStatsContext::Reset() { #ifndef NIOSTATS_CONTEXT diff --git a/monitoring/perf_context.cc b/monitoring/perf_context.cc index 9e56f101886..ebccdc634c8 100644 --- a/monitoring/perf_context.cc +++ b/monitoring/perf_context.cc @@ -23,12 +23,11 @@ thread_local PerfContext perf_context; #error "No thread-local support. Disable perf context with -DNPERF_CONTEXT." #endif -PerfContext* get_perf_context() { - return &perf_context; -} +PerfContext* get_perf_context() { return &perf_context; } PerfContext::~PerfContext() { -#if !defined(NPERF_CONTEXT) && defined(ROCKSDB_SUPPORT_THREAD_LOCAL) && !defined(OS_SOLARIS) +#if !defined(NPERF_CONTEXT) && defined(ROCKSDB_SUPPORT_THREAD_LOCAL) && \ + !defined(OS_SOLARIS) ClearPerLevelPerfContext(); #endif } @@ -426,15 +425,14 @@ void PerfContext::Reset() { ss << #counter << " = " << counter << ", "; \ } -#define PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(counter) \ - if (per_level_perf_context_enabled && \ - level_to_perf_context) { \ - ss << #counter << " = "; \ - for (auto& kv : *level_to_perf_context) { \ - if (!exclude_zero_counters || (kv.second.counter > 0)) { \ - ss << kv.second.counter << "@level" << kv.first << ", "; \ - } \ - } \ +#define PERF_CONTEXT_BY_LEVEL_OUTPUT_ONE_COUNTER(counter) \ + if (per_level_perf_context_enabled && level_to_perf_context) { \ + ss << #counter << " = "; \ + for (auto& kv : *level_to_perf_context) { \ + if (!exclude_zero_counters || (kv.second.counter > 0)) { \ + ss << kv.second.counter << "@level" << kv.first << ", "; \ + } \ + } \ } void PerfContextByLevel::Reset() { @@ -551,11 +549,11 @@ void PerfContext::EnablePerLevelPerfContext() { per_level_perf_context_enabled = true; } -void PerfContext::DisablePerLevelPerfContext(){ +void PerfContext::DisablePerLevelPerfContext() { per_level_perf_context_enabled = false; } -void PerfContext::ClearPerLevelPerfContext(){ +void PerfContext::ClearPerLevelPerfContext() { if (level_to_perf_context != nullptr) { level_to_perf_context->clear(); delete level_to_perf_context; diff --git a/options/cf_options.cc b/options/cf_options.cc index 0da7f449a69..c6eac21cd28 100644 --- a/options/cf_options.cc +++ b/options/cf_options.cc @@ -140,6 +140,11 @@ static Status ParseCompressionOptions(const std::string& value, const std::string kOptNameBMCompOpts = "bottommost_compression_opts"; const std::string kOptNameCompOpts = "compression_opts"; +static std::unordered_map file_preload_string_map = { + {"kFilePreloadWithPinning", FilePreload::kFilePreloadWithPinning}, + 
{"kFilePreloadWithoutPinning", FilePreload::kFilePreloadWithoutPinning}, + {"kFilePreloadDisabled", FilePreload::kFilePreloadDisabled}}; + // OptionTypeInfo map for CompressionOptions static std::unordered_map compression_options_type_info = { @@ -514,6 +519,9 @@ static std::unordered_map {"compaction_measure_io_stats", {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, OptionTypeFlags::kNone}}, + {"file_preload", OptionTypeInfo::Enum( + offset_of(&ImmutableCFOptions::file_preload), + &file_preload_string_map)}, {"purge_redundant_kvs_while_flush", {0, OptionType::kBoolean, OptionVerificationType::kDeprecated, OptionTypeFlags::kNone}}, @@ -864,6 +872,7 @@ ImmutableCFOptions::ImmutableCFOptions(const ColumnFamilyOptions& cf_options) num_levels(cf_options.num_levels), optimize_filters_for_hits(cf_options.optimize_filters_for_hits), force_consistency_checks(cf_options.force_consistency_checks), + file_preload(cf_options.file_preload), memtable_insert_with_hint_prefix_extractor( cf_options.memtable_insert_with_hint_prefix_extractor), cf_paths(cf_options.cf_paths), @@ -905,9 +914,9 @@ uint64_t MultiplyCheckOverflow(uint64_t op1, double op2) { // when level_compaction_dynamic_level_bytes is true and leveled compaction // is used, the base level is not always L1, so precomupted max_file_size can // no longer be used. Recompute file_size_for_level from base level. -uint64_t MaxFileSizeForLevel(const MutableCFOptions& cf_options, - int level, CompactionStyle compaction_style, int base_level, - bool level_compaction_dynamic_level_bytes) { +uint64_t MaxFileSizeForLevel(const MutableCFOptions& cf_options, int level, + CompactionStyle compaction_style, int base_level, + bool level_compaction_dynamic_level_bytes) { if (!level_compaction_dynamic_level_bytes || level < base_level || compaction_style != kCompactionStyleLevel) { assert(level >= 0); diff --git a/options/cf_options.h b/options/cf_options.h index 19ecec0694a..6639d5b3ec1 100644 --- a/options/cf_options.h +++ b/options/cf_options.h @@ -72,6 +72,8 @@ struct ImmutableCFOptions { bool force_consistency_checks; + FilePreload file_preload; + std::shared_ptr memtable_insert_with_hint_prefix_extractor; diff --git a/options/configurable.cc b/options/configurable.cc index 5f916078a19..3607cfb1f0c 100644 --- a/options/configurable.cc +++ b/options/configurable.cc @@ -608,7 +608,8 @@ Status ConfigurableHelper::SerializeOptions(const ConfigOptions& config_options, } if (!s.ok()) { return s; - } else if (!value.empty()) { + } else if (!value.empty() || + (opt_info.IsVector() && opt_info.IsMutable())) { // = result->append(prefix + opt_name + "=" + value + config_options.delimiter); diff --git a/options/options.cc b/options/options.cc index 3ed6f196fc7..6e0f7d6a2b8 100644 --- a/options/options.cc +++ b/options/options.cc @@ -87,6 +87,7 @@ AdvancedColumnFamilyOptions::AdvancedColumnFamilyOptions(const Options& options) optimize_filters_for_hits(options.optimize_filters_for_hits), paranoid_file_checks(options.paranoid_file_checks), force_consistency_checks(options.force_consistency_checks), + file_preload(options.file_preload), report_bg_io_stats(options.report_bg_io_stats), ttl(options.ttl), periodic_compaction_seconds(options.periodic_compaction_seconds), @@ -381,6 +382,8 @@ void ColumnFamilyOptions::Dump(Logger* log) const { paranoid_file_checks); ROCKS_LOG_HEADER(log, " Options.force_consistency_checks: %d", force_consistency_checks); + ROCKS_LOG_HEADER(log, " Options.file_preload: %d", + file_preload); ROCKS_LOG_HEADER(log, " 
Options.report_bg_io_stats: %d", report_bg_io_stats); ROCKS_LOG_HEADER(log, " Options.ttl: %" PRIu64, diff --git a/options/options_helper.cc b/options/options_helper.cc index 50d4e230005..31d75d95ca1 100644 --- a/options/options_helper.cc +++ b/options/options_helper.cc @@ -293,6 +293,7 @@ void UpdateColumnFamilyOptions(const ImmutableCFOptions& ioptions, cf_opts->num_levels = ioptions.num_levels; cf_opts->optimize_filters_for_hits = ioptions.optimize_filters_for_hits; cf_opts->force_consistency_checks = ioptions.force_consistency_checks; + cf_opts->file_preload = ioptions.file_preload; cf_opts->memtable_insert_with_hint_prefix_extractor = ioptions.memtable_insert_with_hint_prefix_extractor; cf_opts->cf_paths = ioptions.cf_paths; @@ -462,13 +463,11 @@ bool SerializeSingleOptionHelper(const void* opt_address, case OptionType::kInt32T: *value = ToString(*(static_cast(opt_address))); break; - case OptionType::kInt64T: - { - int64_t v; - GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); - } - break; + case OptionType::kInt64T: { + int64_t v; + GetUnaligned(static_cast(opt_address), &v); + *value = ToString(v); + } break; case OptionType::kUInt: *value = ToString(*(static_cast(opt_address))); break; @@ -478,20 +477,16 @@ bool SerializeSingleOptionHelper(const void* opt_address, case OptionType::kUInt32T: *value = ToString(*(static_cast(opt_address))); break; - case OptionType::kUInt64T: - { - uint64_t v; - GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); - } - break; - case OptionType::kSizeT: - { - size_t v; - GetUnaligned(static_cast(opt_address), &v); - *value = ToString(v); - } - break; + case OptionType::kUInt64T: { + uint64_t v; + GetUnaligned(static_cast(opt_address), &v); + *value = ToString(v); + } break; + case OptionType::kSizeT: { + size_t v; + GetUnaligned(static_cast(opt_address), &v); + *value = ToString(v); + } break; case OptionType::kDouble: *value = ToString(*(static_cast(opt_address))); break; @@ -511,7 +506,6 @@ bool SerializeSingleOptionHelper(const void* opt_address, return SerializeEnum( compression_type_string_map, *(static_cast(opt_address)), value); - break; case OptionType::kFilterPolicy: { const auto* ptr = static_cast*>(opt_address); @@ -558,7 +552,6 @@ Status ConfigureFromMap( return s; } - Status StringToMap(const std::string& opts_str, std::unordered_map* opts_map) { assert(opts_map); @@ -602,7 +595,6 @@ Status StringToMap(const std::string& opts_str, return Status::OK(); } - Status GetStringFromDBOptions(std::string* opt_string, const DBOptions& db_options, const std::string& delimiter) { @@ -620,7 +612,6 @@ Status GetStringFromDBOptions(const ConfigOptions& config_options, return config->GetOptionString(config_options, opt_string); } - Status GetStringFromColumnFamilyOptions(std::string* opt_string, const ColumnFamilyOptions& cf_options, const std::string& delimiter) { @@ -681,10 +672,9 @@ Status GetColumnFamilyOptionsFromMap( } } -Status GetColumnFamilyOptionsFromString( - const ColumnFamilyOptions& base_options, - const std::string& opts_str, - ColumnFamilyOptions* new_options) { +Status GetColumnFamilyOptionsFromString(const ColumnFamilyOptions& base_options, + const std::string& opts_str, + ColumnFamilyOptions* new_options) { ConfigOptions config_options; config_options.input_strings_escaped = false; config_options.ignore_unknown_options = false; diff --git a/options/options_settable_test.cc b/options/options_settable_test.cc index cdc5822bb13..b33cddf5ac7 100644 --- a/options/options_settable_test.cc +++ 
b/options/options_settable_test.cc @@ -518,6 +518,7 @@ TEST_F(OptionsSettableTest, ColumnFamilyOptionsAllFieldsSettable) { "blob_garbage_collection_age_cutoff=0.5;" "blob_garbage_collection_force_threshold=0.75;" "blob_compaction_readahead_size=262144;" + "file_preload=kFilePreloadWithPinning;" "bottommost_temperature=kWarm;" "compaction_options_fifo={max_table_files_size=3;allow_" "compaction=false;age_for_warm=1;};", diff --git a/options/options_test.cc b/options/options_test.cc index f1733beaad7..ea6a9cdbc78 100644 --- a/options/options_test.cc +++ b/options/options_test.cc @@ -1627,6 +1627,10 @@ TEST_F(OptionsTest, MutableCFOptions) { ASSERT_NOK(GetColumnFamilyOptionsFromMap( config_options, cf_opts, {{"force_consistency_checks", "true"}}, &cf_opts)); + // file_preload is not mutable + ASSERT_NOK(GetColumnFamilyOptionsFromMap( + config_options, cf_opts, {{"file_preload", "kFilePreloadWithPinning"}}, + &cf_opts)); // Attempt to change the table. It is not mutable, so this should fail and // leave the original intact diff --git a/port/win/env_win.cc b/port/win/env_win.cc index 3108cf6e776..11c4d75ff31 100644 --- a/port/win/env_win.cc +++ b/port/win/env_win.cc @@ -9,6 +9,8 @@ #if defined(OS_WIN) +#include "port/win/env_win.h" + #include // _rmdir, _mkdir, _getcwd #include #include // _access @@ -27,7 +29,6 @@ #include "monitoring/thread_status_util.h" #include "port/port.h" #include "port/port_dirent.h" -#include "port/win/env_win.h" #include "port/win/io_win.h" #include "port/win/win_logger.h" #include "rocksdb/env.h" diff --git a/table/cuckoo/cuckoo_table_reader.h b/table/cuckoo/cuckoo_table_reader.h index fb30e92cc35..f6c599ae808 100644 --- a/table/cuckoo/cuckoo_table_reader.h +++ b/table/cuckoo/cuckoo_table_reader.h @@ -9,8 +9,8 @@ #pragma once #ifndef ROCKSDB_LITE -#include #include +#include #include #include @@ -25,7 +25,7 @@ class Arena; class TableReader; struct ImmutableOptions; -class CuckooTableReader: public TableReader { +class CuckooTableReader : public TableReader { public: CuckooTableReader(const ImmutableOptions& ioptions, std::unique_ptr&& file, @@ -93,7 +93,7 @@ class CuckooTableReader: public TableReader { uint64_t table_size_; const Comparator* ucomp_; uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index, - uint64_t max_num_buckets); + uint64_t max_num_buckets); }; } // namespace ROCKSDB_NAMESPACE diff --git a/table/sst_file_dumper.cc b/table/sst_file_dumper.cc index a887c856818..380c67826a9 100644 --- a/table/sst_file_dumper.cc +++ b/table/sst_file_dumper.cc @@ -221,9 +221,8 @@ Status SstFileDumper::CalculateCompressedTableSize( table_options.block_size = block_size; BlockBasedTableFactory block_based_tf(table_options); std::unique_ptr table_builder; - table_builder.reset(block_based_tf.NewTableBuilder( - tb_options, - dest_writer.get())); + table_builder.reset( + block_based_tf.NewTableBuilder(tb_options, dest_writer.get())); std::unique_ptr iter(table_reader_->NewIterator( read_options_, moptions_.prefix_extractor.get(), /*arena=*/nullptr, /*skip_filters=*/false, TableReaderCaller::kSSTDumpTool)); diff --git a/test_util/testutil.cc b/test_util/testutil.cc index 14b0c181af9..4404055d6b6 100644 --- a/test_util/testutil.cc +++ b/test_util/testutil.cc @@ -342,6 +342,7 @@ void RandomInitCFOptions(ColumnFamilyOptions* cf_opt, DBOptions& db_options, cf_opt->optimize_filters_for_hits = rnd->Uniform(2); cf_opt->paranoid_file_checks = rnd->Uniform(2); cf_opt->force_consistency_checks = rnd->Uniform(2); + cf_opt->file_preload = (FilePreload)(rnd->Uniform(3)); 
  cf_opt->compaction_options_fifo.allow_compaction = rnd->Uniform(2);
  cf_opt->memtable_whole_key_filtering = rnd->Uniform(2);
  cf_opt->enable_blob_files = rnd->Uniform(2);
diff --git a/tools/ldb_cmd.cc b/tools/ldb_cmd.cc
index 74973549392..4b205a43ad0 100644
--- a/tools/ldb_cmd.cc
+++ b/tools/ldb_cmd.cc
@@ -65,6 +65,7 @@ const std::string LDBCommand::ARG_TIMESTAMP = "timestamp";
 const std::string LDBCommand::ARG_TRY_LOAD_OPTIONS = "try_load_options";
 const std::string LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS =
     "disable_consistency_checks";
+const std::string LDBCommand::ARG_FILE_PRELOAD = "file_preload";
 const std::string LDBCommand::ARG_IGNORE_UNKNOWN_OPTIONS =
     "ignore_unknown_options";
 const std::string LDBCommand::ARG_FROM = "from";
@@ -93,7 +94,7 @@ void DumpWalFile(Options options, std::string wal_file, bool print_header,
 void DumpSstFile(Options options, std::string filename, bool output_hex,
                  bool show_properties);
-};
+};  // namespace
 
 LDBCommand* LDBCommand::InitFromCmdLineArgs(
     int argc, char const* const* argv, const Options& options,
@@ -136,7 +137,7 @@ LDBCommand* LDBCommand::InitFromCmdLineArgs(
   const std::string OPTION_PREFIX = "--";
 
   for (const auto& arg : args) {
-    if (arg[0] == '-' && arg[1] == '-'){
+    if (arg[0] == '-' && arg[1] == '-') {
       std::vector<std::string> splits = StringSplit(arg, '=');
       // --option_name=option_value
       if (splits.size() == 2) {
@@ -262,8 +263,7 @@ LDBCommand* LDBCommand::SelectCommand(const ParsedParams& parsed_params) {
                                 parsed_params.flags);
   } else if (parsed_params.cmd == CheckPointCommand::Name()) {
     return new CheckPointCommand(parsed_params.cmd_params,
-                                 parsed_params.option_map,
-                                 parsed_params.flags);
+                                 parsed_params.option_map, parsed_params.flags);
   } else if (parsed_params.cmd == RepairCommand::Name()) {
     return new RepairCommand(parsed_params.cmd_params, parsed_params.option_map,
                              parsed_params.flags);
@@ -504,10 +504,11 @@ std::vector<std::string> LDBCommand::BuildCmdLineOptions(
       ARG_COMPRESSION_TYPE,
       ARG_COMPRESSION_MAX_DICT_BYTES,
       ARG_WRITE_BUFFER_SIZE,
+      ARG_FILE_PRELOAD,
       ARG_FILE_SIZE,
       ARG_FIX_PREFIX_LEN,
       ARG_TRY_LOAD_OPTIONS,
       ARG_DISABLE_CONSISTENCY_CHECKS,
       ARG_IGNORE_UNKNOWN_OPTIONS,
       ARG_CF_NAME};
   ret.insert(ret.end(), options.begin(), options.end());
@@ -658,7 +660,7 @@ void LDBCommand::OverrideBaseCFOptions(ColumnFamilyOptions* cf_opts) {
 
   int write_buffer_size;
   if (ParseIntOption(option_map_, ARG_WRITE_BUFFER_SIZE, write_buffer_size,
-      exec_state_)) {
+                     exec_state_)) {
     if (write_buffer_size > 0) {
       cf_opts->write_buffer_size = write_buffer_size;
     } else {
@@ -688,6 +690,17 @@ void LDBCommand::OverrideBaseCFOptions(ColumnFamilyOptions* cf_opts) {
           LDBCommandExecuteResult::Failed(ARG_FIX_PREFIX_LEN + " must be > 0.");
     }
   }
+
+  int file_preload;
+  if (ParseIntOption(option_map_, ARG_FILE_PRELOAD, file_preload,
+                     exec_state_)) {
+    if (0 <= file_preload && file_preload < 3) {
+      cf_opts->file_preload = (FilePreload)file_preload;
+    } else {
+      exec_state_ = LDBCommandExecuteResult::Failed(ARG_FILE_PRELOAD +
+                                                    " must be 0, 1, or 2.");
+    }
+  }
 }
 
 // First, initializes the options state using the OPTIONS file when enabled.
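For reference, a hypothetical ldb invocation exercising the flag parsed above; the integer value maps onto the FilePreload enum (0 = kFilePreloadWithPinning, 1 = kFilePreloadWithoutPinning, 2 = kFilePreloadDisabled) and the database path is only a placeholder:

    ldb --db=/path/to/db --file_preload=2 scan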
@@ -1043,7 +1056,7 @@ void DBLoaderCommand::DoCommand() { } else if (0 == line.find("Created bg thread 0x")) { // ignore this line } else { - bad_lines ++; + bad_lines++; } } @@ -1133,7 +1146,6 @@ ManifestDumpCommand::ManifestDumpCommand( } void ManifestDumpCommand::DoCommand() { - std::string manifestfile; if (!path_.empty()) { @@ -1491,7 +1503,7 @@ void IncBucketCounts(std::vector& bucket_counts, int ttl_start, (void)num_buckets; #endif assert(time_range > 0 && timekv >= ttl_start && bucket_size > 0 && - timekv < (ttl_start + time_range) && num_buckets > 1); + timekv < (ttl_start + time_range) && num_buckets > 1); int bucket = (timekv - ttl_start) / bucket_size; bucket_counts[bucket]++; } @@ -1500,7 +1512,7 @@ void PrintBucketCounts(const std::vector& bucket_counts, int ttl_start, int ttl_end, int bucket_size, int num_buckets) { int time_point = ttl_start; - for(int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) { + for (int i = 0; i < num_buckets - 1; i++, time_point += bucket_size) { fprintf(stdout, "Keys in range %s to %s : %lu\n", TimeToHumanString(time_point).c_str(), TimeToHumanString(time_point + bucket_size).c_str(), @@ -1545,10 +1557,10 @@ InternalDumpCommand::InternalDumpCommand( if (itr != options.end()) { delim_ = itr->second; count_delim_ = true; - // fprintf(stdout,"delim = %c\n",delim_[0]); + // fprintf(stdout,"delim = %c\n",delim_[0]); } else { count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM); - delim_="."; + delim_ = "."; } print_stats_ = IsFlagPresent(flags, ARG_STATS); @@ -1600,8 +1612,8 @@ void InternalDumpCommand::DoCommand() { } std::string rtype1, rtype2, row, val; rtype2 = ""; - uint64_t c=0; - uint64_t s1=0,s2=0; + uint64_t c = 0; + uint64_t s1 = 0, s2 = 0; long long count = 0; for (auto& key_version : key_versions) { @@ -1616,25 +1628,24 @@ void InternalDumpCommand::DoCommand() { int k; if (count_delim_) { rtype1 = ""; - s1=0; + s1 = 0; row = ikey.Encode().ToString(); val = key_version.value; - for(k=0;row[k]!='\x01' && row[k]!='\0';k++) - s1++; - for(k=0;val[k]!='\x01' && val[k]!='\0';k++) - s1++; - for(int j=0;row[j]!=delim_[0] && row[j]!='\0' && row[j]!='\x01';j++) - rtype1+=row[j]; - if(rtype2.compare("") && rtype2.compare(rtype1)!=0) { + for (k = 0; row[k] != '\x01' && row[k] != '\0'; k++) s1++; + for (k = 0; val[k] != '\x01' && val[k] != '\0'; k++) s1++; + for (int j = 0; row[j] != delim_[0] && row[j] != '\0' && row[j] != '\x01'; + j++) + rtype1 += row[j]; + if (rtype2.compare("") && rtype2.compare(rtype1) != 0) { fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n", rtype2.c_str(), c, s2); - c=1; - s2=s1; + c = 1; + s2 = s1; rtype2 = rtype1; } else { c++; - s2+=s1; - rtype2=rtype1; + s2 += s1; + rtype2 = rtype1; } } @@ -1647,7 +1658,7 @@ void InternalDumpCommand::DoCommand() { // Terminate if maximum number of keys have been dumped if (max_keys_ > 0 && count >= max_keys_) break; } - if(count_delim_) { + if (count_delim_) { fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n", rtype2.c_str(), c, s2); } else { @@ -1711,7 +1722,7 @@ DBDumperCommand::DBDumperCommand( count_delim_ = true; } else { count_delim_ = IsFlagPresent(flags, ARG_COUNT_DELIM); - delim_="."; + delim_ = "."; } print_stats_ = IsFlagPresent(flags, ARG_STATS); @@ -1850,13 +1861,13 @@ void DBDumperCommand::DoDumpCommand() { int bucket_size; if (!ParseIntOption(option_map_, ARG_TTL_BUCKET, bucket_size, exec_state_) || bucket_size <= 0) { - bucket_size = time_range; // Will have just 1 bucket by default + bucket_size = time_range; // Will have just 1 bucket by 
default } - //cretaing variables for row count of each type + // creating variables for row count of each type std::string rtype1, rtype2, row, val; rtype2 = ""; - uint64_t c=0; - uint64_t s1=0,s2=0; + uint64_t c = 0; + uint64_t s1 = 0, s2 = 0; // At this point, bucket_size=0 => time_range=0 int num_buckets = (bucket_size >= time_range) @@ -1874,11 +1885,9 @@ void DBDumperCommand::DoDumpCommand() { for (; iter->Valid(); iter->Next()) { int rawtime = 0; // If end marker was specified, we stop before it - if (!null_to_ && (iter->key().ToString() >= to_)) - break; + if (!null_to_ && (iter->key().ToString() >= to_)) break; // Terminate if maximum number of keys have been dumped - if (max_keys == 0) - break; + if (max_keys == 0) break; if (is_db_ttl_) { TtlIterator* it_ttl = static_cast_with_check(iter); rawtime = it_ttl->ttl_timestamp(); @@ -1898,21 +1907,20 @@ void DBDumperCommand::DoDumpCommand() { rtype1 = ""; row = iter->key().ToString(); val = iter->value().ToString(); - s1 = row.size()+val.size(); - for(int j=0;row[j]!=delim_[0] && row[j]!='\0';j++) - rtype1+=row[j]; - if(rtype2.compare("") && rtype2.compare(rtype1)!=0) { + s1 = row.size() + val.size(); + for (int j = 0; row[j] != delim_[0] && row[j] != '\0'; j++) + rtype1 += row[j]; + if (rtype2.compare("") && rtype2.compare(rtype1) != 0) { fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n", rtype2.c_str(), c, s2); - c=1; - s2=s1; + c = 1; + s2 = s1; rtype2 = rtype1; } else { - c++; - s2+=s1; - rtype2=rtype1; + c++; + s2 += s1; + rtype2 = rtype1; } - } if (count_only_) { @@ -1933,7 +1941,7 @@ void DBDumperCommand::DoDumpCommand() { if (num_buckets > 1 && is_db_ttl_) { PrintBucketCounts(bucket_counts, ttl_start, ttl_end, bucket_size, num_buckets); - } else if(count_delim_) { + } else if (count_delim_) { fprintf(stdout, "%s => count:%" PRIu64 "\tsize:%" PRIu64 "\n", rtype2.c_str(), c, s2); } else { @@ -1964,7 +1972,7 @@ ReduceDBLevelsCommand::ReduceDBLevelsCommand( ParseIntOption(option_map_, ARG_NEW_LEVELS, new_levels_, exec_state_); print_old_levels_ = IsFlagPresent(flags, ARG_PRINT_OLD_LEVELS); - if(new_levels_ <= 0) { + if (new_levels_ <= 0) { exec_state_ = LDBCommandExecuteResult::Failed( " Use --" + ARG_NEW_LEVELS + " to specify a new level number\n"); } @@ -1977,7 +1985,7 @@ std::vector ReduceDBLevelsCommand::PrepareArgs( ret.push_back("--" + ARG_DB + "=" + db_path); ret.push_back("--" + ARG_NEW_LEVELS + "=" + ROCKSDB_NAMESPACE::ToString(new_levels)); - if(print_old_level) { + if (print_old_level) { ret.push_back("--" + ARG_PRINT_OLD_LEVELS); } return ret; @@ -2002,8 +2010,7 @@ void ReduceDBLevelsCommand::OverrideBaseCFOptions( cf_opts->max_bytes_for_level_multiplier = 1; } -Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, - int* levels) { +Status ReduceDBLevelsCommand::GetOldNumOfLevels(Options& opt, int* levels) { ImmutableDBOptions db_options(opt); EnvOptions soptions; std::shared_ptr tc( @@ -2101,9 +2108,9 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand( old_compaction_style_(-1), new_compaction_style_(-1) { ParseIntOption(option_map_, ARG_OLD_COMPACTION_STYLE, old_compaction_style_, - exec_state_); + exec_state_); if (old_compaction_style_ != kCompactionStyleLevel && - old_compaction_style_ != kCompactionStyleUniversal) { + old_compaction_style_ != kCompactionStyleUniversal) { exec_state_ = LDBCommandExecuteResult::Failed( "Use --" + ARG_OLD_COMPACTION_STYLE + " to specify old compaction " + "style. 
Check ldb help for proper compaction style value.\n"); @@ -2111,9 +2118,9 @@ ChangeCompactionStyleCommand::ChangeCompactionStyleCommand( } ParseIntOption(option_map_, ARG_NEW_COMPACTION_STYLE, new_compaction_style_, - exec_state_); + exec_state_); if (new_compaction_style_ != kCompactionStyleLevel && - new_compaction_style_ != kCompactionStyleUniversal) { + new_compaction_style_ != kCompactionStyleUniversal) { exec_state_ = LDBCommandExecuteResult::Failed( "Use --" + ARG_NEW_COMPACTION_STYLE + " to specify new compaction " + "style. Check ldb help for proper compaction style value.\n"); @@ -2448,7 +2455,6 @@ WALDumperCommand::WALDumperCommand( wal_file_ = itr->second; } - print_header_ = IsFlagPresent(flags, ARG_PRINT_HEADER); print_values_ = IsFlagPresent(flags, ARG_PRINT_VALUE); is_write_committed_ = ParseBooleanOption(options, ARG_WRITE_COMMITTED, true); @@ -2511,7 +2517,7 @@ void GetCommand::DoCommand() { Status st = db_->Get(ReadOptions(), GetCfHandle(), key_, &value); if (st.ok()) { fprintf(stdout, "%s\n", - (is_value_hex_ ? StringToHex(value) : value).c_str()); + (is_value_hex_ ? StringToHex(value) : value).c_str()); } else { std::stringstream oss; oss << "Get failed: " << st.ToString(); @@ -2750,9 +2756,9 @@ void ScanCommand::DoCommand() { TimeToHumanString(ttl_start).c_str(), TimeToHumanString(ttl_end).c_str()); } - for ( ; - it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_); - it->Next()) { + for (; + it->Valid() && (!end_key_specified_ || it->key().ToString() < end_key_); + it->Next()) { if (is_db_ttl_) { TtlIterator* it_ttl = static_cast_with_check(it); int rawtime = it_ttl->ttl_timestamp(); @@ -2950,8 +2956,9 @@ void DBQuerierCommand::Help(std::string& ret) { ret.append(DBQuerierCommand::Name()); ret.append(" [--" + ARG_TTL + "]"); ret.append("\n"); - ret.append(" Starts a REPL shell. Type help for list of available " - "commands."); + ret.append( + " Starts a REPL shell. Type help for list of available " + "commands."); ret.append("\n"); } @@ -2978,7 +2985,7 @@ void DBQuerierCommand::DoCommand() { if (pos2 == std::string::npos) { break; } - tokens.push_back(line.substr(pos, pos2-pos)); + tokens.push_back(line.substr(pos, pos2 - pos)); pos = pos2 + 1; } tokens.push_back(line.substr(pos)); @@ -3012,8 +3019,8 @@ void DBQuerierCommand::DoCommand() { key = (is_key_hex_ ? HexToString(tokens[1]) : tokens[1]); s = db_->Get(read_options, GetCfHandle(), Slice(key), &value); if (s.ok()) { - fprintf(stdout, "%s\n", PrintKeyValue(key, value, - is_key_hex_, is_value_hex_).c_str()); + fprintf(stdout, "%s\n", + PrintKeyValue(key, value, is_key_hex_, is_value_hex_).c_str()); } else { if (s.IsNotFound()) { fprintf(stdout, "Not found %s\n", tokens[1].c_str()); diff --git a/tools/ldb_tool.cc b/tools/ldb_tool.cc index 08a22c0adbb..fdab8829895 100644 --- a/tools/ldb_tool.cc +++ b/tools/ldb_tool.cc @@ -49,6 +49,8 @@ void LDBCommandRunner::PrintHelp(const LDBOptions& ldb_options, " : Try to load option file from DB.\n"); ret.append(" --" + LDBCommand::ARG_DISABLE_CONSISTENCY_CHECKS + " : Set options.force_consistency_checks = false.\n"); + ret.append(" --" + LDBCommand::ARG_FILE_PRELOAD + + " : Set options.file_preload = \n");
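Taken together, the patch exposes the new behavior through AdvancedColumnFamilyOptions::file_preload, the options-string parser, and the ldb --file_preload flag. A minimal, editor-supplied sketch (not part of the patch) of how an application might keep the preload but opt out of table-cache pinning, assuming only the enum and option added above; the database path is hypothetical:

#include <cassert>

#include "rocksdb/db.h"
#include "rocksdb/options.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Warm the table cache at open/compaction time, but leave the entries
  // evictable instead of pinning them for the life of the process.
  // (kFilePreloadDisabled would skip opening table files at start-up.)
  options.file_preload = rocksdb::kFilePreloadWithoutPinning;

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/file_preload_example", &db);
  assert(s.ok());
  delete db;
  return 0;
}

The same value should also round-trip through the options-string machinery touched in options/cf_options.cc, e.g. "file_preload=kFilePreloadWithoutPinning", as exercised by the new entry in options_settable_test.cc.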