Unverified commit c99cf9d2 authored by yiwu-arbug, committed by GitHub

Upgrade RocksDB to 6.4 (#61)

Summary:
Update the code to build against RocksDB 6.4.x. There are a few internal interface changes, and a number of header files have moved between 5.18 and 6.4. titandb_bench is also updated to match the version in 6.4.x.
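Most of the mechanical churn in this change comes from headers that moved between RocksDB 5.18 and 6.4. The pairs below are collected from the hunks that follow, as a quick reference (illustrative only, not an exhaustive list):

```cpp
// Include-path moves applied throughout this change (old path in the comment).
#include "file/filename.h"                       // was "util/filename.h"
#include "test_util/testharness.h"               // was "util/testharness.h"
#include "test_util/sync_point.h"                // was "util/sync_point.h"
#include "test_util/testutil.h"                  // was "util/testutil.h"
#include "test_util/fault_injection_test_env.h"  // was "util/fault_injection_test_env.h"
#include "logging/logging.h"                     // was "util/logging.h"
#include "db/db_impl/db_impl.h"                  // was "db/db_impl.h"
```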

Test Plan:
Travis
parent 19ea115a
@@ -9,7 +9,7 @@ if (NOT ROCKSDB_GIT_REPO)
 endif()
 if (NOT ROCKSDB_GIT_BRANCH)
-  set(ROCKSDB_GIT_BRANCH "tikv-3.0")
+  set(ROCKSDB_GIT_BRANCH "6.4.tikv")
 endif()
 if (NOT DEFINED ROCKSDB_DIR)
......
@@ -42,5 +42,5 @@ ctest -R titan
 bash scripts/format-diff.sh
 ```
-## Compatibility
-Currently Titan is only compatible with RocksDB 5.18.
+## Compatibility with RocksDB
+Current version of Titan is developed and tested with RocksDB 6.4.
@@ -32,7 +32,7 @@ endif()
 option(WITH_ZLIB "build with zlib" OFF)
 if (WITH_ZLIB)
-  find_package(zlib REQUIRED)
+  find_package(ZLIB REQUIRED)
   add_definitions(-DZLIB)
   include_directories(${ZLIB_INCLUDE_DIR})
 endif()
......
@@ -3,8 +3,8 @@
 #include <map>
 #include <unordered_map>
+#include "logging/logging.h"
 #include "rocksdb/options.h"
-#include "util/logging.h"
 namespace rocksdb {
 namespace titandb {
......
#include "blob_file_cache.h" #include "blob_file_cache.h"
#include "file/filename.h"
#include "util.h" #include "util.h"
#include "util/filename.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
......
@@ -19,7 +19,10 @@ BlobFileIterator::~BlobFileIterator() {}
 bool BlobFileIterator::Init() {
   Slice slice;
   char header_buf[BlobFileHeader::kEncodedLength];
-  status_ = file_->Read(0, BlobFileHeader::kEncodedLength, &slice, header_buf);
+  // With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
+  // is only used for GC, we always set for_compaction to true.
+  status_ = file_->Read(0, BlobFileHeader::kEncodedLength, &slice, header_buf,
+                        true /*for_compaction*/);
   if (!status_.ok()) {
     return false;
   }
@@ -29,8 +32,11 @@ bool BlobFileIterator::Init() {
     return false;
   }
   char footer_buf[BlobFileFooter::kEncodedLength];
+  // With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
+  // is only used for GC, we always set for_compaction to true.
   status_ = file_->Read(file_size_ - BlobFileFooter::kEncodedLength,
-                        BlobFileFooter::kEncodedLength, &slice, footer_buf);
+                        BlobFileFooter::kEncodedLength, &slice, footer_buf,
+                        true /*for_compaction*/);
   if (!status_.ok()) return false;
   BlobFileFooter blob_file_footer;
   status_ = blob_file_footer.DecodeFrom(&slice);
@@ -74,8 +80,10 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {
   FixedSlice<kBlobHeaderSize> header_buffer;
   iterate_offset_ = BlobFileHeader::kEncodedLength;
   for (; iterate_offset_ < offset; iterate_offset_ += total_length) {
+    // With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
+    // is only used for GC, we always set for_compaction to true.
     status_ = file_->Read(iterate_offset_, kBlobHeaderSize, &header_buffer,
-                          header_buffer.get());
+                          header_buffer.get(), true /*for_compaction*/);
     if (!status_.ok()) return;
     status_ = decoder_.DecodeHeader(&header_buffer);
     if (!status_.ok()) return;
@@ -88,8 +96,10 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {
 void BlobFileIterator::GetBlobRecord() {
   FixedSlice<kBlobHeaderSize> header_buffer;
+  // With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
+  // is only used for GC, we always set for_compaction to true.
   status_ = file_->Read(iterate_offset_, kBlobHeaderSize, &header_buffer,
-                        header_buffer.get());
+                        header_buffer.get(), true /*for_compaction*/);
   if (!status_.ok()) return;
   status_ = decoder_.DecodeHeader(&header_buffer);
   if (!status_.ok()) return;
@@ -97,8 +107,10 @@ void BlobFileIterator::GetBlobRecord() {
   Slice record_slice;
   auto record_size = decoder_.GetRecordSize();
   buffer_.resize(record_size);
+  // With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
+  // is only used for GC, we always set for_compaction to true.
   status_ = file_->Read(iterate_offset_ + kBlobHeaderSize, record_size,
-                        &record_slice, buffer_.data());
+                        &record_slice, buffer_.data(), true /*for_compaction*/);
   if (status_.ok()) {
     status_ =
         decoder_.DecodeRecord(&record_slice, &cur_blob_record_, &uncompressed_);
......
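A note on the pattern above: in 5.18 Titan enabled the rate limiter once by passing `true /*for compaction*/` to the `RandomAccessFileReader` constructor (removed in the `blob_file_reader.cc` hunk further down), whereas in 6.4 the flag is supplied on each `Read()` call. A minimal sketch of the new call shape, assuming a reader like the one owned by `BlobFileIterator` (the helper name is hypothetical):

```cpp
// Sketch only (not part of the diff): the 6.4-style read on Titan's GC path.
// The trailing boolean is for_compaction, which routes the read through the
// rate limiter configured on the file reader.
bool ReadBlobFileHeader(RandomAccessFileReader* file) {
  Slice slice;
  char header_buf[BlobFileHeader::kEncodedLength];
  Status s = file->Read(0 /*offset*/, BlobFileHeader::kEncodedLength, &slice,
                        header_buf, true /*for_compaction*/);
  return s.ok();  // failures are surfaced to the caller exactly as before
}
```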
@@ -14,6 +14,7 @@
 namespace rocksdb {
 namespace titandb {
+// Used by GC job for iterate through blob file.
 class BlobFileIterator {
  public:
   const uint64_t kMinReadaheadSize = 4 << 10;
......
@@ -2,11 +2,12 @@
 #include <cinttypes>
+#include "file/filename.h"
+#include "test_util/testharness.h"
 #include "blob_file_builder.h"
 #include "blob_file_cache.h"
 #include "blob_file_reader.h"
-#include "util/filename.h"
-#include "util/testharness.h"
 namespace rocksdb {
 namespace titandb {
......
@@ -6,10 +6,10 @@
 #include <inttypes.h>
+#include "file/filename.h"
+#include "test_util/sync_point.h"
 #include "util/crc32c.h"
-#include "util/filename.h"
 #include "util/string_util.h"
-#include "util/sync_point.h"
 #include "titan_stats.h"
@@ -28,12 +28,9 @@ Status NewBlobFileReader(uint64_t file_number, uint64_t readahead_size,
   if (readahead_size > 0) {
     file = NewReadaheadRandomAccessFile(std::move(file), readahead_size);
   }
-  // Currently only `BlobGCJob` will call `NewBlobFileReader()`. We set
-  // `for_compaction=true` in this case to enable rate limiter.
   result->reset(new RandomAccessFileReader(
       std::move(file), file_name, nullptr /*env*/, nullptr /*stats*/,
-      0 /*hist_type*/, nullptr /*file_read_hist*/, env_options.rate_limiter,
-      true /*for compaction*/));
+      0 /*hist_type*/, nullptr /*file_read_hist*/, env_options.rate_limiter));
   return s;
 }
......
#include "test_util/testharness.h"
#include "blob_file_size_collector.h" #include "blob_file_size_collector.h"
#include "util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
...@@ -50,8 +51,9 @@ class BlobFileSizeCollectorTest : public testing::Test { ...@@ -50,8 +51,9 @@ class BlobFileSizeCollectorTest : public testing::Test {
CompressionOptions compression_opts; CompressionOptions compression_opts;
TableBuilderOptions options(cf_ioptions_, cf_moptions_, TableBuilderOptions options(cf_ioptions_, cf_moptions_,
cf_ioptions_.internal_comparator, &collectors_, cf_ioptions_.internal_comparator, &collectors_,
kNoCompression, compression_opts, nullptr, kNoCompression, 0 /*sample_for_compression*/,
false, kDefaultColumnFamilyName, 0); compression_opts, false /*skip_filters*/,
kDefaultColumnFamilyName, 0 /*level*/);
result->reset(table_factory_->NewTableBuilder(options, 0, file)); result->reset(table_factory_->NewTableBuilder(options, 0, file));
ASSERT_TRUE(*result); ASSERT_TRUE(*result);
} }
......
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_file_builder.h" #include "blob_file_builder.h"
#include "blob_file_cache.h" #include "blob_file_cache.h"
#include "blob_file_reader.h" #include "blob_file_reader.h"
#include "util/filename.h"
#include "util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
......
#include "blob_format.h" #include "blob_format.h"
#include "test_util/sync_point.h"
#include "util/crc32c.h" #include "util/crc32c.h"
#include "util/sync_point.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
...@@ -40,7 +40,7 @@ void BlobEncoder::EncodeRecord(const BlobRecord& record) { ...@@ -40,7 +40,7 @@ void BlobEncoder::EncodeRecord(const BlobRecord& record) {
CompressionType compression; CompressionType compression;
record.EncodeTo(&record_buffer_); record.EncodeTo(&record_buffer_);
record_ = Compress(compression_ctx_, record_buffer_, &compressed_buffer_, record_ = Compress(compression_info_, record_buffer_, &compressed_buffer_,
&compression); &compression);
assert(record_.size() < std::numeric_limits<uint32_t>::max()); assert(record_.size() < std::numeric_limits<uint32_t>::max());
...@@ -82,7 +82,8 @@ Status BlobDecoder::DecodeRecord(Slice* src, BlobRecord* record, ...@@ -82,7 +82,8 @@ Status BlobDecoder::DecodeRecord(Slice* src, BlobRecord* record,
return DecodeInto(input, record); return DecodeInto(input, record);
} }
UncompressionContext ctx(compression_); UncompressionContext ctx(compression_);
Status s = Uncompress(ctx, input, buffer); UncompressionInfo info(ctx, UncompressionDict::GetEmptyDict(), compression_);
Status s = Uncompress(info, input, buffer);
if (!s.ok()) { if (!s.ok()) {
return s; return s;
} }
......
@@ -32,7 +32,11 @@ struct BlobRecord {
 class BlobEncoder {
  public:
-  BlobEncoder(CompressionType compression) : compression_ctx_(compression) {}
+  BlobEncoder(CompressionType compression)
+      : compression_ctx_(compression),
+        compression_info_(compression_opt_, compression_ctx_,
+                          CompressionDict::GetEmptyDict(), compression,
+                          0 /*sample_for_compression*/) {}
   void EncodeRecord(const BlobRecord& record);
@@ -46,7 +50,9 @@ class BlobEncoder {
   Slice record_;
   std::string record_buffer_;
   std::string compressed_buffer_;
+  CompressionOptions compression_opt_;
   CompressionContext compression_ctx_;
+  CompressionInfo compression_info_;
 };
 class BlobDecoder {
......
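The `BlobEncoder` change above is the core of the compression-API migration: 6.4's `Compress()`/`Uncompress()` helpers take `CompressionInfo`/`UncompressionInfo`, which wrap the old context objects together with options and a (here unused) dictionary. A condensed sketch of the construction pattern used in this diff, as it would appear inside a function body, assuming Snappy and no dictionary:

```cpp
// Sketch based on the constructor calls in this diff; Titan does not use
// compression dictionaries, so the empty dicts are passed explicitly.
CompressionOptions compression_opt;  // default options
CompressionContext compression_ctx(kSnappyCompression);
CompressionInfo compression_info(compression_opt, compression_ctx,
                                 CompressionDict::GetEmptyDict(),
                                 kSnappyCompression,
                                 0 /*sample_for_compression*/);

UncompressionContext uncompression_ctx(kSnappyCompression);
UncompressionInfo uncompression_info(
    uncompression_ctx, UncompressionDict::GetEmptyDict(), kSnappyCompression);
```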
#include "test_util/testharness.h"
#include "blob_format.h" #include "blob_format.h"
#include "testutil.h" #include "testutil.h"
#include "util.h" #include "util.h"
#include "util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
......
@@ -3,6 +3,8 @@
 #endif
 #include <inttypes.h>
+#include <memory>
 #include "blob_gc_job.h"
 namespace rocksdb {
@@ -491,7 +493,7 @@ Status BlobGCJob::InstallOutputBlobFiles() {
       }
     }
   } else {
-    std::vector<unique_ptr<BlobFileHandle>> handles;
+    std::vector<std::unique_ptr<BlobFileHandle>> handles;
     std::string to_delete_files;
     for (auto& builder : this->blob_file_builders_) {
       if (!to_delete_files.empty()) {
@@ -565,7 +567,8 @@ Status BlobGCJob::DeleteInputBlobFiles() {
   VersionEdit edit;
   edit.SetColumnFamilyID(blob_gc_->column_family_handle()->GetID());
   for (const auto& file : blob_gc_->sampled_inputs()) {
-    ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%llu]",
+    ROCKS_LOG_INFO(db_options_.info_log,
+                   "Titan add obsolete file [%" PRIu64 "]",
                    file->file_number());
     metrics_.blob_db_gc_num_files++;
     edit.DeleteBlobFile(file->file_number(), obsolete_sequence);
......
@@ -4,7 +4,7 @@
 #include "blob_file_iterator.h"
 #include "blob_file_manager.h"
 #include "blob_gc.h"
-#include "db/db_impl.h"
+#include "db/db_impl/db_impl.h"
 #include "rocksdb/statistics.h"
 #include "rocksdb/status.h"
 #include "titan/options.h"
......
#include "blob_gc_job.h" #include "rocksdb/convenience.h"
#include "test_util/testharness.h"
#include "blob_gc_job.h"
#include "blob_gc_picker.h" #include "blob_gc_picker.h"
#include "db_impl.h" #include "db_impl.h"
#include "rocksdb/convenience.h"
#include "util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
......
#include "blob_gc_picker.h" #include "blob_gc_picker.h"
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
...@@ -16,9 +22,6 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC( ...@@ -16,9 +22,6 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC(
uint64_t batch_size = 0; uint64_t batch_size = 0;
uint64_t estimate_output_size = 0; uint64_t estimate_output_size = 0;
// ROCKS_LOG_INFO(db_options_.info_log, "blob file num:%lu gc score:%lu",
// blob_storage->NumBlobFiles(),
// blob_storage->gc_score().size());
bool stop_picking = false; bool stop_picking = false;
bool maybe_continue_next_time = false; bool maybe_continue_next_time = false;
uint64_t next_gc_size = 0; uint64_t next_gc_size = 0;
...@@ -30,23 +33,11 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC( ...@@ -30,23 +33,11 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC(
// or this file had been GCed // or this file had been GCed
continue; continue;
} }
// ROCKS_LOG_INFO(db_options_.info_log,
// "file number:%lu score:%f being_gc:%d pending:%d, "
// "size:%lu discard:%lu mark_for_gc:%d
// mark_for_sample:%d", blob_file->file_number_,
// gc_score.score, blob_file->being_gc,
// blob_file->pending, blob_file->file_size_,
// blob_file->discardable_size_,
// blob_file->marked_for_gc_,
// blob_file->marked_for_sample);
if (!CheckBlobFile(blob_file.get())) { if (!CheckBlobFile(blob_file.get())) {
ROCKS_LOG_INFO(db_options_.info_log, "file number:%lu no need gc", ROCKS_LOG_INFO(db_options_.info_log, "Blob file %" PRIu64 " no need gc",
blob_file->file_number()); blob_file->file_number());
continue; continue;
} }
if (!stop_picking) { if (!stop_picking) {
blob_files.push_back(blob_file.get()); blob_files.push_back(blob_file.get());
batch_size += blob_file->file_size(); batch_size += blob_file->file_size();
......
@@ -2,14 +2,15 @@
 #include <memory>
+#include "db/column_family.h"
+#include "db/write_callback.h"
+#include "file/filename.h"
+#include "rocksdb/status.h"
 #include "blob_file_manager.h"
 #include "blob_format.h"
 #include "blob_gc.h"
 #include "blob_storage.h"
-#include "db/column_family.h"
-#include "db/write_callback.h"
-#include "rocksdb/status.h"
-#include "util/filename.h"
 namespace rocksdb {
 namespace titandb {
......
#include "blob_gc_picker.h" #include "blob_gc_picker.h"
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_file_builder.h" #include "blob_file_builder.h"
#include "blob_file_cache.h" #include "blob_file_cache.h"
#include "blob_file_iterator.h" #include "blob_file_iterator.h"
#include "blob_file_reader.h" #include "blob_file_reader.h"
#include "util/filename.h"
#include "util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
......
@@ -65,7 +65,8 @@ class TitanDBImpl::FileManager : public BlobFileManager {
       }
       if (!s.ok()) return s;
-      ROCKS_LOG_INFO(db_->db_options_.info_log, "Titan adding blob file [%llu]",
+      ROCKS_LOG_INFO(db_->db_options_.info_log,
+                     "Titan adding blob file [%" PRIu64 "]",
                      file.first->file_number());
       edit.AddBlobFile(file.first);
     }
@@ -746,7 +747,7 @@ Status TitanDBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
   if (!bs) {
     // TODO: Should treat it as background error and make DB read-only.
     ROCKS_LOG_ERROR(db_options_.info_log,
-                    "Column family id:% " PRIu32 " not Found.", cf_id);
+                    "Column family id:%" PRIu32 " not Found.", cf_id);
     return Status::NotFound("Column family id: " + std::to_string(cf_id) +
                             " not Found.");
   }
@@ -1008,7 +1009,7 @@ void TitanDBImpl::OnCompactionCompleted(
   if (!bs) {
     // TODO: Should treat it as background error and make DB read-only.
     ROCKS_LOG_ERROR(db_options_.info_log,
-                    "OnCompactionCompleted[%d] Column family id:% " PRIu32
+                    "OnCompactionCompleted[%d] Column family id:%" PRIu32
                     " not Found.",
                     compaction_job_info.job_id, compaction_job_info.cf_id);
     return;
......
 #pragma once
 #include "blob_file_manager.h"
-#include "db/db_impl.h"
+#include "db/db_impl/db_impl.h"
 #include "rocksdb/statistics.h"
 #include "table_factory.h"
 #include "titan/db.h"
......
@@ -10,8 +10,8 @@
 #include <unordered_map>
 #include "db/db_iter.h"
+#include "logging/logging.h"
 #include "rocksdb/env.h"
-#include "util/logging.h"
 #include "titan_stats.h"
......
@@ -6,9 +6,9 @@
 #include <inttypes.h>
+#include "logging/logging.h"
 #include "options/options_helper.h"
 #include "rocksdb/convenience.h"
-#include "util/logging.h"
 namespace rocksdb {
 namespace titandb {
......
#include "table/table_builder.h" #include "table/table_builder.h"
#include "file/filename.h"
#include "table/table_reader.h"
#include "test_util/testharness.h"
#include "blob_file_manager.h" #include "blob_file_manager.h"
#include "blob_file_reader.h" #include "blob_file_reader.h"
#include "table/table_reader.h"
#include "table_factory.h" #include "table_factory.h"
#include "util/filename.h"
#include "util/testharness.h"
#include "version_set.h" #include "version_set.h"
namespace rocksdb { namespace rocksdb {
...@@ -149,8 +150,9 @@ class TableBuilderTest : public testing::Test { ...@@ -149,8 +150,9 @@ class TableBuilderTest : public testing::Test {
CompressionOptions compression_opts; CompressionOptions compression_opts;
TableBuilderOptions options(cf_ioptions_, cf_moptions_, TableBuilderOptions options(cf_ioptions_, cf_moptions_,
cf_ioptions_.internal_comparator, &collectors_, cf_ioptions_.internal_comparator, &collectors_,
kNoCompression, compression_opts, nullptr, kNoCompression, 0 /*sample_for_compression*/,
false, kDefaultColumnFamilyName, 0); compression_opts, false /*skip_filters*/,
kDefaultColumnFamilyName, 0 /*level*/);
result->reset(table_factory_->NewTableBuilder(options, 0, file)); result->reset(table_factory_->NewTableBuilder(options, 0, file));
} }
...@@ -203,7 +205,9 @@ TEST_F(TableBuilderTest, Basic) { ...@@ -203,7 +205,9 @@ TEST_F(TableBuilderTest, Basic) {
ReadOptions ro; ReadOptions ro;
std::unique_ptr<InternalIterator> iter; std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr)); iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
nullptr /*arena*/, false /*skip_filters*/,
TableReaderCaller::kUncategorized));
iter->SeekToFirst(); iter->SeekToFirst();
for (char i = 0; i < n; i++) { for (char i = 0; i < n; i++) {
ASSERT_TRUE(iter->Valid()); ASSERT_TRUE(iter->Valid());
...@@ -252,7 +256,9 @@ TEST_F(TableBuilderTest, NoBlob) { ...@@ -252,7 +256,9 @@ TEST_F(TableBuilderTest, NoBlob) {
ReadOptions ro; ReadOptions ro;
std::unique_ptr<InternalIterator> iter; std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr)); iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
nullptr /*arena*/, false /*skip_filters*/,
TableReaderCaller::kUncategorized));
iter->SeekToFirst(); iter->SeekToFirst();
for (char i = 0; i < n; i++) { for (char i = 0; i < n; i++) {
ASSERT_TRUE(iter->Valid()); ASSERT_TRUE(iter->Valid());
......
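Two 6.4 signature changes drive the test updates above: `TableBuilderOptions` now takes `sample_for_compression` ahead of the `CompressionOptions`, and `TableReader::NewIterator` requires the caller to identify itself via `TableReaderCaller`. A condensed sketch of both call sites as they look after the upgrade (variable names are the ones used in the tests above; not a standalone program):

```cpp
// Sketch only: the post-upgrade call shapes used by the Titan tests.
TableBuilderOptions options(cf_ioptions_, cf_moptions_,
                            cf_ioptions_.internal_comparator, &collectors_,
                            kNoCompression, 0 /*sample_for_compression*/,
                            compression_opts, false /*skip_filters*/,
                            kDefaultColumnFamilyName, 0 /*level*/);

std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
                                    nullptr /*arena*/, false /*skip_filters*/,
                                    TableReaderCaller::kUncategorized));
```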
 #pragma once
 #include "rocksdb/cache.h"
+#include "test_util/testharness.h"
 #include "util/compression.h"
-#include "util/testharness.h"
 namespace rocksdb {
 namespace titandb {
......
@@ -5,12 +5,12 @@
 #include "db_impl.h"
 #include "db_iter.h"
+#include "file/filename.h"
 #include "port/port.h"
 #include "rocksdb/utilities/debug.h"
+#include "test_util/testharness.h"
 #include "titan/db.h"
-#include "util/filename.h"
 #include "util/random.h"
-#include "util/testharness.h"
 namespace rocksdb {
 namespace titandb {
......
@@ -2,17 +2,18 @@
 #include <options/cf_options.h>
 #include <unordered_map>
+#include "file/filename.h"
+#include "rocksdb/utilities/debug.h"
+#include "test_util/sync_point.h"
+#include "test_util/testharness.h"
+#include "util/random.h"
 #include "blob_file_iterator.h"
 #include "blob_file_reader.h"
 #include "db_impl.h"
 #include "db_iter.h"
-#include "rocksdb/utilities/debug.h"
 #include "titan/db.h"
 #include "titan_fault_injection_test_env.h"
-#include "util/filename.h"
-#include "util/random.h"
-#include "util/sync_point.h"
-#include "util/testharness.h"
 namespace rocksdb {
 namespace titandb {
......
 #pragma once
 #include "rocksdb/env.h"
-#include "util/fault_injection_test_env.h"
+#include "test_util/fault_injection_test_env.h"
 #include <memory>
......
@@ -11,46 +11,46 @@ bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
   return compressed_size < raw_size - (raw_size / 8u);
 }
-Slice Compress(const CompressionContext& ctx, const Slice& input,
+Slice Compress(const CompressionInfo& info, const Slice& input,
                std::string* output, CompressionType* type) {
-  *type = ctx.type();
-  if (ctx.type() == kNoCompression) {
+  *type = info.type();
+  if (info.type() == kNoCompression) {
     return input;
   }
   // Returns compressed block contents if:
   // (1) the compression method is supported in this platform and
   // (2) the compression rate is "good enough".
-  switch (ctx.type()) {
+  switch (info.type()) {
     case kSnappyCompression:
-      if (Snappy_Compress(ctx, input.data(), input.size(), output) &&
+      if (Snappy_Compress(info, input.data(), input.size(), output) &&
           GoodCompressionRatio(output->size(), input.size())) {
         return *output;
       }
       break;
     case kZlibCompression:
-      if (Zlib_Compress(ctx, kCompressionFormat, input.data(), input.size(),
+      if (Zlib_Compress(info, kCompressionFormat, input.data(), input.size(),
                         output) &&
           GoodCompressionRatio(output->size(), input.size())) {
         return *output;
       }
       break;
     case kBZip2Compression:
-      if (BZip2_Compress(ctx, kCompressionFormat, input.data(), input.size(),
+      if (BZip2_Compress(info, kCompressionFormat, input.data(), input.size(),
                          output) &&
          GoodCompressionRatio(output->size(), input.size())) {
        return *output;
      }
      break;
    case kLZ4Compression:
-      if (LZ4_Compress(ctx, kCompressionFormat, input.data(), input.size(),
+      if (LZ4_Compress(info, kCompressionFormat, input.data(), input.size(),
                        output) &&
          GoodCompressionRatio(output->size(), input.size())) {
        return *output;
      }
      break;
    case kLZ4HCCompression:
-      if (LZ4HC_Compress(ctx, kCompressionFormat, input.data(), input.size(),
+      if (LZ4HC_Compress(info, kCompressionFormat, input.data(), input.size(),
                          output) &&
          GoodCompressionRatio(output->size(), input.size())) {
        return *output;
@@ -64,7 +64,7 @@ Slice Compress(const CompressionContext& ctx, const Slice& input,
      break;
    case kZSTD:
    case kZSTDNotFinalCompression:
-      if (ZSTD_Compress(ctx, input.data(), input.size(), output) &&
+      if (ZSTD_Compress(info, input.data(), input.size(), output) &&
          GoodCompressionRatio(output->size(), input.size())) {
        return *output;
      }
@@ -78,13 +78,13 @@ Slice Compress(const CompressionContext& ctx, const Slice& input,
   return input;
 }
-Status Uncompress(const UncompressionContext& ctx, const Slice& input,
+Status Uncompress(const UncompressionInfo& info, const Slice& input,
                   OwnedSlice* output) {
   int size = 0;
   CacheAllocationPtr ubuf;
-  assert(ctx.type() != kNoCompression);
-  switch (ctx.type()) {
+  assert(info.type() != kNoCompression);
+  switch (info.type()) {
     case kSnappyCompression: {
       size_t usize = 0;
       if (!Snappy_GetUncompressedLength(input.data(), input.size(), &usize)) {
@@ -98,7 +98,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
      break;
    }
    case kZlibCompression:
-      ubuf = Zlib_Uncompress(ctx, input.data(), input.size(), &size,
+      ubuf = Zlib_Uncompress(info, input.data(), input.size(), &size,
                             kCompressionFormat);
      if (!ubuf.get()) {
        return Status::Corruption("Corrupted compressed blob", "Zlib");
@@ -114,7 +114,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
      output->reset(std::move(ubuf), size);
      break;
    case kLZ4Compression:
-      ubuf = LZ4_Uncompress(ctx, input.data(), input.size(), &size,
+      ubuf = LZ4_Uncompress(info, input.data(), input.size(), &size,
                            kCompressionFormat);
      if (!ubuf.get()) {
        return Status::Corruption("Corrupted compressed blob", "LZ4");
@@ -122,7 +122,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
      output->reset(std::move(ubuf), size);
      break;
    case kLZ4HCCompression:
-      ubuf = LZ4_Uncompress(ctx, input.data(), input.size(), &size,
+      ubuf = LZ4_Uncompress(info, input.data(), input.size(), &size,
                            kCompressionFormat);
      if (!ubuf.get()) {
        return Status::Corruption("Corrupted compressed blob", "LZ4HC");
@@ -138,7 +138,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
      break;
    case kZSTD:
    case kZSTDNotFinalCompression:
-      ubuf = ZSTD_Uncompress(ctx, input.data(), input.size(), &size);
+      ubuf = ZSTD_Uncompress(info, input.data(), input.size(), &size);
      if (!ubuf.get()) {
        return Status::Corruption("Corrupted compressed blob", "ZSTD");
      }
......
@@ -55,13 +55,13 @@ class FixedSlice : public Slice {
 // compressed data. However, if the compression ratio is not good, it
 // returns the input slice directly and sets "*type" to
 // kNoCompression.
-Slice Compress(const CompressionContext& ctx, const Slice& input,
+Slice Compress(const CompressionInfo& info, const Slice& input,
                std::string* output, CompressionType* type);
 // Uncompresses the input data according to the uncompression type.
 // If successful, fills "*buffer" with the uncompressed data and
 // points "*output" to it.
-Status Uncompress(const UncompressionContext& ctx, const Slice& input,
+Status Uncompress(const UncompressionInfo& info, const Slice& input,
                   OwnedSlice* output);
 void UnrefCacheHandle(void* cache, void* handle);
......
#include "util.h" #include "util.h"
#include "util/testharness.h"
#include "test_util/testharness.h"
namespace rocksdb { namespace rocksdb {
namespace titandb { namespace titandb {
...@@ -10,14 +11,20 @@ TEST(UtilTest, Compression) { ...@@ -10,14 +11,20 @@ TEST(UtilTest, Compression) {
std::string input(1024, 'a'); std::string input(1024, 'a');
for (auto compression : for (auto compression :
{kSnappyCompression, kZlibCompression, kLZ4Compression, kZSTD}) { {kSnappyCompression, kZlibCompression, kLZ4Compression, kZSTD}) {
CompressionOptions compression_opt;
CompressionContext compression_ctx(compression); CompressionContext compression_ctx(compression);
CompressionInfo compression_info(
compression_opt, compression_ctx, CompressionDict::GetEmptyDict(),
compression, 0 /* sample_for_compression */);
std::string buffer; std::string buffer;
auto compressed = Compress(compression_ctx, input, &buffer, &compression); auto compressed = Compress(compression_info, input, &buffer, &compression);
if (compression != kNoCompression) { if (compression != kNoCompression) {
ASSERT_TRUE(compressed.size() <= input.size()); ASSERT_TRUE(compressed.size() <= input.size());
UncompressionContext uncompression_ctx(compression); UncompressionContext uncompression_ctx(compression);
UncompressionInfo uncompression_info(
uncompression_ctx, UncompressionDict::GetEmptyDict(), compression);
OwnedSlice output; OwnedSlice output;
ASSERT_OK(Uncompress(uncompression_ctx, compressed, &output)); ASSERT_OK(Uncompress(uncompression_info, compressed, &output));
ASSERT_EQ(output, input); ASSERT_EQ(output, input);
} }
} }
......
@@ -2,8 +2,9 @@
 #include <inttypes.h>
+#include "file/filename.h"
 #include "edit_collector.h"
-#include "util/filename.h"
 namespace rocksdb {
 namespace titandb {
@@ -71,7 +72,7 @@ Status VersionSet::Recover() {
   LogReporter reporter;
   reporter.status = &s;
   log::Reader reader(nullptr, std::move(file), &reporter, true /*checksum*/,
-                     0 /*initial_offset*/, 0);
+                     0 /*log_num*/);
   Slice record;
   std::string scratch;
   EditCollector collector;
@@ -248,7 +249,8 @@ Status VersionSet::DropColumnFamilies(
     VersionEdit edit;
     edit.SetColumnFamilyID(it->first);
     for (auto& file : it->second->files_) {
-      ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%llu]",
+      ROCKS_LOG_INFO(db_options_.info_log,
+                     "Titan add obsolete file [%" PRIu64 "]",
                      file.second->file_number());
       edit.DeleteBlobFile(file.first, obsolete_sequence);
     }
......
#include "file/filename.h"
#include "test_util/testharness.h"
#include "edit_collector.h" #include "edit_collector.h"
#include "testutil.h" #include "testutil.h"
#include "util.h" #include "util.h"
#include "util/filename.h"
#include "util/testharness.h"
#include "version_edit.h" #include "version_edit.h"
#include "version_set.h" #include "version_set.h"
......
@@ -43,9 +43,10 @@ int main() {
 #include <queue>
 #include <thread>
-#include "db/db_impl.h"
+#include "db/db_impl/db_impl.h"
 #include "db/version_set.h"
 #include "hdfs/env_hdfs.h"
+#include "logging/logging.h"
 #include "monitoring/histogram.h"
 #include "options/options_helper.h"
 #include "port/port.h"
@@ -65,15 +66,14 @@ int main() {
 #include "util/compression.h"
 #include "util/crc32c.h"
 #include "util/gflags_compat.h"
-#include "util/logging.h"
 #include "util/mutexlock.h"
 #include "util/random.h"
 #include "util/string_util.h"
 // SyncPoint is not supported in Released Windows Mode.
 #if !(defined NDEBUG) || !defined(OS_WIN)
-#include "util/sync_point.h"
+#include "test_util/sync_point.h"
 #endif  // !(defined NDEBUG) || !defined(OS_WIN)
-#include "util/testutil.h"
+#include "test_util/testutil.h"
 #include "utilities/merge_operators.h"
......