Unverified Commit c99cf9d2 authored by yiwu-arbug, committed by GitHub

Upgrade RocksDB to 6.4 (#61)

Summary:
Update the code to build on top of RocksDB 6.4.x. There are a few internal interface changes, and several files have moved between 5.18 and 6.4. titandb_bench is also updated to match the version shipped with 6.4.x.

Test Plan:
Travis
parent 19ea115a
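Much of the mechanical churn in this diff comes from headers that moved between RocksDB 5.18 and 6.4. The mapping below is compiled from the include changes that appear later in this diff (only paths touched here are listed):

```cpp
// Include-path moves between RocksDB 5.18 and 6.4, as applied throughout this diff.
#include "file/filename.h"                       // was "util/filename.h"
#include "logging/logging.h"                     // was "util/logging.h"
#include "test_util/testharness.h"               // was "util/testharness.h"
#include "test_util/testutil.h"                  // was "util/testutil.h"
#include "test_util/sync_point.h"                // was "util/sync_point.h"
#include "test_util/fault_injection_test_env.h"  // was "util/fault_injection_test_env.h"
#include "db/db_impl/db_impl.h"                  // was "db/db_impl.h"
```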
......@@ -9,7 +9,7 @@ if (NOT ROCKSDB_GIT_REPO)
endif()
if (NOT ROCKSDB_GIT_BRANCH)
set(ROCKSDB_GIT_BRANCH "tikv-3.0")
set(ROCKSDB_GIT_BRANCH "6.4.tikv")
endif()
if (NOT DEFINED ROCKSDB_DIR)
......
......@@ -42,5 +42,5 @@ ctest -R titan
bash scripts/format-diff.sh
```
## Compatibility
Currently Titan is only compatible with RocksDB 5.18.
## Compatibility with RocksDB
The current version of Titan is developed and tested with RocksDB 6.4.
......@@ -32,7 +32,7 @@ endif()
option(WITH_ZLIB "build with zlib" OFF)
if (WITH_ZLIB)
find_package(zlib REQUIRED)
find_package(ZLIB REQUIRED)
add_definitions(-DZLIB)
include_directories(${ZLIB_INCLUDE_DIR})
endif()
......
......@@ -3,8 +3,8 @@
#include <map>
#include <unordered_map>
#include "logging/logging.h"
#include "rocksdb/options.h"
#include "util/logging.h"
namespace rocksdb {
namespace titandb {
......
#include "blob_file_cache.h"
#include "file/filename.h"
#include "util.h"
#include "util/filename.h"
namespace rocksdb {
namespace titandb {
......
......@@ -19,7 +19,10 @@ BlobFileIterator::~BlobFileIterator() {}
bool BlobFileIterator::Init() {
Slice slice;
char header_buf[BlobFileHeader::kEncodedLength];
status_ = file_->Read(0, BlobFileHeader::kEncodedLength, &slice, header_buf);
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(0, BlobFileHeader::kEncodedLength, &slice, header_buf,
true /*for_compaction*/);
if (!status_.ok()) {
return false;
}
......@@ -29,8 +32,11 @@ bool BlobFileIterator::Init() {
return false;
}
char footer_buf[BlobFileFooter::kEncodedLength];
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(file_size_ - BlobFileFooter::kEncodedLength,
BlobFileFooter::kEncodedLength, &slice, footer_buf);
BlobFileFooter::kEncodedLength, &slice, footer_buf,
true /*for_compaction*/);
if (!status_.ok()) return false;
BlobFileFooter blob_file_footer;
status_ = blob_file_footer.DecodeFrom(&slice);
......@@ -74,8 +80,10 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {
FixedSlice<kBlobHeaderSize> header_buffer;
iterate_offset_ = BlobFileHeader::kEncodedLength;
for (; iterate_offset_ < offset; iterate_offset_ += total_length) {
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(iterate_offset_, kBlobHeaderSize, &header_buffer,
header_buffer.get());
header_buffer.get(), true /*for_compaction*/);
if (!status_.ok()) return;
status_ = decoder_.DecodeHeader(&header_buffer);
if (!status_.ok()) return;
......@@ -88,8 +96,10 @@ void BlobFileIterator::IterateForPrev(uint64_t offset) {
void BlobFileIterator::GetBlobRecord() {
FixedSlice<kBlobHeaderSize> header_buffer;
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(iterate_offset_, kBlobHeaderSize, &header_buffer,
header_buffer.get());
header_buffer.get(), true /*for_compaction*/);
if (!status_.ok()) return;
status_ = decoder_.DecodeHeader(&header_buffer);
if (!status_.ok()) return;
......@@ -97,8 +107,10 @@ void BlobFileIterator::GetBlobRecord() {
Slice record_slice;
auto record_size = decoder_.GetRecordSize();
buffer_.resize(record_size);
// With for_compaction=true, rate_limiter is enabled. Since BlobFileIterator
// is only used for GC, we always set for_compaction to true.
status_ = file_->Read(iterate_offset_ + kBlobHeaderSize, record_size,
&record_slice, buffer_.data());
&record_slice, buffer_.data(), true /*for_compaction*/);
if (status_.ok()) {
status_ =
decoder_.DecodeRecord(&record_slice, &cur_blob_record_, &uncompressed_);
......
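The repeated comment above documents the new read path: in RocksDB 6.4, `RandomAccessFileReader::Read` takes an extra `for_compaction` flag, and passing `true` routes the read through the configured rate limiter, which Titan now does for every GC read. A minimal sketch of the call as used in this file (names taken from the diff; assumes an open `file_` reader and the Titan blob format headers):

```cpp
// Minimal sketch: read the blob file header with the 6.4 Read signature.
// true /*for_compaction*/ makes the read subject to the rate limiter.
Slice slice;
char header_buf[BlobFileHeader::kEncodedLength];
Status status = file_->Read(0 /*offset*/, BlobFileHeader::kEncodedLength, &slice,
                            header_buf, true /*for_compaction*/);
```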
......@@ -14,6 +14,7 @@
namespace rocksdb {
namespace titandb {
// Used by GC job for iterate through blob file.
class BlobFileIterator {
public:
const uint64_t kMinReadaheadSize = 4 << 10;
......
......@@ -2,11 +2,12 @@
#include <cinttypes>
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_file_builder.h"
#include "blob_file_cache.h"
#include "blob_file_reader.h"
#include "util/filename.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
......@@ -6,10 +6,10 @@
#include <inttypes.h>
#include "file/filename.h"
#include "test_util/sync_point.h"
#include "util/crc32c.h"
#include "util/filename.h"
#include "util/string_util.h"
#include "util/sync_point.h"
#include "titan_stats.h"
......@@ -28,12 +28,9 @@ Status NewBlobFileReader(uint64_t file_number, uint64_t readahead_size,
if (readahead_size > 0) {
file = NewReadaheadRandomAccessFile(std::move(file), readahead_size);
}
// Currently only `BlobGCJob` will call `NewBlobFileReader()`. We set
// `for_compaction=true` in this case to enable rate limiter.
result->reset(new RandomAccessFileReader(
std::move(file), file_name, nullptr /*env*/, nullptr /*stats*/,
0 /*hist_type*/, nullptr /*file_read_hist*/, env_options.rate_limiter,
true /*for compaction*/));
0 /*hist_type*/, nullptr /*file_read_hist*/, env_options.rate_limiter));
return s;
}
......
#include "test_util/testharness.h"
#include "blob_file_size_collector.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......@@ -50,8 +51,9 @@ class BlobFileSizeCollectorTest : public testing::Test {
CompressionOptions compression_opts;
TableBuilderOptions options(cf_ioptions_, cf_moptions_,
cf_ioptions_.internal_comparator, &collectors_,
kNoCompression, compression_opts, nullptr,
false, kDefaultColumnFamilyName, 0);
kNoCompression, 0 /*sample_for_compression*/,
compression_opts, false /*skip_filters*/,
kDefaultColumnFamilyName, 0 /*level*/);
result->reset(table_factory_->NewTableBuilder(options, 0, file));
ASSERT_TRUE(*result);
}
......
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_file_builder.h"
#include "blob_file_cache.h"
#include "blob_file_reader.h"
#include "util/filename.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
#include "blob_format.h"
#include "test_util/sync_point.h"
#include "util/crc32c.h"
#include "util/sync_point.h"
namespace rocksdb {
namespace titandb {
......@@ -40,7 +40,7 @@ void BlobEncoder::EncodeRecord(const BlobRecord& record) {
CompressionType compression;
record.EncodeTo(&record_buffer_);
record_ = Compress(compression_ctx_, record_buffer_, &compressed_buffer_,
record_ = Compress(compression_info_, record_buffer_, &compressed_buffer_,
&compression);
assert(record_.size() < std::numeric_limits<uint32_t>::max());
......@@ -82,7 +82,8 @@ Status BlobDecoder::DecodeRecord(Slice* src, BlobRecord* record,
return DecodeInto(input, record);
}
UncompressionContext ctx(compression_);
Status s = Uncompress(ctx, input, buffer);
UncompressionInfo info(ctx, UncompressionDict::GetEmptyDict(), compression_);
Status s = Uncompress(info, input, buffer);
if (!s.ok()) {
return s;
}
......
......@@ -32,7 +32,11 @@ struct BlobRecord {
class BlobEncoder {
public:
BlobEncoder(CompressionType compression) : compression_ctx_(compression) {}
BlobEncoder(CompressionType compression)
: compression_ctx_(compression),
compression_info_(compression_opt_, compression_ctx_,
CompressionDict::GetEmptyDict(), compression,
0 /*sample_for_compression*/) {}
void EncodeRecord(const BlobRecord& record);
......@@ -46,7 +50,9 @@ class BlobEncoder {
Slice record_;
std::string record_buffer_;
std::string compressed_buffer_;
CompressionOptions compression_opt_;
CompressionContext compression_ctx_;
CompressionInfo compression_info_;
};
class BlobDecoder {
......
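The constructor change above is the core of the compression API migration: 5.18's `Compress`/`Uncompress` helpers took a bare `CompressionContext`/`UncompressionContext`, while 6.4 wraps them in `CompressionInfo`/`UncompressionInfo` together with the options and a dictionary. A minimal sketch of constructing both wrappers, mirroring the code in this diff (dictionaries empty, sampling disabled, compression type chosen for illustration):

```cpp
// Sketch: building the 6.4 compression wrappers used by BlobEncoder/BlobDecoder.
CompressionOptions compression_opt;
CompressionContext compression_ctx(kLZ4Compression);
CompressionInfo compression_info(compression_opt, compression_ctx,
                                 CompressionDict::GetEmptyDict(), kLZ4Compression,
                                 0 /*sample_for_compression*/);

UncompressionContext uncompression_ctx(kLZ4Compression);
UncompressionInfo uncompression_info(uncompression_ctx,
                                     UncompressionDict::GetEmptyDict(),
                                     kLZ4Compression);
```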
#include "test_util/testharness.h"
#include "blob_format.h"
#include "testutil.h"
#include "util.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
......@@ -3,6 +3,8 @@
#endif
#include <inttypes.h>
#include <memory>
#include "blob_gc_job.h"
namespace rocksdb {
......@@ -491,7 +493,7 @@ Status BlobGCJob::InstallOutputBlobFiles() {
}
}
} else {
std::vector<unique_ptr<BlobFileHandle>> handles;
std::vector<std::unique_ptr<BlobFileHandle>> handles;
std::string to_delete_files;
for (auto& builder : this->blob_file_builders_) {
if (!to_delete_files.empty()) {
......@@ -565,7 +567,8 @@ Status BlobGCJob::DeleteInputBlobFiles() {
VersionEdit edit;
edit.SetColumnFamilyID(blob_gc_->column_family_handle()->GetID());
for (const auto& file : blob_gc_->sampled_inputs()) {
ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%llu]",
ROCKS_LOG_INFO(db_options_.info_log,
"Titan add obsolete file [%" PRIu64 "]",
file->file_number());
metrics_.blob_db_gc_num_files++;
edit.DeleteBlobFile(file->file_number(), obsolete_sequence);
......
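The logging fixes in this hunk replace the non-portable `%llu` with the `<inttypes.h>` macro for `uint64_t`, so the format string matches the argument width on every platform. A minimal sketch (the file number is a hypothetical value; `db_options_` is the surrounding class member as in the files above):

```cpp
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>

// Sketch: logging a uint64_t file number portably with ROCKS_LOG_INFO.
uint64_t file_number = 42;  // hypothetical value for illustration
ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%" PRIu64 "]",
               file_number);
```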
......@@ -4,7 +4,7 @@
#include "blob_file_iterator.h"
#include "blob_file_manager.h"
#include "blob_gc.h"
#include "db/db_impl.h"
#include "db/db_impl/db_impl.h"
#include "rocksdb/statistics.h"
#include "rocksdb/status.h"
#include "titan/options.h"
......
#include "blob_gc_job.h"
#include "rocksdb/convenience.h"
#include "test_util/testharness.h"
#include "blob_gc_job.h"
#include "blob_gc_picker.h"
#include "db_impl.h"
#include "rocksdb/convenience.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
#include "blob_gc_picker.h"
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
namespace rocksdb {
namespace titandb {
......@@ -16,9 +22,6 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC(
uint64_t batch_size = 0;
uint64_t estimate_output_size = 0;
// ROCKS_LOG_INFO(db_options_.info_log, "blob file num:%lu gc score:%lu",
// blob_storage->NumBlobFiles(),
// blob_storage->gc_score().size());
bool stop_picking = false;
bool maybe_continue_next_time = false;
uint64_t next_gc_size = 0;
......@@ -30,23 +33,11 @@ std::unique_ptr<BlobGC> BasicBlobGCPicker::PickBlobGC(
// or this file had been GCed
continue;
}
// ROCKS_LOG_INFO(db_options_.info_log,
// "file number:%lu score:%f being_gc:%d pending:%d, "
// "size:%lu discard:%lu mark_for_gc:%d
// mark_for_sample:%d", blob_file->file_number_,
// gc_score.score, blob_file->being_gc,
// blob_file->pending, blob_file->file_size_,
// blob_file->discardable_size_,
// blob_file->marked_for_gc_,
// blob_file->marked_for_sample);
if (!CheckBlobFile(blob_file.get())) {
ROCKS_LOG_INFO(db_options_.info_log, "file number:%lu no need gc",
ROCKS_LOG_INFO(db_options_.info_log, "Blob file %" PRIu64 " no need gc",
blob_file->file_number());
continue;
}
if (!stop_picking) {
blob_files.push_back(blob_file.get());
batch_size += blob_file->file_size();
......
......@@ -2,14 +2,15 @@
#include <memory>
#include "db/column_family.h"
#include "db/write_callback.h"
#include "file/filename.h"
#include "rocksdb/status.h"
#include "blob_file_manager.h"
#include "blob_format.h"
#include "blob_gc.h"
#include "blob_storage.h"
#include "db/column_family.h"
#include "db/write_callback.h"
#include "rocksdb/status.h"
#include "util/filename.h"
namespace rocksdb {
namespace titandb {
......
#include "blob_gc_picker.h"
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_file_builder.h"
#include "blob_file_cache.h"
#include "blob_file_iterator.h"
#include "blob_file_reader.h"
#include "util/filename.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
......@@ -65,7 +65,8 @@ class TitanDBImpl::FileManager : public BlobFileManager {
}
if (!s.ok()) return s;
ROCKS_LOG_INFO(db_->db_options_.info_log, "Titan adding blob file [%llu]",
ROCKS_LOG_INFO(db_->db_options_.info_log,
"Titan adding blob file [%" PRIu64 "]",
file.first->file_number());
edit.AddBlobFile(file.first);
}
......@@ -746,7 +747,7 @@ Status TitanDBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
if (!bs) {
// TODO: Should treat it as background error and make DB read-only.
ROCKS_LOG_ERROR(db_options_.info_log,
"Column family id:% " PRIu32 " not Found.", cf_id);
"Column family id:%" PRIu32 " not Found.", cf_id);
return Status::NotFound("Column family id: " + std::to_string(cf_id) +
" not Found.");
}
......@@ -1008,7 +1009,7 @@ void TitanDBImpl::OnCompactionCompleted(
if (!bs) {
// TODO: Should treat it as background error and make DB read-only.
ROCKS_LOG_ERROR(db_options_.info_log,
"OnCompactionCompleted[%d] Column family id:% " PRIu32
"OnCompactionCompleted[%d] Column family id:%" PRIu32
" not Found.",
compaction_job_info.job_id, compaction_job_info.cf_id);
return;
......
#pragma once
#include "blob_file_manager.h"
#include "db/db_impl.h"
#include "db/db_impl/db_impl.h"
#include "rocksdb/statistics.h"
#include "table_factory.h"
#include "titan/db.h"
......
......@@ -10,8 +10,8 @@
#include <unordered_map>
#include "db/db_iter.h"
#include "logging/logging.h"
#include "rocksdb/env.h"
#include "util/logging.h"
#include "titan_stats.h"
......
......@@ -6,9 +6,9 @@
#include <inttypes.h>
#include "logging/logging.h"
#include "options/options_helper.h"
#include "rocksdb/convenience.h"
#include "util/logging.h"
namespace rocksdb {
namespace titandb {
......
#include "table/table_builder.h"
#include "file/filename.h"
#include "table/table_reader.h"
#include "test_util/testharness.h"
#include "blob_file_manager.h"
#include "blob_file_reader.h"
#include "table/table_reader.h"
#include "table_factory.h"
#include "util/filename.h"
#include "util/testharness.h"
#include "version_set.h"
namespace rocksdb {
......@@ -149,8 +150,9 @@ class TableBuilderTest : public testing::Test {
CompressionOptions compression_opts;
TableBuilderOptions options(cf_ioptions_, cf_moptions_,
cf_ioptions_.internal_comparator, &collectors_,
kNoCompression, compression_opts, nullptr,
false, kDefaultColumnFamilyName, 0);
kNoCompression, 0 /*sample_for_compression*/,
compression_opts, false /*skip_filters*/,
kDefaultColumnFamilyName, 0 /*level*/);
result->reset(table_factory_->NewTableBuilder(options, 0, file));
}
......@@ -203,7 +205,9 @@ TEST_F(TableBuilderTest, Basic) {
ReadOptions ro;
std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr));
iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
nullptr /*arena*/, false /*skip_filters*/,
TableReaderCaller::kUncategorized));
iter->SeekToFirst();
for (char i = 0; i < n; i++) {
ASSERT_TRUE(iter->Valid());
......@@ -252,7 +256,9 @@ TEST_F(TableBuilderTest, NoBlob) {
ReadOptions ro;
std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr));
iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
nullptr /*arena*/, false /*skip_filters*/,
TableReaderCaller::kUncategorized));
iter->SeekToFirst();
for (char i = 0; i < n; i++) {
ASSERT_TRUE(iter->Valid());
......
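The iterator calls above show the other notable interface change in these tests: 6.4's `TableReader::NewIterator` takes a prefix extractor, an arena, `skip_filters`, and a `TableReaderCaller` tag used to attribute the read to its caller, where the 5.18 call passed only the `ReadOptions` and a null prefix extractor. A minimal sketch of the updated call (assumes a `TableReader* base_reader` as in the test):

```cpp
// Sketch: opening an iterator on a table reader with the 6.4 signature.
ReadOptions ro;
std::unique_ptr<InternalIterator> iter;
iter.reset(base_reader->NewIterator(ro, nullptr /*prefix_extractor*/,
                                    nullptr /*arena*/, false /*skip_filters*/,
                                    TableReaderCaller::kUncategorized));
```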
#pragma once
#include "rocksdb/cache.h"
#include "test_util/testharness.h"
#include "util/compression.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
......@@ -5,12 +5,12 @@
#include "db_impl.h"
#include "db_iter.h"
#include "file/filename.h"
#include "port/port.h"
#include "rocksdb/utilities/debug.h"
#include "test_util/testharness.h"
#include "titan/db.h"
#include "util/filename.h"
#include "util/random.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
......@@ -2,17 +2,18 @@
#include <options/cf_options.h>
#include <unordered_map>
#include "file/filename.h"
#include "rocksdb/utilities/debug.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "util/random.h"
#include "blob_file_iterator.h"
#include "blob_file_reader.h"
#include "db_impl.h"
#include "db_iter.h"
#include "rocksdb/utilities/debug.h"
#include "titan/db.h"
#include "titan_fault_injection_test_env.h"
#include "util/filename.h"
#include "util/random.h"
#include "util/sync_point.h"
#include "util/testharness.h"
namespace rocksdb {
namespace titandb {
......
#pragma once
#include "rocksdb/env.h"
#include "util/fault_injection_test_env.h"
#include "test_util/fault_injection_test_env.h"
#include <memory>
......@@ -78,4 +78,4 @@ Status TitanTestRandomAccessFile::InvalidateCache(size_t offset,
}
} // namespace titandb
} // namespace rocksdb
\ No newline at end of file
} // namespace rocksdb
......@@ -11,46 +11,46 @@ bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) {
return compressed_size < raw_size - (raw_size / 8u);
}
Slice Compress(const CompressionContext& ctx, const Slice& input,
Slice Compress(const CompressionInfo& info, const Slice& input,
std::string* output, CompressionType* type) {
*type = ctx.type();
if (ctx.type() == kNoCompression) {
*type = info.type();
if (info.type() == kNoCompression) {
return input;
}
// Returns compressed block contents if:
// (1) the compression method is supported in this platform and
// (2) the compression rate is "good enough".
switch (ctx.type()) {
switch (info.type()) {
case kSnappyCompression:
if (Snappy_Compress(ctx, input.data(), input.size(), output) &&
if (Snappy_Compress(info, input.data(), input.size(), output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
}
break;
case kZlibCompression:
if (Zlib_Compress(ctx, kCompressionFormat, input.data(), input.size(),
if (Zlib_Compress(info, kCompressionFormat, input.data(), input.size(),
output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
}
break;
case kBZip2Compression:
if (BZip2_Compress(ctx, kCompressionFormat, input.data(), input.size(),
if (BZip2_Compress(info, kCompressionFormat, input.data(), input.size(),
output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
}
break;
case kLZ4Compression:
if (LZ4_Compress(ctx, kCompressionFormat, input.data(), input.size(),
if (LZ4_Compress(info, kCompressionFormat, input.data(), input.size(),
output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
}
break;
case kLZ4HCCompression:
if (LZ4HC_Compress(ctx, kCompressionFormat, input.data(), input.size(),
if (LZ4HC_Compress(info, kCompressionFormat, input.data(), input.size(),
output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
......@@ -64,7 +64,7 @@ Slice Compress(const CompressionContext& ctx, const Slice& input,
break;
case kZSTD:
case kZSTDNotFinalCompression:
if (ZSTD_Compress(ctx, input.data(), input.size(), output) &&
if (ZSTD_Compress(info, input.data(), input.size(), output) &&
GoodCompressionRatio(output->size(), input.size())) {
return *output;
}
......@@ -78,13 +78,13 @@ Slice Compress(const CompressionContext& ctx, const Slice& input,
return input;
}
Status Uncompress(const UncompressionContext& ctx, const Slice& input,
Status Uncompress(const UncompressionInfo& info, const Slice& input,
OwnedSlice* output) {
int size = 0;
CacheAllocationPtr ubuf;
assert(ctx.type() != kNoCompression);
assert(info.type() != kNoCompression);
switch (ctx.type()) {
switch (info.type()) {
case kSnappyCompression: {
size_t usize = 0;
if (!Snappy_GetUncompressedLength(input.data(), input.size(), &usize)) {
......@@ -98,7 +98,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
break;
}
case kZlibCompression:
ubuf = Zlib_Uncompress(ctx, input.data(), input.size(), &size,
ubuf = Zlib_Uncompress(info, input.data(), input.size(), &size,
kCompressionFormat);
if (!ubuf.get()) {
return Status::Corruption("Corrupted compressed blob", "Zlib");
......@@ -114,7 +114,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
output->reset(std::move(ubuf), size);
break;
case kLZ4Compression:
ubuf = LZ4_Uncompress(ctx, input.data(), input.size(), &size,
ubuf = LZ4_Uncompress(info, input.data(), input.size(), &size,
kCompressionFormat);
if (!ubuf.get()) {
return Status::Corruption("Corrupted compressed blob", "LZ4");
......@@ -122,7 +122,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
output->reset(std::move(ubuf), size);
break;
case kLZ4HCCompression:
ubuf = LZ4_Uncompress(ctx, input.data(), input.size(), &size,
ubuf = LZ4_Uncompress(info, input.data(), input.size(), &size,
kCompressionFormat);
if (!ubuf.get()) {
return Status::Corruption("Corrupted compressed blob", "LZ4HC");
......@@ -138,7 +138,7 @@ Status Uncompress(const UncompressionContext& ctx, const Slice& input,
break;
case kZSTD:
case kZSTDNotFinalCompression:
ubuf = ZSTD_Uncompress(ctx, input.data(), input.size(), &size);
ubuf = ZSTD_Uncompress(info, input.data(), input.size(), &size);
if (!ubuf.get()) {
return Status::Corruption("Corrupted compressed blob", "ZSTD");
}
......
......@@ -55,13 +55,13 @@ class FixedSlice : public Slice {
// compressed data. However, if the compression ratio is not good, it
// returns the input slice directly and sets "*type" to
// kNoCompression.
Slice Compress(const CompressionContext& ctx, const Slice& input,
Slice Compress(const CompressionInfo& info, const Slice& input,
std::string* output, CompressionType* type);
// Uncompresses the input data according to the uncompression type.
// If successful, fills "*buffer" with the uncompressed data and
// points "*output" to it.
Status Uncompress(const UncompressionContext& ctx, const Slice& input,
Status Uncompress(const UncompressionInfo& info, const Slice& input,
OwnedSlice* output);
void UnrefCacheHandle(void* cache, void* handle);
......
#include "util.h"
#include "util/testharness.h"
#include "test_util/testharness.h"
namespace rocksdb {
namespace titandb {
......@@ -10,14 +11,20 @@ TEST(UtilTest, Compression) {
std::string input(1024, 'a');
for (auto compression :
{kSnappyCompression, kZlibCompression, kLZ4Compression, kZSTD}) {
CompressionOptions compression_opt;
CompressionContext compression_ctx(compression);
CompressionInfo compression_info(
compression_opt, compression_ctx, CompressionDict::GetEmptyDict(),
compression, 0 /* sample_for_compression */);
std::string buffer;
auto compressed = Compress(compression_ctx, input, &buffer, &compression);
auto compressed = Compress(compression_info, input, &buffer, &compression);
if (compression != kNoCompression) {
ASSERT_TRUE(compressed.size() <= input.size());
UncompressionContext uncompression_ctx(compression);
UncompressionInfo uncompression_info(
uncompression_ctx, UncompressionDict::GetEmptyDict(), compression);
OwnedSlice output;
ASSERT_OK(Uncompress(uncompression_ctx, compressed, &output));
ASSERT_OK(Uncompress(uncompression_info, compressed, &output));
ASSERT_EQ(output, input);
}
}
......
......@@ -2,8 +2,9 @@
#include <inttypes.h>
#include "file/filename.h"
#include "edit_collector.h"
#include "util/filename.h"
namespace rocksdb {
namespace titandb {
......@@ -71,7 +72,7 @@ Status VersionSet::Recover() {
LogReporter reporter;
reporter.status = &s;
log::Reader reader(nullptr, std::move(file), &reporter, true /*checksum*/,
0 /*initial_offset*/, 0);
0 /*log_num*/);
Slice record;
std::string scratch;
EditCollector collector;
......@@ -248,7 +249,8 @@ Status VersionSet::DropColumnFamilies(
VersionEdit edit;
edit.SetColumnFamilyID(it->first);
for (auto& file : it->second->files_) {
ROCKS_LOG_INFO(db_options_.info_log, "Titan add obsolete file [%llu]",
ROCKS_LOG_INFO(db_options_.info_log,
"Titan add obsolete file [%" PRIu64 "]",
file.second->file_number());
edit.DeleteBlobFile(file.first, obsolete_sequence);
}
......
#include "file/filename.h"
#include "test_util/testharness.h"
#include "edit_collector.h"
#include "testutil.h"
#include "util.h"
#include "util/filename.h"
#include "util/testharness.h"
#include "version_edit.h"
#include "version_set.h"
......
......@@ -43,9 +43,10 @@ int main() {
#include <queue>
#include <thread>
#include "db/db_impl.h"
#include "db/db_impl/db_impl.h"
#include "db/version_set.h"
#include "hdfs/env_hdfs.h"
#include "logging/logging.h"
#include "monitoring/histogram.h"
#include "options/options_helper.h"
#include "port/port.h"
......@@ -65,15 +66,14 @@ int main() {
#include "util/compression.h"
#include "util/crc32c.h"
#include "util/gflags_compat.h"
#include "util/logging.h"
#include "util/mutexlock.h"
#include "util/random.h"
#include "util/string_util.h"
// SyncPoint is not supported in Released Windows Mode.
#if !(defined NDEBUG) || !defined(OS_WIN)
#include "util/sync_point.h"
#include "test_util/sync_point.h"
#endif // !(defined NDEBUG) || !defined(OS_WIN)
#include "util/testutil.h"
#include "test_util/testutil.h"
#include "utilities/merge_operators.h"
......