Unverified Commit 715dbd69 authored by Connor's avatar Connor Committed by GitHub

Update BlobFileMeta format (#68)

* add smallest_key and largest_key
* add number of file entries and file's level to blob file meta
Signed-off-by: Connor1996 <zbk602423539@gmail.com>
parent 468ddc97
......@@ -25,6 +25,15 @@ void BlobFileBuilder::Add(const BlobRecord& record, BlobHandle* handle) {
status_ = file_->Append(encoder_.GetHeader());
if (ok()) {
status_ = file_->Append(encoder_.GetRecord());
// The keys added into blob files are in order.
if (smallest_key_.empty()) {
smallest_key_.assign(record.key.data(), record.key.size());
}
assert(cf_options_.comparator->Compare(record.key, Slice(smallest_key_)) >=
0);
assert(cf_options_.comparator->Compare(record.key, Slice(largest_key_)) >=
0);
largest_key_.assign(record.key.data(), record.key.size());
}
}
......
......@@ -54,6 +54,9 @@ class BlobFileBuilder {
// REQUIRES: Finish(), Abandon() have not been called.
void Abandon();
const std::string& GetSmallestKey() { return smallest_key_; }
const std::string& GetLargestKey() { return largest_key_; }
private:
bool ok() const { return status().ok(); }
......@@ -62,6 +65,9 @@ class BlobFileBuilder {
Status status_;
BlobEncoder encoder_;
std::string smallest_key_;
std::string largest_key_;
};
} // namespace titandb
......
......@@ -156,8 +156,10 @@ void BlobFileIterator::PrefetchAndGet() {
}
BlobFileMergeIterator::BlobFileMergeIterator(
std::vector<std::unique_ptr<BlobFileIterator>>&& blob_file_iterators)
: blob_file_iterators_(std::move(blob_file_iterators)) {}
std::vector<std::unique_ptr<BlobFileIterator>>&& blob_file_iterators,
const Comparator* comparator)
: blob_file_iterators_(std::move(blob_file_iterators)),
min_heap_(BlobFileIterComparator(comparator)) {}
bool BlobFileMergeIterator::Valid() const {
if (current_ == nullptr) return false;
......
......@@ -76,7 +76,7 @@ class BlobFileIterator {
class BlobFileMergeIterator {
public:
explicit BlobFileMergeIterator(
std::vector<std::unique_ptr<BlobFileIterator>>&&);
std::vector<std::unique_ptr<BlobFileIterator>>&&, const Comparator*);
~BlobFileMergeIterator() = default;
......@@ -94,19 +94,28 @@ class BlobFileMergeIterator {
BlobIndex GetBlobIndex() { return current_->GetBlobIndex(); }
private:
class IternalComparator {
// Orders BlobFileIterator pointers by their current key so that the
// merge iterator's priority queue always pops the iterator positioned
// at the smallest key.
class BlobFileIterComparator {
public:
// The default constructor is not supposed to be used.
// It exists only so that std::priority_queue can compile.
BlobFileIterComparator() : comparator_(nullptr){};
explicit BlobFileIterComparator(const Comparator* comparator)
: comparator_(comparator){};
// Smaller keys get higher priority: std::priority_queue is a max-heap,
// so returning true when iter1's key is greater yields a min-heap.
bool operator()(const BlobFileIterator* iter1,
const BlobFileIterator* iter2) {
assert(comparator_ != nullptr);
return comparator_->Compare(iter1->key(), iter2->key()) > 0;
}
private:
// Not owned; must outlive this comparator. nullptr only for
// default-constructed instances, which must never be invoked.
const Comparator* comparator_;
};
Status status_;
std::vector<std::unique_ptr<BlobFileIterator>> blob_file_iterators_;
std::priority_queue<BlobFileIterator*, std::vector<BlobFileIterator*>,
IternalComparator>
BlobFileIterComparator>
min_heap_;
BlobFileIterator* current_ = nullptr;
};
......
......@@ -94,8 +94,7 @@ class BlobFileIteratorTest : public testing::Test {
const int n = 1000;
std::vector<BlobHandle> handles(n);
for (int i = 0; i < n; i++) {
auto id = std::to_string(i);
AddKeyValue(id, id, &handles[i]);
AddKeyValue(GenKey(i), GenValue(i), &handles[i]);
}
FinishBuilder();
......@@ -106,9 +105,8 @@ class BlobFileIteratorTest : public testing::Test {
for (int i = 0; i < n; blob_file_iterator_->Next(), i++) {
ASSERT_OK(blob_file_iterator_->status());
ASSERT_EQ(blob_file_iterator_->Valid(), true);
auto id = std::to_string(i);
ASSERT_EQ(id, blob_file_iterator_->key());
ASSERT_EQ(id, blob_file_iterator_->value());
ASSERT_EQ(GenKey(i), blob_file_iterator_->key());
ASSERT_EQ(GenValue(i), blob_file_iterator_->value());
BlobIndex blob_index = blob_file_iterator_->GetBlobIndex();
ASSERT_EQ(handles[i], blob_index.blob_handle);
}
......@@ -125,8 +123,7 @@ TEST_F(BlobFileIteratorTest, IterateForPrev) {
const int n = 1000;
std::vector<BlobHandle> handles(n);
for (int i = 0; i < n; i++) {
auto id = std::to_string(i);
AddKeyValue(id, id, &handles[i]);
AddKeyValue(GenKey(i), GenValue(i), &handles[i]);
}
FinishBuilder();
......@@ -142,9 +139,8 @@ TEST_F(BlobFileIteratorTest, IterateForPrev) {
BlobIndex blob_index;
blob_index = blob_file_iterator_->GetBlobIndex();
ASSERT_EQ(handles[i], blob_index.blob_handle);
auto id = std::to_string(i);
ASSERT_EQ(id, blob_file_iterator_->key());
ASSERT_EQ(id, blob_file_iterator_->value());
ASSERT_EQ(GenKey(i), blob_file_iterator_->key());
ASSERT_EQ(GenValue(i), blob_file_iterator_->value());
}
auto idx = Random::GetTLSInstance()->Uniform(n);
......@@ -207,7 +203,7 @@ TEST_F(BlobFileIteratorTest, MergeIterator) {
&readable_file_);
iters.emplace_back(std::unique_ptr<BlobFileIterator>(new BlobFileIterator{
std::move(readable_file_), file_number_, file_size, TitanCFOptions()}));
BlobFileMergeIterator iter(std::move(iters));
BlobFileMergeIterator iter(std::move(iters), titan_options_.comparator);
iter.SeekToFirst();
int i = 1;
......
......@@ -5,6 +5,8 @@
#include "blob_file_cache.h"
#include "blob_file_reader.h"
#include <cinttypes>
namespace rocksdb {
namespace titandb {
......@@ -19,6 +21,14 @@ class BlobFileTest : public testing::Test {
env_->DeleteDir(dirname_);
}
// Builds a fixed-width test key of the form "k-00000042": the index is
// zero-padded to at least eight digits so lexicographic order matches
// numeric order for the ranges used by these tests.
std::string GenKey(uint64_t i) {
  char key_buf[64];
  snprintf(key_buf, sizeof(key_buf), "k-%08" PRIu64, i);
  return std::string(key_buf);
}
// Builds a 1 KiB test value filled with the low byte of `i`.
std::string GenValue(uint64_t i) {
  const char fill = static_cast<char>(i);
  return std::string(1024, fill);
}
void TestBlobFilePrefetcher(TitanOptions options) {
options.dirname = dirname_;
TitanDBOptions db_options(options);
......@@ -39,8 +49,8 @@ class BlobFileTest : public testing::Test {
new BlobFileBuilder(db_options, cf_options, file.get()));
for (int i = 0; i < n; i++) {
auto key = std::to_string(i);
auto value = std::string(1024, i);
auto key = GenKey(i);
auto value = GenValue(i);
BlobRecord record;
record.key = key;
record.value = value;
......@@ -57,8 +67,8 @@ class BlobFileTest : public testing::Test {
std::unique_ptr<BlobFilePrefetcher> prefetcher;
ASSERT_OK(cache.NewPrefetcher(file_number_, file_size, &prefetcher));
for (int i = 0; i < n; i++) {
auto key = std::to_string(i);
auto value = std::string(1024, i);
auto key = GenKey(i);
auto value = GenValue(i);
BlobRecord expect;
expect.key = key;
expect.value = value;
......@@ -100,8 +110,8 @@ class BlobFileTest : public testing::Test {
new BlobFileBuilder(db_options, cf_options, file.get()));
for (int i = 0; i < n; i++) {
auto key = std::to_string(i);
auto value = std::string(1024, i);
auto key = GenKey(i);
auto value = GenValue(i);
BlobRecord record;
record.key = key;
record.value = value;
......@@ -123,8 +133,8 @@ class BlobFileTest : public testing::Test {
std::move(random_access_file_reader),
file_size, &blob_file_reader, nullptr));
for (int i = 0; i < n; i++) {
auto key = std::to_string(i);
auto value = std::string(1024, i);
auto key = GenKey(i);
auto value = GenValue(i);
BlobRecord expect;
expect.key = key;
expect.value = value;
......
......@@ -133,11 +133,36 @@ bool operator==(const BlobIndex& lhs, const BlobIndex& rhs) {
// Serializes this blob file meta into `dst` using the V2 layout:
// file number, file size, and entry count as Varint64, the file level
// as Varint32, then the length-prefixed smallest and largest keys.
// Field order must match DecodeFrom().
void BlobFileMeta::EncodeTo(std::string* dst) const {
PutVarint64(dst, file_number_);
PutVarint64(dst, file_size_);
PutVarint64(dst, file_entries_);
PutVarint32(dst, file_level_);
PutLengthPrefixedSlice(dst, smallest_key_);
PutLengthPrefixedSlice(dst, largest_key_);
}
Status BlobFileMeta::DecodeFrom(Slice* src) {
Status BlobFileMeta::DecodeFromLegacy(Slice* src) {
if (!GetVarint64(src, &file_number_) || !GetVarint64(src, &file_size_)) {
return Status::Corruption("BlobFileMeta Decode failed");
return Status::Corruption("BlobFileMeta decode legacy failed");
}
assert(smallest_key_.empty());
assert(largest_key_.empty());
return Status::OK();
}
// Parses a V2-format blob file meta from `src`, advancing `src` past the
// bytes consumed. Field order must match EncodeTo(). Returns
// Status::Corruption if any field is missing or truncated.
Status BlobFileMeta::DecodeFrom(Slice* src) {
// Fixed varint fields first: number, size, entries, level.
if (!GetVarint64(src, &file_number_) || !GetVarint64(src, &file_size_) ||
!GetVarint64(src, &file_entries_) || !GetVarint32(src, &file_level_)) {
return Status::Corruption("BlobFileMeta decode failed");
}
// Then the two length-prefixed key bounds, smallest key first.
Slice str;
if (GetLengthPrefixedSlice(src, &str)) {
smallest_key_.assign(str.data(), str.size());
} else {
return Status::Corruption("BlobSmallestKey Decode failed");
}
if (GetLengthPrefixedSlice(src, &str)) {
largest_key_.assign(str.data(), str.size());
} else {
return Status::Corruption("BlobLargestKey decode failed");
}
return Status::OK();
}
......
......@@ -9,17 +9,38 @@
namespace rocksdb {
namespace titandb {
// Blob header format:
// Blob file overall format:
//
// [blob file header]
// [blob head + record 1]
// [blob head + record 2]
// ...
// [blob head + record N]
// [meta block 1]
// [meta block 2]
// ...
// [meta block K]
// [meta index block]
// [blob file footer]
// Format of blob head (9 bytes):
//
// +---------+---------+-------------+
// | crc | size | compression |
// +---------+---------+-------------+
// | Fixed32 | Fixed32 | char |
// +---------+---------+-------------+
//
// crc : fixed32
// size : fixed32
// compression : char
const uint64_t kBlobHeaderSize = 9;
// Blob record format:
// Format of blob record (not fixed size):
//
// +--------------------+----------------------+
// | key | value |
// +--------------------+----------------------+
// | Varint64 + key_len | Varint64 + value_len |
// +--------------------+----------------------+
//
// key : varint64 length + length bytes
// value : varint64 length + length bytes
struct BlobRecord {
Slice key;
Slice value;
......@@ -71,10 +92,14 @@ class BlobDecoder {
CompressionType compression_{kNoCompression};
};
// Blob handle format:
// Format of blob handle (not fixed size):
//
// +----------+----------+
// | offset | size |
// +----------+----------+
// | Varint64 | Varint64 |
// +----------+----------+
//
// offset : varint64
// size : varint64
struct BlobHandle {
uint64_t offset{0};
uint64_t size{0};
......@@ -85,11 +110,16 @@ struct BlobHandle {
friend bool operator==(const BlobHandle& lhs, const BlobHandle& rhs);
};
// Blob index format:
// Format of blob index (not fixed size):
//
// type : char
// file_number_ : varint64
// blob_handle : varint64 offset + varint64 size
// +------+-------------+------------------------------------+
// | type | file number | blob handle |
// +------+-------------+------------------------------------+
// | char | Varint64 | Varint64(offset) + Varint64(size)  |
// +------+-------------+------------------------------------+
//
// It is stored in LSM-Tree as the value of key, then Titan can use this blob
// index to locate actual value from blob file.
struct BlobIndex {
enum Type : unsigned char {
kBlobRecord = 1,
......@@ -103,10 +133,30 @@ struct BlobIndex {
friend bool operator==(const BlobIndex& lhs, const BlobIndex& rhs);
};
// Blob file meta format:
// Format of blob file meta (not fixed size):
//
// +-------------+-----------+--------------+------------+
// | file number | file size | file entries | file level |
// +-------------+-----------+--------------+------------+
// | Varint64 | Varint64 | Varint64 | Varint32 |
// +-------------+-----------+--------------+------------+
// +--------------------+--------------------+
// | smallest key | largest key |
// +--------------------+--------------------+
// | Varint32 + key_len | Varint32 + key_len |
// +--------------------+--------------------+
//
// The blob file meta is stored in Titan's manifest for quick construction of
// the in-memory metadata of all the blob files.
//
// Legacy format:
//
// +-------------+-----------+
// | file number | file size |
// +-------------+-----------+
// | Varint64 | Varint64 |
// +-------------+-----------+
//
// file_number_ : varint64
// file_size_ : varint64
class BlobFileMeta {
public:
enum class FileEvent {
......@@ -131,16 +181,30 @@ class BlobFileMeta {
};
BlobFileMeta() = default;
BlobFileMeta(uint64_t _file_number, uint64_t _file_size)
: file_number_(_file_number), file_size_(_file_size) {}
BlobFileMeta(uint64_t _file_number, uint64_t _file_size,
uint64_t _file_entries, uint32_t _file_level,
const std::string& _smallest_key,
const std::string& _largest_key)
: file_number_(_file_number),
file_size_(_file_size),
file_entries_(_file_entries),
file_level_(_file_level),
smallest_key_(_smallest_key),
largest_key_(_largest_key) {}
friend bool operator==(const BlobFileMeta& lhs, const BlobFileMeta& rhs);
void EncodeTo(std::string* dst) const;
Status DecodeFrom(Slice* src);
Status DecodeFromLegacy(Slice* src);
uint64_t file_number() const { return file_number_; }
uint64_t file_size() const { return file_size_; }
uint64_t file_entries() const { return file_entries_; }
uint32_t file_level() const { return file_level_; }
Slice smallest_key() const { return smallest_key_; }
Slice largest_key() const { return largest_key_; }
FileState file_state() const { return state_; }
bool is_obsolete() const { return state_ == FileState::kObsolete; }
uint64_t discardable_size() const { return discardable_size_; }
......@@ -157,6 +221,13 @@ class BlobFileMeta {
// Persistent field
uint64_t file_number_{0};
uint64_t file_size_{0};
uint64_t file_entries_;
// Target level of compaction/flush which generates this blob file
uint32_t file_level_;
// Empty `smallest_key_` and `largest_key_` mean the key bounds are unknown,
// which can only happen when the file comes from a legacy version.
std::string smallest_key_;
std::string largest_key_;
// Not persistent field
FileState state_{FileState::kInit};
......@@ -167,12 +238,16 @@ class BlobFileMeta {
bool gc_mark_{false};
};
// Blob file header format.
// Format of blob file header (8 bytes):
//
// +--------------+---------+
// | magic number | version |
// +--------------+---------+
// | Fixed32 | Fixed32 |
// +--------------+---------+
//
// The header is meant to be compatible with the header of BlobDB blob files,
// we use a different magic number.
//
// magic_number : fixed32
// version : fixed32
struct BlobFileHeader {
// The first 32bits from $(echo titandb/blob | sha1sum).
static const uint32_t kHeaderMagicNumber = 0x2be0a614ul;
......@@ -185,12 +260,16 @@ struct BlobFileHeader {
Status DecodeFrom(Slice* src);
};
// Blob file footer format:
// Format of blob file footer (BlockHandle::kMaxEncodedLength + 12):
//
// +---------------------+-------------+--------------+----------+
// | meta index handle | padding | magic number | checksum |
// +---------------------+-------------+--------------+----------+
// | Varint64 + Varint64 | padding_len | Fixed64 | Fixed32 |
// +---------------------+-------------+--------------+----------+
//
// meta_index_handle : varint64 offset + varint64 size
// <padding> : [... kEncodedLength - 12] bytes
// magic_number : fixed64
// checksum : fixed32
// To make the blob file footer fixed size,
// the padding_len is `BlockHandle::kMaxEncodedLength - meta_handle_len`
struct BlobFileFooter {
// The first 64bits from $(echo titandb/blob | sha1sum).
static const uint64_t kFooterMagicNumber{0x2be0a6148e39edc6ull};
......
......@@ -35,7 +35,7 @@ TEST(BlobFormatTest, BlobIndex) {
}
TEST(BlobFormatTest, BlobFileMeta) {
BlobFileMeta input(2, 3);
BlobFileMeta input(2, 3, 0, 0, "0", "9");
CheckCodec(input);
}
......
......@@ -391,7 +391,9 @@ Status BlobGCJob::BuildIterator(
blob_gc_->titan_cf_options())));
}
if (s.ok()) result->reset(new BlobFileMergeIterator(std::move(list)));
if (s.ok())
result->reset(new BlobFileMergeIterator(
std::move(list), blob_gc_->titan_cf_options().comparator));
return s;
}
......@@ -480,7 +482,9 @@ Status BlobGCJob::InstallOutputBlobFiles() {
std::string tmp;
for (auto& builder : this->blob_file_builders_) {
auto file = std::make_shared<BlobFileMeta>(
builder.first->GetNumber(), builder.first->GetFile()->GetFileSize());
builder.first->GetNumber(), builder.first->GetFile()->GetFileSize(),
0, 0, builder.second->GetSmallestKey(),
builder.second->GetLargestKey());
if (!tmp.empty()) {
tmp.append(" ");
......
......@@ -31,7 +31,8 @@ class BlobGCPickerTest : public testing::Test {
void AddBlobFile(uint64_t file_number, uint64_t file_size,
uint64_t discardable_size, bool being_gc = false) {
auto f = std::make_shared<BlobFileMeta>(file_number, file_size);
auto f =
std::make_shared<BlobFileMeta>(file_number, file_size, 0, 0, "", "");
f->AddDiscardableSize(discardable_size);
f->FileStateTransit(BlobFileMeta::FileEvent::kDbRestart);
if (being_gc) {
......
......@@ -242,6 +242,7 @@ Status TitanDBImpl::Open(const std::vector<TitanCFDescriptor>& descs,
handles->clear();
s = db_->Close();
delete db_;
db_ = nullptr;
}
if (!s.ok()) return s;
......
......@@ -128,7 +128,8 @@ Status TitanTableBuilder::Finish() {
"Titan table builder finish output file %" PRIu64 ".",
blob_handle_->GetNumber());
std::shared_ptr<BlobFileMeta> file = std::make_shared<BlobFileMeta>(
blob_handle_->GetNumber(), blob_handle_->GetFile()->GetFileSize());
blob_handle_->GetNumber(), blob_handle_->GetFile()->GetFileSize(), 0,
0, blob_builder_->GetSmallestKey(), blob_builder_->GetLargestKey());
file->FileStateTransit(BlobFileMeta::FileEvent::kFlushOrCompactionOutput);
status_ =
blob_manager_->FinishFile(cf_id_, file, std::move(blob_handle_));
......
......@@ -553,7 +553,7 @@ TEST_F(TitanDBTest, VersionEditError) {
auto cf_id = db_->DefaultColumnFamily()->GetID();
VersionEdit edit;
edit.SetColumnFamilyID(cf_id);
edit.AddBlobFile(std::make_shared<BlobFileMeta>(1, 1));
edit.AddBlobFile(std::make_shared<BlobFileMeta>(1, 1, 0, 0, "", ""));
ASSERT_OK(LogAndApply(edit));
VerifyDB(data);
......@@ -561,7 +561,7 @@ TEST_F(TitanDBTest, VersionEditError) {
// add same blob file twice
VersionEdit edit1;
edit1.SetColumnFamilyID(cf_id);
edit1.AddBlobFile(std::make_shared<BlobFileMeta>(1, 1));
edit1.AddBlobFile(std::make_shared<BlobFileMeta>(1, 1, 0, 0, "", ""));
ASSERT_NOK(LogAndApply(edit));
Reopen();
......
......@@ -5,13 +5,6 @@
namespace rocksdb {
namespace titandb {
enum Tag {
kNextFileNumber = 1,
kColumnFamilyID = 10,
kAddedBlobFile = 11,
kDeletedBlobFile = 12,
};
void VersionEdit::EncodeTo(std::string* dst) const {
if (has_next_file_number_) {
PutVarint32Varint64(dst, kNextFileNumber, next_file_number_);
......@@ -20,7 +13,7 @@ void VersionEdit::EncodeTo(std::string* dst) const {
PutVarint32Varint32(dst, kColumnFamilyID, column_family_id_);
for (auto& file : added_files_) {
PutVarint32(dst, kAddedBlobFile);
PutVarint32(dst, kAddedBlobFileV2);
file->EncodeTo(dst);
}
for (auto& file : deleted_files_) {
......@@ -33,6 +26,7 @@ Status VersionEdit::DecodeFrom(Slice* src) {
uint32_t tag;
uint64_t file_number;
std::shared_ptr<BlobFileMeta> blob_file;
Status s;
const char* error = nullptr;
while (!error && !src->empty()) {
......@@ -54,12 +48,23 @@ Status VersionEdit::DecodeFrom(Slice* src) {
error = "column family id";
}
break;
// For backward compatibility: decode the legacy (V1) blob file meta format.
case kAddedBlobFile:
blob_file = std::make_shared<BlobFileMeta>();
if (blob_file->DecodeFrom(src).ok()) {
s = blob_file->DecodeFromLegacy(src);
if (s.ok()) {
AddBlobFile(blob_file);
} else {
error = s.ToString().c_str();
}
break;
case kAddedBlobFileV2:
blob_file = std::make_shared<BlobFileMeta>();
s = blob_file->DecodeFrom(src);
if (s.ok()) {
AddBlobFile(blob_file);
} else {
error = "added blob file";
error = s.ToString().c_str();
}
break;
case kDeletedBlobFile:
......
......@@ -10,6 +10,15 @@
namespace rocksdb {
namespace titandb {
enum Tag {
kNextFileNumber = 1,
kColumnFamilyID = 10,
kAddedBlobFile = 11,
kDeletedBlobFile = 12, // Deprecated, leave here for backward compatibility
kAddedBlobFileV2 = 13, // Comparing to kAddedBlobFile, it newly includes
// smallest_key and largest_key of blob file
};
class VersionEdit {
public:
void SetNextFileNumber(uint64_t v) {
......@@ -35,6 +44,7 @@ class VersionEdit {
private:
friend class VersionSet;
friend class VersionTest;
friend class EditCollector;
bool has_next_file_number_{false};
......
#include "file/filename.h"
#include "test_util/testharness.h"
#include "blob_format.h"
#include "edit_collector.h"
#include "testutil.h"
#include "util.h"
......@@ -67,7 +68,7 @@ class VersionTest : public testing::Test {
void AddBlobFiles(uint32_t cf_id, uint64_t start, uint64_t end) {
auto storage = column_families_[cf_id];
for (auto i = start; i < end; i++) {
auto file = std::make_shared<BlobFileMeta>(i, i);
auto file = std::make_shared<BlobFileMeta>(i, i, 0, 0, "", "");
storage->files_.emplace(i, file);
}
}
......@@ -107,6 +108,20 @@ class VersionTest : public testing::Test {
void CheckColumnFamiliesSize(uint64_t size) {
ASSERT_EQ(vset_->column_families_.size(), size);
}
// Encodes `edit` using the legacy (V1) manifest format, where an added
// blob file record carries only file_number and file_size (no entries,
// level, or key bounds). Used by tests to verify that the V1 decode path
// still works.
void LegacyEncode(const VersionEdit& edit, std::string* dst) {
PutVarint32Varint32(dst, Tag::kColumnFamilyID, edit.column_family_id_);
for (auto& file : edit.added_files_) {
PutVarint32(dst, Tag::kAddedBlobFile);
PutVarint64(dst, file->file_number());
PutVarint64(dst, file->file_size());
}
for (auto& file : edit.deleted_files_) {
// The obsolete sequence is a non-persistent field, so it is not encoded.
PutVarint32Varint64(dst, Tag::kDeletedBlobFile, file.first);
}
}
};
TEST_F(VersionTest, VersionEdit) {
......@@ -115,8 +130,8 @@ TEST_F(VersionTest, VersionEdit) {
input.SetNextFileNumber(1);
input.SetColumnFamilyID(2);
CheckCodec(input);
auto file1 = std::make_shared<BlobFileMeta>(3, 4);
auto file2 = std::make_shared<BlobFileMeta>(5, 6);
auto file1 = std::make_shared<BlobFileMeta>(3, 4, 0, 0, "", "");
auto file2 = std::make_shared<BlobFileMeta>(5, 6, 0, 0, "", "");
input.AddBlobFile(file1);
input.AddBlobFile(file2);
input.DeleteBlobFile(7);
......@@ -128,7 +143,7 @@ VersionEdit AddBlobFilesEdit(uint32_t cf_id, uint64_t start, uint64_t end) {
VersionEdit edit;
edit.SetColumnFamilyID(cf_id);
for (auto i = start; i < end; i++) {
auto file = std::make_shared<BlobFileMeta>(i, i);
auto file = std::make_shared<BlobFileMeta>(i, i, 0, 0, "", "");
edit.AddBlobFile(file);
}
return edit;
......@@ -269,6 +284,20 @@ TEST_F(VersionTest, ObsoleteFiles) {
CheckColumnFamiliesSize(8);
}
// Verifies that a VersionEdit written in the legacy (V1) blob file meta
// format decodes correctly through the kAddedBlobFile compatibility path,
// and that the decoded edit round-trips in the current format.
TEST_F(VersionTest, BlobFileMetaV1ToV2) {
VersionEdit edit;
edit.SetColumnFamilyID(1);
edit.AddBlobFile(std::make_shared<BlobFileMeta>(1, 1, 0, 0, "", ""));
edit.DeleteBlobFile(1);
edit.AddBlobFile(std::make_shared<BlobFileMeta>(2, 2, 0, 0, "", ""));
std::string str;
// Encode with the V1 layout, then decode with the current decoder.
LegacyEncode(edit, &str);
VersionEdit edit1;
ASSERT_OK(DecodeInto(Slice(str), &edit1));
CheckCodec(edit1);
}
} // namespace titandb
} // namespace rocksdb
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment