Unverified Commit bc358745 authored by Myth's avatar Myth Committed by GitHub

Add checkpoint support (#207)

* Add checkpoint support
Signed-off-by: Myth <caipengbo@outlook.com>
parent 1098aac8
......@@ -103,6 +103,7 @@ if (WITH_TITAN_TESTS AND (NOT CMAKE_BUILD_TYPE STREQUAL "Release"))
table_builder_test
thread_safety_test
titan_db_test
titan_checkpoint_test
titan_options_test
util_test
compaction_filter_test
......
#pragma once
#include "db.h"
namespace rocksdb {
namespace titandb {
// A Checkpoint object creates openable snapshots ("checkpoints") of a
// running TitanDB instance in a caller-specified directory.
class Checkpoint {
 public:
  // Creates a Checkpoint object to be used for creating openable snapshots.
  // The caller owns *checkpoint_ptr and must delete it when done.
  static Status Create(TitanDB* db, Checkpoint** checkpoint_ptr);
  // Builds an openable snapshot of TitanDB.
  // base_checkpoint_dir: checkpoint directory of base DB
  // titan_checkpoint_dir: checkpoint directory of TitanDB; if not specified,
  // the default value is {base_checkpoint_dir}/titandb.
  // The specified directory should contain an absolute path and not exist; it
  // will be created by the API.
  // When a checkpoint is created:
  // (1) SST and blob files are hard linked if the output directory is on the
  // same filesystem as the database, and copied otherwise.
  // (2) The MANIFEST file specific to TitanDB will be regenerated based on all
  // existing blob files.
  // (3) Other required files are always copied.
  // log_size_for_flush: if the total log file size is equal to or larger than
  // this value, then a flush is triggered for all the column families. The
  // default value is 0, which means a flush is always triggered. If you move
  // away from the default, the checkpoint may not contain up-to-date data
  // if WAL writing is not always enabled.
  // A flush will always be triggered if the DB uses 2PC.
  virtual Status CreateCheckpoint(const std::string& base_checkpoint_dir,
                                  const std::string& titan_checkpoint_dir = "",
                                  uint64_t log_size_for_flush = 0);
  virtual ~Checkpoint() {}
};
} // namespace titandb
} // namespace rocksdb
......@@ -6,6 +6,8 @@
namespace rocksdb {
namespace titandb {
class VersionEdit;
struct TitanCFDescriptor {
std::string name;
TitanCFOptions options;
......@@ -108,6 +110,23 @@ class TitanDB : public StackableDB {
return Status::NotSupported("TitanDB doesn't support this operation");
}
using StackableDB::DisableFileDeletions;
// Base-class stub: file deletions must be toggled through the Titan-aware
// overrides (see TitanDBImpl), which also pause blob file purging.
Status DisableFileDeletions() override {
  return Status::NotSupported("TitanDB doesn't support this operation");
}
using StackableDB::EnableFileDeletions;
// Base-class stub; TitanDBImpl provides the real implementation.
Status EnableFileDeletions(bool /*force*/) override {
  return Status::NotSupported("TitanDB doesn't support this operation");
}
// Gets all files in the /titandb directory. Should be called after
// DisableFileDeletions() so the set of files is stable.
// "edits" receives the blob file records of every column family.
// Base-class stub; TitanDBImpl provides the real implementation.
virtual Status GetAllTitanFiles(std::vector<std::string>& /*files*/,
                                std::vector<VersionEdit>* /*edits*/) {
  return Status::NotSupported("TitanDB doesn't support this operation");
}
using rocksdb::StackableDB::SingleDelete;
Status SingleDelete(const WriteOptions& /*wopts*/,
ColumnFamilyHandle* /*column_family*/,
......
......@@ -153,13 +153,16 @@ Status BlobFileSet::OpenManifest(uint64_t file_number) {
ImmutableDBOptions ioptions(db_options_);
s = SyncTitanManifest(env_, stats_, &ioptions, manifest_->file());
}
uint64_t old_manifest_file_number = manifest_file_number_;
if (s.ok()) {
// Makes "CURRENT" file that points to the new manifest file.
s = SetCurrentFile(env_, dirname_, file_number, nullptr);
manifest_file_number_ = file_number;
}
if (!s.ok()) {
manifest_.reset();
manifest_file_number_ = old_manifest_file_number;
obsolete_manifests_.emplace_back(file_name);
}
return s;
......@@ -326,5 +329,45 @@ void BlobFileSet::GetObsoleteFiles(std::vector<std::string>* obsolete_files,
obsolete_manifests_.clear();
}
// Collects the names of all Titan files (blob files of every column family,
// plus the current MANIFEST and CURRENT files) and fills one VersionEdit per
// column family describing its blob files, preceded by a global edit that
// records the next file number. File names are relative to dirname.
// REQUIRES: mutex is held (see header).
void BlobFileSet::GetAllFiles(std::vector<std::string>* files,
                              std::vector<VersionEdit>* edits) {
  std::vector<std::string> all_blob_files;
  edits->clear();
  // One leading edit for global state plus one edit per column family.
  edits->reserve(column_families_.size() + 1);

  // Saves global information
  {
    VersionEdit edit;
    edit.SetNextFileNumber(next_file_number_.load());
    edits->emplace_back(edit);
  }

  // Saves all blob files
  for (auto& cf : column_families_) {
    VersionEdit edit;
    edit.SetColumnFamilyID(cf.first);
    auto& blob_storage = cf.second;
    blob_storage->GetAllFiles(&all_blob_files);
    // NOTE(review): files_ is read here without the BlobStorage mutex that
    // GetAllFiles() above acquires — presumably safe because the BlobFileSet
    // mutex is held; confirm the locking contract.
    for (auto& file : blob_storage->files_) {
      edit.AddBlobFile(file.second);
    }
    edits->emplace_back(edit);
  }

  files->clear();
  // +2 for the MANIFEST and CURRENT entries appended below.
  files->reserve(all_blob_files.size() + 2);
  for (auto& live_file : all_blob_files) {
    files->emplace_back(live_file);
  }

  // Append current MANIFEST and CURRENT file name
  files->emplace_back(DescriptorFileName("", manifest_file_number_));
  files->emplace_back(CurrentFileName(""));
}
} // namespace titandb
} // namespace rocksdb
......@@ -82,6 +82,10 @@ class BlobFileSet {
void GetObsoleteFiles(std::vector<std::string>* obsolete_files,
SequenceNumber oldest_sequence);
// REQUIRES: mutex is held
void GetAllFiles(std::vector<std::string>* files,
std::vector<VersionEdit>* edits);
// REQUIRES: mutex is held
bool IsColumnFamilyObsolete(uint32_t cf_id) {
return obsolete_columns_.count(cf_id) > 0;
......@@ -118,6 +122,7 @@ class BlobFileSet {
std::unordered_map<uint32_t, std::shared_ptr<BlobStorage>> column_families_;
std::unique_ptr<log::Writer> manifest_;
std::atomic<uint64_t> next_file_number_{1};
uint64_t manifest_file_number_;
};
} // namespace titandb
......
......@@ -187,6 +187,16 @@ void BlobStorage::GetObsoleteFiles(std::vector<std::string>* obsolete_files,
}
}
// Appends the name of every blob file tracked by this storage (live and
// obsolete alike) to *files. Names are relative to dirname.
void BlobStorage::GetAllFiles(std::vector<std::string>* files) {
  MutexLock guard(&mutex_);
  for (const auto& entry : files_) {
    // entry.first is the blob file number; emit a dirname-relative name.
    files->emplace_back(BlobFileName("", entry.first));
  }
}
void BlobStorage::ComputeGCScore() {
// TODO: no need to recompute all everytime
MutexLock l(&mutex_);
......
......@@ -104,6 +104,9 @@ class BlobStorage {
void GetObsoleteFiles(std::vector<std::string>* obsolete_files,
SequenceNumber oldest_sequence);
// Gets all files (start with '/titandb' prefix), including obsolete files.
void GetAllFiles(std::vector<std::string>* files);
// Mark the file as obsolete; the return value indicates whether the file
// was found.
bool MarkFileObsolete(uint64_t file_number, SequenceNumber obsolete_sequence);
......
......@@ -20,6 +20,7 @@
#include "titan_build_version.h"
#include "titan_stats.h"
#include "util/autovector.h"
#include "util/mutexlock.h"
#include "util/threadpool_imp.h"
namespace rocksdb {
......@@ -765,6 +766,64 @@ void TitanDBImpl::ReleaseSnapshot(const Snapshot* snapshot) {
db_->ReleaseSnapshot(snapshot);
}
// Disables deletions of both base DB files and Titan blob files so that a
// stable set of files can be collected (e.g. for a checkpoint). Calls may
// nest: each successful call increments a counter that EnableFileDeletions()
// decrements.
Status TitanDBImpl::DisableFileDeletions() {
  // Disable base DB file deletions.
  Status s = db_impl_->DisableFileDeletions();
  if (!s.ok()) {
    return s;
  }

  int count = 0;
  {
    // Hold delete_titandb_file_mutex_ to make sure no
    // PurgeObsoleteFiles job is running.
    MutexLock l(&delete_titandb_file_mutex_);
    count = ++disable_titandb_file_deletions_;
  }

  // Fixed typo in the log message ("Disalbed" -> "Disabled").
  ROCKS_LOG_INFO(db_options_.info_log,
                 "Disabled blob file deletions. count: %d", count);
  return Status::OK();
}
// Re-enables file deletions previously disabled by DisableFileDeletions().
// With force == true the disable counter is reset to zero outright;
// otherwise it is decremented once (never below zero).
Status TitanDBImpl::EnableFileDeletions(bool force) {
  // Enable base DB file deletions first.
  Status s = db_impl_->EnableFileDeletions(force);
  if (!s.ok()) {
    return s;
  }

  int remaining = 0;
  {
    MutexLock guard(&delete_titandb_file_mutex_);
    if (force) {
      disable_titandb_file_deletions_ = 0;
    } else if (disable_titandb_file_deletions_ > 0) {
      remaining = --disable_titandb_file_deletions_;
    }
    assert(remaining >= 0);
  }

  ROCKS_LOG_INFO(db_options_.info_log, "Enabled blob file deletions. count: %d",
                 remaining);
  return Status::OK();
}
// Returns the names of all Titan files (blob files plus MANIFEST/CURRENT)
// and per-column-family version edits. File deletions are disabled for the
// duration of the collection so the name set stays stable on disk.
Status TitanDBImpl::GetAllTitanFiles(std::vector<std::string>& files,
                                     std::vector<VersionEdit>* edits) {
  Status disable_status = DisableFileDeletions();
  if (!disable_status.ok()) {
    return disable_status;
  }

  {
    MutexLock guard(&mutex_);
    blob_file_set_->GetAllFiles(&files, edits);
  }

  // Non-forced re-enable; surface any failure to the caller.
  return EnableFileDeletions(false);
}
Status TitanDBImpl::DeleteFilesInRanges(ColumnFamilyHandle* column_family,
const RangePtr* ranges, size_t n,
bool include_end) {
......
......@@ -110,6 +110,16 @@ class TitanDBImpl : public TitanDB {
void ReleaseSnapshot(const Snapshot* snapshot) override;
using TitanDB::DisableFileDeletions;
Status DisableFileDeletions() override;
using TitanDB::EnableFileDeletions;
Status EnableFileDeletions(bool force) override;
using TitanDB::GetAllTitanFiles;
Status GetAllTitanFiles(std::vector<std::string>& files,
std::vector<VersionEdit>* edits) override;
Status DeleteFilesInRanges(ColumnFamilyHandle* column_family,
const RangePtr* ranges, size_t n,
bool include_end = true) override;
......@@ -324,6 +334,13 @@ class TitanDBImpl : public TitanDB {
// REQUIRE: mutex_ held.
int drop_cf_requests_ = 0;
// PurgeObsoleteFiles, DisableFileDeletions and EnableFileDeletions
// serialize on this mutex so they do not race with one another.
mutable port::Mutex delete_titandb_file_mutex_;
// REQUIRES: access with delete_titandb_file_mutex_ held.
int disable_titandb_file_deletions_ = 0;
std::atomic_bool shuting_down_{false};
};
......
......@@ -5,6 +5,12 @@ namespace titandb {
Status TitanDBImpl::PurgeObsoleteFilesImpl() {
Status s;
MutexLock delete_file_lock(&delete_titandb_file_mutex_);
if (disable_titandb_file_deletions_ > 0) {
return s;
}
std::vector<std::string> candidate_files;
auto oldest_sequence = GetOldestSnapshotSequence();
{
......
#include "titan_checkpoint_impl.h"
#include <cinttypes>
#include "db/log_writer.h"
#include "file/file_util.h"
#include "file/filename.h"
#include "port/port.h"
#include "rocksdb/transaction_log.h"
#include "test_util/sync_point.h"
#include "util.h"
#include "utilities/checkpoint/checkpoint_impl.h"
#include "version_edit.h"
namespace rocksdb {
namespace titandb {
// Factory method: hands back a TitanCheckpointImpl behind the Checkpoint
// interface. The caller takes ownership of *checkpoint_ptr.
Status Checkpoint::Create(TitanDB* db, Checkpoint** checkpoint_ptr) {
  auto* impl = new TitanCheckpointImpl(db);
  *checkpoint_ptr = impl;
  return Status::OK();
}
// Default (base-class) implementation. Checkpointing is only supported via
// the object returned by Checkpoint::Create(), which dispatches to
// TitanCheckpointImpl::CreateCheckpoint().
Status Checkpoint::CreateCheckpoint(
    const std::string& /*base_checkpoint_dir*/,
    const std::string& /*titan_checkpoint_dir = ""*/,
    uint64_t /*log_size_for_flush = 0*/) {
  return Status::NotSupported("TitanDB doesn't support this operation");
}
// Removes a checkpoint staging directory (and its nested "titandb"
// subdirectory, via one level of recursion) left over from a previous
// failed checkpoint attempt. No-op when full_private_path does not exist.
// Best effort: individual deletion failures are logged, not propagated.
void TitanCheckpointImpl::CleanStagingDirectory(
    const std::string& full_private_path, Logger* info_log) {
  std::vector<std::string> subchildren;
  Status s = db_->GetEnv()->FileExists(full_private_path);
  if (s.IsNotFound()) {
    return;
  }
  ROCKS_LOG_INFO(info_log, "File exists %s -- %s", full_private_path.c_str(),
                 s.ToString().c_str());
  s = db_->GetEnv()->GetChildren(full_private_path, &subchildren);
  if (s.ok()) {
    for (auto& subchild : subchildren) {
      std::string subchild_path = full_private_path + "/" + subchild;
      if (subchild == "titandb") {
        // Recurse to empty and remove the nested Titan directory first.
        CleanStagingDirectory(subchild_path, info_log);
        ROCKS_LOG_INFO(info_log, "Clean titandb directory %s",
                       subchild_path.c_str());
      } else {
        s = db_->GetEnv()->DeleteFile(subchild_path);
        ROCKS_LOG_INFO(info_log, "Delete file %s -- %s", subchild_path.c_str(),
                       s.ToString().c_str());
      }
    }
  }
  // Finally delete the private dir (must be empty by now).
  s = db_->GetEnv()->DeleteDir(full_private_path);
  ROCKS_LOG_INFO(info_log, "Delete dir %s -- %s", full_private_path.c_str(),
                 s.ToString().c_str());
}
// Writes a fresh Titan MANIFEST at file_name containing every record in
// *edits, then syncs it (fsync or fdatasync per DB options).
Status TitanCheckpointImpl::CreateTitanManifest(
    const std::string& file_name, std::vector<VersionEdit>* edits) {
  Env* env = db_->GetEnv();
  const bool use_fsync = db_->GetDBOptions().use_fsync;
  const EnvOptions env_options;

  // Open the destination file and wrap it in a buffered writer.
  std::unique_ptr<WritableFile> base_file;
  Status s = env->NewWritableFile(file_name, &base_file, env_options);
  if (!s.ok()) return s;
  std::unique_ptr<WritableFileWriter> writer(
      new WritableFileWriter(std::move(base_file), file_name, env_options));

  std::unique_ptr<log::Writer> manifest(
      new log::Writer(std::move(writer), 0, false));

  // Serialize every edit as one log record.
  for (auto& edit : *edits) {
    std::string record;
    edit.EncodeTo(&record);
    s = manifest->AddRecord(record);
    if (!s.ok()) return s;
  }

  return manifest->file()->Sync(use_fsync);
}
// Builds an openable checkpoint of TitanDB.
// Steps:
//  1. Validate the Titan checkpoint directory (must not already exist).
//  2. Disable file deletions, checkpoint the base DB, then materialize the
//     Titan files into a temporary "<dir>.tmp" staging directory.
//  3. Rename the staging directory into place and fsync its handle.
// On failure the staging directory is cleaned up and the error returned.
Status TitanCheckpointImpl::CreateCheckpoint(
    const std::string& base_checkpoint_dir,
    const std::string& titan_checkpoint_dir, uint64_t log_size_for_flush) {
  TitanDBOptions titandb_options = db_->GetTitanDBOptions();
  std::string full_private_path;
  std::string checkpoint_dir = titan_checkpoint_dir;

  // Default Titan checkpoint location nests under the base checkpoint.
  if (checkpoint_dir.empty()) {
    checkpoint_dir = base_checkpoint_dir + "/titandb";
  }

  // Check TitanDB checkpoint directory
  Status s = db_->GetEnv()->FileExists(checkpoint_dir);
  if (s.ok()) {
    return Status::InvalidArgument("TitanDB checkpoint directory exists");
  } else if (!s.IsNotFound()) {
    assert(s.IsIOError());
    return s;
  }

  // Bug fix: the two string literals previously concatenated to
  // "...creating checkpointin directory..." (missing space).
  ROCKS_LOG_INFO(titandb_options.info_log,
                 "Started the TitanDB checkpoint process -- creating "
                 "checkpoint in directory %s",
                 checkpoint_dir.c_str());

  size_t final_nonslash_idx = checkpoint_dir.find_last_not_of('/');
  if (final_nonslash_idx == std::string::npos) {
    // npos means it's only slashes or empty. Non-empty means it's the root
    // directory, but it shouldn't be because we verified above the directory
    // doesn't exist.
    assert(checkpoint_dir.empty());
    return Status::InvalidArgument("Invalid TitanDB checkpoint directory name");
  }

  // Disable file deletions
  s = db_->DisableFileDeletions();
  const bool disabled_file_deletions = s.ok();

  if (s.ok()) {
    // Create base DB checkpoint
    auto base_db_checkpoint = new rocksdb::CheckpointImpl(db_);
    s = base_db_checkpoint->CreateCheckpoint(base_checkpoint_dir,
                                             log_size_for_flush);
    delete base_db_checkpoint;
    base_db_checkpoint = nullptr;

    if (s.ok()) {
      // Stage into "<dir>.tmp" so a crash never leaves a half-built
      // directory at the final path.
      full_private_path =
          checkpoint_dir.substr(0, final_nonslash_idx + 1) + ".tmp";
      ROCKS_LOG_INFO(
          titandb_options.info_log,
          "TitanDB checkpoint process -- using temporary directory %s",
          full_private_path.c_str());
      CleanStagingDirectory(full_private_path, titandb_options.info_log.get());
      s = db_->GetEnv()->CreateDir(full_private_path);
    }
  }

  if (s.ok()) {
    // Create TitanDB checkpoint
    s = CreateCustomCheckpoint(
        titandb_options,
        [&](const std::string& src_dirname, const std::string& fname,
            FileType) {
          ROCKS_LOG_INFO(titandb_options.info_log, "Hard Linking %s",
                         fname.c_str());
          return db_->GetEnv()->LinkFile(src_dirname + fname,
                                         full_private_path + fname);
        } /* link_file_cb */,
        [&](const std::string& src_dirname, const std::string& fname,
            uint64_t size_limit_bytes, FileType) {
          ROCKS_LOG_INFO(titandb_options.info_log, "Copying %s", fname.c_str());
          return CopyFile(db_->GetEnv(), src_dirname + fname,
                          full_private_path + fname, size_limit_bytes,
                          titandb_options.use_fsync);
        } /* copy_file_cb */,
        [&](const std::string& fname, const std::string& contents, FileType) {
          ROCKS_LOG_INFO(titandb_options.info_log, "Creating %s",
                         fname.c_str());
          return CreateFile(db_->GetEnv(), full_private_path + fname, contents,
                            titandb_options.use_fsync);
        } /* create_file_cb */,
        log_size_for_flush, full_private_path);
  }

  if (disabled_file_deletions) {
    // We copied all the files, enable file deletions
    Status ss = db_->EnableFileDeletions(false);
    assert(ss.ok());
    (void)ss;  // silence unused-variable warning in release builds
  }

  if (s.ok()) {
    // Move tmp private backup to real TitanDB checkpoint directory
    s = db_->GetEnv()->RenameFile(full_private_path, checkpoint_dir);
  }

  if (s.ok()) {
    // Fsync the directory handle so the rename above is durable.
    std::unique_ptr<Directory> checkpoint_directory;
    s = db_->GetEnv()->NewDirectory(checkpoint_dir, &checkpoint_directory);
    if (s.ok() && checkpoint_directory != nullptr) {
      s = checkpoint_directory->Fsync();
    }
  }

  if (s.ok()) {
    // Here we know that we succeeded and installed the new checkpoint
    ROCKS_LOG_INFO(titandb_options.info_log,
                   "TitanDB checkpoint DONE. All is good");
  } else {
    // Clean all the files we might have created
    ROCKS_LOG_INFO(titandb_options.info_log, "TitanDB checkpoint failed -- %s",
                   s.ToString().c_str());
    CleanStagingDirectory(full_private_path, titandb_options.info_log.get());
  }

  return s;
}
// Materializes all Titan files into full_private_path via the callbacks:
//  * blob files are hard linked where possible (copied on cross-device link)
//  * the Titan MANIFEST is regenerated from the collected version edits
//  * the CURRENT file is crafted manually so it is guaranteed to point at
//    the regenerated MANIFEST (the live CURRENT can change mid-checkpoint).
Status TitanCheckpointImpl::CreateCustomCheckpoint(
    const TitanDBOptions& titandb_options,
    std::function<Status(const std::string& src_dirname,
                         const std::string& src_fname, FileType type)>
        link_file_cb,
    std::function<Status(const std::string& src_dirname,
                         const std::string& src_fname,
                         uint64_t size_limit_bytes, FileType type)>
        copy_file_cb,
    std::function<Status(const std::string& fname, const std::string& contents,
                         FileType type)>
        create_file_cb,
    uint64_t log_size_for_flush, const std::string full_private_path) {
  Status s;
  std::vector<std::string> titandb_files;
  std::vector<VersionEdit> version_edits;
  bool same_fs = true;

  // This will return files prefixed with "/"
  s = db_->GetAllTitanFiles(titandb_files, &version_edits);

  TEST_SYNC_POINT(
      "TitanCheckpointImpl::CreateCustomCheckpoint::AfterGetAllTitanFiles");
  TEST_SYNC_POINT(
      "TitanCheckpointImpl::CreateCustomCheckpoint:BeforeTitanDBCheckpoint1");
  TEST_SYNC_POINT(
      "TitanCheckpointImpl::CreateCustomCheckpoint::BeforeTitanDBCheckpoint2");
  if (!s.ok()) {
    return s;
  }

  // Copy/Hard link files
  std::string manifest_fname, current_fname;
  for (auto& live_file : titandb_files) {
    uint64_t number;
    FileType type;
    bool ok = ParseFileName(live_file, &number, &type);
    if (!ok) {
      s = Status::Corruption("Can't parse file name. This is very bad");
      break;
    }
    // We should only get blob, manifest and current files here
    assert(type == kBlobFile || type == kDescriptorFile ||
           type == kCurrentFile);
    assert(live_file.size() > 0 && live_file[0] == '/');
    if (type == kCurrentFile) {
      // CURRENT is crafted at the end from the manifest name; skip it here.
      current_fname = live_file;
      continue;
    } else if (type == kDescriptorFile) {
      manifest_fname = live_file;
    }
    std::string src_fname = live_file;

    // Rules:
    // * If it's kBlobFile, then it's shared
    // * If it's kDescriptorFile, craft the manifest based on all blob files
    // * If it's kCurrentFile, craft the current file manually to ensure
    //   it's consistent with the manifest number. This is necessary because
    //   current file contents can change during checkpoint creation.
    // * Always copy if cross-device link.
    if (type == kBlobFile && same_fs) {
      s = link_file_cb(titandb_options.dirname, src_fname, type);
      if (s.IsNotSupported()) {
        // Cross-device link: fall back to copying from here on.
        same_fs = false;
        s = Status::OK();
      }
    }
    if (type != kBlobFile || !same_fs) {
      if (type == kDescriptorFile) {
        s = CreateTitanManifest(full_private_path + src_fname, &version_edits);
      } else {
        s = copy_file_cb(titandb_options.dirname, src_fname, 0, type);
      }
    }
    // Bug fix: previously the loop kept iterating after a link/copy failure,
    // so a later successful iteration overwrote the error in `s`.
    if (!s.ok()) {
      break;
    }
  }

  // Write manifest name to CURRENT file.
  // NOTE(review): manifest_fname keeps its leading '/', so CURRENT contains
  // "/MANIFEST-..."; the open path then resolves "<dir>//MANIFEST-...",
  // which works on POSIX — confirm this is intended.
  if (s.ok() && !current_fname.empty() && !manifest_fname.empty()) {
    // Bug fix: the callback's status was previously discarded, so a failed
    // CURRENT write went unnoticed and produced an unopenable checkpoint.
    s = create_file_cb(current_fname, manifest_fname + "\n", kCurrentFile);
  }
  return s;
}
} // namespace titandb
} // namespace rocksdb
#pragma once
#include "file/filename.h"
#include "titan/checkpoint.h"
namespace rocksdb {
namespace titandb {
class VersionEdit;
// Concrete Checkpoint implementation for TitanDB.
class TitanCheckpointImpl : public Checkpoint {
 public:
  // Does not take ownership of db; db must outlive this object.
  explicit TitanCheckpointImpl(TitanDB* db) : db_(db) {}

  // Follow these steps to build an openable snapshot of TitanDB:
  // (1) Create a base DB checkpoint.
  // (2) Hard link all existing blob files (live + obsolete) if the output
  // directory is on the same filesystem, and copy otherwise.
  // (3) Create a MANIFEST file including all records about existing blob
  // files.
  // (4) Craft the CURRENT file manually based on the MANIFEST file number.
  // This will include redundant blob files, but hopefully not a lot of them,
  // and on restart Titan will recalculate GC stats and GC out those redundant
  // blob files.
  using Checkpoint::CreateCheckpoint;
  virtual Status CreateCheckpoint(const std::string& base_checkpoint_dir,
                                  const std::string& titan_checkpoint_dir = "",
                                  uint64_t log_size_for_flush = 0) override;

  // Checkpoint logic can be customized by providing callbacks for link, copy,
  // or create.
  Status CreateCustomCheckpoint(
      const TitanDBOptions& titandb_options,
      std::function<Status(const std::string& src_dirname,
                           const std::string& fname, FileType type)>
          link_file_cb,
      std::function<Status(const std::string& src_dirname,
                           const std::string& fname, uint64_t size_limit_bytes,
                           FileType type)>
          copy_file_cb,
      std::function<Status(const std::string& fname,
                           const std::string& contents, FileType type)>
          create_file_cb,
      uint64_t log_size_for_flush, const std::string full_private_path);

 private:
  // Best-effort removal of a leftover checkpoint staging directory.
  void CleanStagingDirectory(const std::string& path, Logger* info_log);

  // Create a Titan manifest file based on the content of the VersionEdits.
  Status CreateTitanManifest(const std::string& file_name,
                             std::vector<VersionEdit>* edits);

 private:
  TitanDB* db_;
};
} // namespace titandb
} // namespace rocksdb
#include <thread>
#include "port/port.h"
#include "port/stack_trace.h"
#include "rocksdb/env.h"
#include "test_util/fault_injection_test_env.h"
#include "test_util/sync_point.h"
#include "test_util/testharness.h"
#include "test_util/testutil.h"
#include "titan/checkpoint.h"
#include "titan/db.h"
namespace rocksdb {
namespace titandb {
// Test fixture that manages a fresh TitanDB instance under a per-thread
// path plus a clean checkpoint ("snapshot") target directory, recreated
// for every test case.
class CheckpointTest : public testing::Test {
 protected:
  // Sequence of option configurations to try
  enum OptionConfig {
    kDefault = 0,
  };
  int option_config_;

 public:
  std::string dbname_;
  std::string alternative_wal_dir_;
  Env* env_;
  TitanDB* db_;
  TitanOptions last_options_;
  std::vector<ColumnFamilyHandle*> handles_;
  std::string snapshot_name_;

  CheckpointTest() : env_(Env::Default()) {
    env_->SetBackgroundThreads(1, Env::LOW);
    env_->SetBackgroundThreads(1, Env::HIGH);
    dbname_ = test::PerThreadDBPath(env_, "checkpoint_test");
    alternative_wal_dir_ = dbname_ + "/wal";
    auto options = CurrentOptions();
    auto delete_options = options;
    delete_options.wal_dir = alternative_wal_dir_;
    EXPECT_OK(DestroyTitanDB(dbname_, delete_options));
    // Destroy again for the case where no alternative WAL dir was used.
    EXPECT_OK(DestroyTitanDB(dbname_, options));
    db_ = nullptr;
    snapshot_name_ = test::PerThreadDBPath(env_, "snapshot");
    std::string snapshot_tmp_name = snapshot_name_ + ".tmp";
    // Remove both the snapshot target and any leftover ".tmp" staging dir.
    EXPECT_OK(DestroyTitanDB(snapshot_name_, options));
    env_->DeleteDir(snapshot_name_);
    EXPECT_OK(DestroyTitanDB(snapshot_tmp_name, options));
    env_->DeleteDir(snapshot_tmp_name);
    Reopen(options);
  }

  ~CheckpointTest() override {
    // Reset sync points so one test's dependencies never leak into another.
    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
    rocksdb::SyncPoint::GetInstance()->LoadDependency({});
    rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
    Close();
    TitanOptions options;
    options.db_paths.emplace_back(dbname_, 0);
    options.db_paths.emplace_back(dbname_ + "_2", 0);
    options.db_paths.emplace_back(dbname_ + "_3", 0);
    options.db_paths.emplace_back(dbname_ + "_4", 0);
    EXPECT_OK(DestroyTitanDB(dbname_, options));
    EXPECT_OK(DestroyTitanDB(snapshot_name_, options));
  }

  // Return the current option configuration.
  TitanOptions CurrentOptions() {
    TitanOptions options;
    options.env = env_;
    options.create_if_missing = true;
    return options;
  }

  void CreateColumnFamilies(const std::vector<std::string>& cfs,
                            const TitanOptions& options) {
    ColumnFamilyOptions cf_opts(options);
    size_t cfi = handles_.size();
    handles_.resize(cfi + cfs.size());
    for (auto cf : cfs) {
      ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
    }
  }

  void DropColumnFamily(int cf) {
    auto handle = handles_[cf];
    ASSERT_OK(db_->DropColumnFamily(handle));
    ASSERT_OK(db_->DestroyColumnFamilyHandle(handle));
    handles_.erase(handles_.begin() + cf);
  }

  // Creates the given column families and reopens the DB with them plus the
  // default column family.
  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
                             const TitanOptions& options) {
    CreateColumnFamilies(cfs, options);
    std::vector<std::string> cfs_plus_default = cfs;
    cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
    ReopenWithColumnFamilies(cfs_plus_default, options);
  }

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const std::vector<TitanOptions>& options) {
    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
  }

  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                const TitanOptions& options) {
    ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
  }

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const std::vector<TitanOptions>& options) {
    Close();
    EXPECT_EQ(cfs.size(), options.size());
    std::vector<TitanCFDescriptor> column_families;
    for (size_t i = 0; i < cfs.size(); ++i) {
      column_families.push_back(TitanCFDescriptor(cfs[i], options[i]));
    }
    TitanDBOptions db_opts = TitanDBOptions(options[0]);
    return TitanDB::Open(db_opts, dbname_, column_families, &handles_, &db_);
  }

  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
                                     const TitanOptions& options) {
    Close();
    std::vector<TitanOptions> v_opts(cfs.size(), options);
    return TryReopenWithColumnFamilies(cfs, v_opts);
  }

  void Reopen(const TitanOptions& options) { ASSERT_OK(TryReopen(options)); }

  void CompactAll() {
    for (auto h : handles_) {
      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), h, nullptr, nullptr));
    }
  }

  void Close() {
    for (auto h : handles_) {
      delete h;
    }
    handles_.clear();
    delete db_;
    db_ = nullptr;
  }

  void DestroyAndReopen(const TitanOptions& options) {
    // Destroy using last options
    Destroy(last_options_);
    ASSERT_OK(TryReopen(options));
  }

  void Destroy(const TitanOptions& options) {
    Close();
    ASSERT_OK(DestroyTitanDB(dbname_, options));
  }

  // Removes the Titan-specific directory (blob files, Titan MANIFEST) before
  // destroying the base DB, since DestroyDB() knows nothing about it.
  Status DestroyTitanDB(const std::string& dbname,
                        const TitanOptions& options) {
    // Clear and delete TitanDB directory first
    std::vector<std::string> filenames;
    std::string titandb_path;
    TitanOptions titan_options = options;
    if (titan_options.dirname.empty()) {
      titandb_path = dbname + "/titandb";
    } else {
      titandb_path = titan_options.dirname;
    }
    // Ignore error in case directory does not exist
    env_->GetChildren(titandb_path, &filenames);
    for (auto& fname : filenames) {
      std::string file_path = titandb_path + "/" + fname;
      env_->DeleteFile(file_path);
    }
    env_->DeleteDir(titandb_path);
    // Destroy base db
    return DestroyDB(dbname, titan_options.operator rocksdb::Options());
  }

  Status TryReopen(const TitanOptions& options) {
    Close();
    last_options_ = options;
    return TitanDB::Open(options, dbname_, &db_);
  }

  Status Flush(int cf = 0) {
    if (cf == 0) {
      return db_->Flush(FlushOptions());
    } else {
      return db_->Flush(FlushOptions(), handles_[cf]);
    }
  }

  // Returns a value one byte longer than min_blob_size so it is stored as a
  // blob rather than inline.
  std::string GenLargeValue(uint64_t min_blob_size, char v) {
    return std::string(min_blob_size + 1, v);
  }

  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) {
    return db_->Put(wo, k, v);
  }

  Status Put(int cf, const Slice& k, const Slice& v,
             WriteOptions wo = WriteOptions()) {
    return db_->Put(wo, handles_[cf], k, v);
  }

  Status Delete(const std::string& k) { return db_->Delete(WriteOptions(), k); }

  Status Delete(int cf, const std::string& k) {
    return db_->Delete(WriteOptions(), handles_[cf], k);
  }

  // Reads k from the default column family; returns "NOT_FOUND" or the
  // status string on failure so tests can assert on the result directly.
  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.verify_checksums = true;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }

  std::string Get(int cf, const std::string& k,
                  const Snapshot* snapshot = nullptr) {
    ReadOptions options;
    options.verify_checksums = true;
    options.snapshot = snapshot;
    std::string result;
    Status s = db_->Get(options, handles_[cf], k, &result);
    if (s.IsNotFound()) {
      result = "NOT_FOUND";
    } else if (!s.ok()) {
      result = s.ToString();
    }
    return result;
  }
};
// Creates a checkpoint (default Titan dir), keeps writing to the original
// DB, then verifies the checkpoint still holds the pre-checkpoint values —
// both while the original DB is running and after it is destroyed.
TEST_F(CheckpointTest, GetSnapshotLink) {
  // Exercise both "always flush" (0) and a large log_size_for_flush.
  for (uint64_t log_size_for_flush : {0, 1000000}) {
    TitanOptions options = CurrentOptions();
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyTitanDB(dbname_, options));
    // Create a database with small value and large value
    Status s;
    options.create_if_missing = true;
    ASSERT_OK(TitanDB::Open(options, dbname_, &db_));
    std::string small_key = std::string("small");
    std::string large_key = std::string("large");
    std::string small_value_v1 = std::string("v1");
    std::string small_value_v2 = std::string("v2");
    std::string large_value_v1 = GenLargeValue(options.min_blob_size, '1');
    std::string large_value_v2 = GenLargeValue(options.min_blob_size, '2');
    ASSERT_OK(Put(small_key, small_value_v1));
    ASSERT_EQ(small_value_v1, Get(small_key));
    ASSERT_OK(Put(large_key, large_value_v1));
    ASSERT_EQ(large_value_v1, Get(large_key));
    // Take a snapshot
    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
    ASSERT_OK(
        checkpoint->CreateCheckpoint(snapshot_name_, "", log_size_for_flush));
    // Overwrite both keys after the checkpoint was taken.
    ASSERT_OK(Put(small_key, small_value_v2));
    ASSERT_EQ(small_value_v2, Get(small_key));
    ASSERT_OK(Put(large_key, large_value_v2));
    ASSERT_EQ(large_value_v2, Get(large_key));
    ASSERT_OK(Flush());
    ASSERT_EQ(small_value_v2, Get(small_key));
    ASSERT_EQ(large_value_v2, Get(large_key));
    // Open snapshot and verify contents while DB is running
    TitanDB* snapshotDB;
    ReadOptions roptions;
    std::string result;
    options.create_if_missing = false;
    ASSERT_OK(TitanDB::Open(options, snapshot_name_, &snapshotDB));
    ASSERT_OK(snapshotDB->Get(roptions, small_key, &result));
    ASSERT_EQ(small_value_v1, result);
    ASSERT_OK(snapshotDB->Get(roptions, large_key, &result));
    ASSERT_EQ(large_value_v1, result);
    delete snapshotDB;
    snapshotDB = nullptr;
    delete db_;
    db_ = nullptr;
    // Destroy original DB
    ASSERT_OK(DestroyTitanDB(dbname_, options));
    // Open snapshot and verify contents
    options.create_if_missing = false;
    dbname_ = snapshot_name_;
    ASSERT_OK(TitanDB::Open(TitanOptions(options), dbname_, &db_));
    ASSERT_EQ(small_value_v1, Get(small_key));
    ASSERT_EQ(large_value_v1, Get(large_key));
    delete db_;
    db_ = nullptr;
    ASSERT_OK(DestroyTitanDB(dbname_, options));
    delete checkpoint;
    // Restore DB name
    dbname_ = test::PerThreadDBPath(env_, "checkpoint_test");
  }
}
// Same scenario as GetSnapshotLink, but places the Titan part of the
// checkpoint in an explicitly specified directory instead of the default
// "{base_checkpoint_dir}/titandb" location.
TEST_F(CheckpointTest, SpecifyTitanCheckpointDirectory) {
  TitanOptions options = CurrentOptions();
  delete db_;
  db_ = nullptr;
  ASSERT_OK(DestroyTitanDB(dbname_, options));
  // Create a database with small value and large value
  Status s;
  options.create_if_missing = true;
  ASSERT_OK(TitanDB::Open(options, dbname_, &db_));
  std::string small_key = std::string("small");
  std::string large_key = std::string("large");
  std::string small_value_v1 = std::string("v1");
  std::string small_value_v2 = std::string("v2");
  std::string large_value_v1 = GenLargeValue(options.min_blob_size, '1');
  std::string large_value_v2 = GenLargeValue(options.min_blob_size, '2');
  ASSERT_OK(Put(small_key, small_value_v1));
  ASSERT_EQ(small_value_v1, Get(small_key));
  ASSERT_OK(Put(large_key, large_value_v1));
  ASSERT_EQ(large_value_v1, Get(large_key));
  // Take a snapshot using a specific TitanDB directory
  Checkpoint* checkpoint;
  std::string titandb_snapshot_dir =
      test::PerThreadDBPath(env_, "snapshot-titandb");
  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_, titandb_snapshot_dir));
  // Overwrite both keys after the checkpoint was taken.
  ASSERT_OK(Put(small_key, small_value_v2));
  ASSERT_EQ(small_value_v2, Get(small_key));
  ASSERT_OK(Put(large_key, large_value_v2));
  ASSERT_EQ(large_value_v2, Get(large_key));
  ASSERT_OK(Flush());
  ASSERT_EQ(small_value_v2, Get(small_key));
  ASSERT_EQ(large_value_v2, Get(large_key));
  // Open snapshot and verify contents while DB is running
  TitanDB* snapshotDB;
  ReadOptions roptions;
  std::string result;
  options.create_if_missing = false;
  // Must specify the dirname
  options.dirname = titandb_snapshot_dir;
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, &snapshotDB));
  ASSERT_OK(snapshotDB->Get(roptions, small_key, &result));
  ASSERT_EQ(small_value_v1, result);
  ASSERT_OK(snapshotDB->Get(roptions, large_key, &result));
  ASSERT_EQ(large_value_v1, result);
  delete snapshotDB;
  snapshotDB = nullptr;
  delete db_;
  db_ = nullptr;
  // Destroy original DB
  options.dirname = "";
  ASSERT_OK(DestroyTitanDB(dbname_, options));
  // Open snapshot and verify contents
  dbname_ = snapshot_name_;
  options.dirname = titandb_snapshot_dir;
  ASSERT_OK(TitanDB::Open(TitanOptions(options), dbname_, &db_));
  ASSERT_EQ(small_value_v1, Get(small_key));
  ASSERT_EQ(large_value_v1, Get(large_key));
  delete db_;
  db_ = nullptr;
  ASSERT_OK(DestroyTitanDB(dbname_, options));
  delete checkpoint;
  // Restore DB name
  dbname_ = test::PerThreadDBPath(env_, "checkpoint_test");
}
// Takes a checkpoint of a multi-column-family DB from a separate thread
// while the main thread writes concurrently; sync points order the writes
// around GetLiveFiles so the checkpoint is expected to capture the second
// round of values ("Default1", "eleven", ..., large_value_2).
TEST_F(CheckpointTest, CheckpointCF) {
  TitanOptions options = CurrentOptions();
  CreateAndReopenWithCF({"one", "two", "three", "four", "five", "six"},
                        options);
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"CheckpointTest::CheckpointCF:2", "DBImpl::GetLiveFiles:2"},
       {"DBImpl::GetLiveFiles:1", "CheckpointTest::CheckpointCF:1"}});
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  ASSERT_OK(Put(0, "Default", "Default"));
  ASSERT_OK(Put(1, "one", "one"));
  ASSERT_OK(Put(2, "two", "two"));
  ASSERT_OK(Put(3, "three", "three"));
  ASSERT_OK(Put(4, "four", "four"));
  ASSERT_OK(Put(5, "five", "five"));
  std::string large_value_1 = GenLargeValue(options.min_blob_size, '1');
  ASSERT_OK(Put(6, "six", large_value_1));
  TitanDB* snapshotDB;
  ReadOptions roptions;
  std::string result;
  std::vector<ColumnFamilyHandle*> cphandles;
  Status s;
  // Take a snapshot
  rocksdb::port::Thread t([&]() {
    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
    delete checkpoint;
  });
  TEST_SYNC_POINT("CheckpointTest::CheckpointCF:1");
  // Second round of writes: ordered before GetLiveFiles:2, so these values
  // should be visible in the checkpoint.
  ASSERT_OK(Put(0, "Default", "Default1"));
  ASSERT_OK(Put(1, "one", "eleven"));
  ASSERT_OK(Put(2, "two", "twelve"));
  ASSERT_OK(Put(3, "three", "thirteen"));
  ASSERT_OK(Put(4, "four", "fourteen"));
  ASSERT_OK(Put(5, "five", "fifteen"));
  std::string large_value_2 = GenLargeValue(options.min_blob_size, '2');
  ASSERT_OK(Put(6, "six", large_value_2));
  TEST_SYNC_POINT("CheckpointTest::CheckpointCF:2");
  t.join();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  // Third round of writes happens after the checkpoint finished and must
  // not be visible in it.
  ASSERT_OK(Put(1, "one", "twentyone"));
  ASSERT_OK(Put(2, "two", "twentytwo"));
  ASSERT_OK(Put(3, "three", "twentythree"));
  ASSERT_OK(Put(4, "four", "twentyfour"));
  ASSERT_OK(Put(5, "five", "twentyfive"));
  std::string large_value_3 = GenLargeValue(options.min_blob_size, '3');
  ASSERT_OK(Put(6, "six", large_value_3));
  ASSERT_OK(Flush());
  // Open snapshot and verify contents while DB is running
  options.create_if_missing = false;
  std::vector<std::string> cfs;
  cfs = {
      kDefaultColumnFamilyName, "one", "two", "three", "four", "five", "six"};
  std::vector<TitanCFDescriptor> column_families;
  for (size_t i = 0; i < cfs.size(); ++i) {
    column_families.push_back(TitanCFDescriptor(cfs[i], options));
  }
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, column_families, &cphandles,
                          &snapshotDB));
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
  ASSERT_EQ("Default1", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
  ASSERT_EQ("eleven", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[6], "six", &result));
  ASSERT_EQ(large_value_2, result);
  for (auto h : cphandles) {
    delete h;
  }
  cphandles.clear();
  delete snapshotDB;
  snapshotDB = nullptr;
}
TEST_F(CheckpointTest, CheckpointCFNoFlush) {
  // With log_size_for_flush (1000000) far above the WAL size produced
  // here, creating a checkpoint must not trigger any flush; the checkpoint
  // is built from the existing SST/blob files plus the WAL.
  TitanOptions options = CurrentOptions();
  CreateAndReopenWithCF({"one", "two", "three", "four", "five"}, options);
  ASSERT_OK(Put(0, "Default", "Default"));
  ASSERT_OK(Put(1, "one", "one"));
  ASSERT_OK(Flush());
  ASSERT_OK(Put(2, "two", "two"));
  std::string large_value_1 = GenLargeValue(options.min_blob_size, '1');
  ASSERT_OK(Put(3, "three", large_value_1));

  TitanDB* snapshotDB = nullptr;
  ReadOptions roptions;
  std::string result;
  std::vector<ColumnFamilyHandle*> cphandles;

  // Fail the test if any background flush is scheduled while the
  // checkpoint is being taken.
  rocksdb::SyncPoint::GetInstance()->SetCallBack(
      "DBImpl::BackgroundCallFlush:start", [&](void* /*arg*/) {
        // Flush should never trigger.
        FAIL();
      });
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  Checkpoint* checkpoint;
  ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
  ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_, "", 1000000));
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  // Unregister the FAIL() callback so the explicit flushes below (and any
  // later test re-enabling sync points) cannot trip it.
  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
  delete checkpoint;

  // Overwrite everything in the live DB, then destroy it; the checkpoint
  // must keep the pre-checkpoint values.
  ASSERT_OK(Put(1, "one", "two"));
  ASSERT_OK(Flush(1));
  ASSERT_OK(Put(2, "two", "twentytwo"));
  std::string large_value_2 = GenLargeValue(options.min_blob_size, '2');
  ASSERT_OK(Put(3, "three", large_value_2));
  Close();
  EXPECT_OK(DestroyTitanDB(dbname_, options));

  // Open the checkpoint and verify the pre-checkpoint contents.
  options.create_if_missing = false;
  std::vector<std::string> cfs;
  cfs = {kDefaultColumnFamilyName, "one", "two", "three", "four", "five"};
  std::vector<TitanCFDescriptor> column_families;
  for (size_t i = 0; i < cfs.size(); ++i) {
    column_families.push_back(TitanCFDescriptor(cfs[i], options));
  }
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, column_families, &cphandles,
                          &snapshotDB));
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
  ASSERT_EQ("Default", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
  ASSERT_EQ("one", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
  ASSERT_EQ("two", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[3], "three", &result));
  ASSERT_EQ(large_value_1, result);
  for (auto h : cphandles) {
    delete h;
  }
  cphandles.clear();
  delete snapshotDB;
  snapshotDB = nullptr;
}
TEST_F(CheckpointTest, CurrentFileModifiedWhileCheckpointing) {
  // Force a manifest rollover in the source DB while the checkpoint is in
  // flight. A successful Open() of the checkpoint afterwards shows its
  // CURRENT file references a manifest that actually exists in the
  // checkpoint directory.
  TitanOptions options = CurrentOptions();
  options.max_manifest_file_size = 0;  // always rollover manifest for file add
  Reopen(options);
  // Ordering enforced below:
  //   1. the Put/Flush in this thread start only after the checkpoint
  //      thread has collected the Titan file list;
  //   2. the resulting manifest write (VersionSet::LogAndApply) begins
  //      after the checkpoint reaches BeforeTitanDBCheckpoint1 and must
  //      finish before the checkpoint passes BeforeTitanDBCheckpoint2 —
  //      so the rollover happens mid-checkpoint.
  rocksdb::SyncPoint::GetInstance()->LoadDependency(
      {{"TitanCheckpointImpl::CreateCustomCheckpoint::AfterGetAllTitanFiles",
        "CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut"},
       {"TitanCheckpointImpl::CreateCustomCheckpoint:BeforeTitanDBCheckpoint1",
        "VersionSet::LogAndApply:WriteManifest"},
       {"VersionSet::LogAndApply:WriteManifestDone",
        "TitanCheckpointImpl::CreateCustomCheckpoint::"
        "BeforeTitanDBCheckpoint2"}});
  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
  // Take the checkpoint in the background.
  rocksdb::port::Thread t([&]() {
    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
    delete checkpoint;
  });
  TEST_SYNC_POINT(
      "CheckpointTest::CurrentFileModifiedWhileCheckpointing:PrePut");
  // Write one small (LSM) and one large (blob) value, then flush to force
  // the manifest rollover (max_manifest_file_size = 0).
  ASSERT_OK(Put("Default", "Default1"));
  ASSERT_OK(Put("Large", GenLargeValue(options.min_blob_size, 'v')));
  ASSERT_OK(Flush());
  t.join();
  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
  TitanDB* snapshotDB;
  // Successful Open() implies that CURRENT pointed to the manifest in the
  // checkpoint.
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, &snapshotDB));
  delete snapshotDB;
  snapshotDB = nullptr;
}
TEST_F(CheckpointTest, CheckpointInvalidDirectoryName) {
  // Empty or root-like directory names must be rejected with
  // InvalidArgument, both as the base checkpoint dir and as the Titan
  // checkpoint dir.
  const std::vector<std::string> bad_dirs = {"", "/", "////"};
  for (const auto& bad_dir : bad_dirs) {
    Checkpoint* ckpt = nullptr;
    ASSERT_OK(Checkpoint::Create(db_, &ckpt));
    // Invalid base checkpoint directory.
    ASSERT_TRUE(ckpt->CreateCheckpoint(bad_dir).IsInvalidArgument());
    if (!bad_dir.empty()) {
      // Valid base directory but invalid Titan checkpoint directory.
      ASSERT_TRUE(
          ckpt->CreateCheckpoint(snapshot_name_, bad_dir).IsInvalidArgument());
    }
    delete ckpt;
  }
}
TEST_F(CheckpointTest, CheckpointWithParallelWrites) {
  // Checkpoint creation must succeed while a writer thread is
  // concurrently inserting both small (LSM) and large (blob) values.
  ASSERT_OK(Put("key1", "val1"));
  port::Thread writer([this]() {
    ASSERT_OK(Put("key2", "val2"));
    ASSERT_OK(Put("key3", "val3"));
    ASSERT_OK(Put("key4", "val4"));
    ASSERT_OK(Put("key5", GenLargeValue(CurrentOptions().min_blob_size, '5')));
    ASSERT_OK(Put("key6", GenLargeValue(CurrentOptions().min_blob_size, '6')));
    ASSERT_OK(Put("key7", GenLargeValue(CurrentOptions().min_blob_size, '7')));
  });
  Checkpoint* ckpt = nullptr;
  ASSERT_OK(Checkpoint::Create(db_, &ckpt));
  ASSERT_OK(ckpt->CreateCheckpoint(snapshot_name_));
  delete ckpt;
  writer.join();
}
TEST_F(CheckpointTest, CheckpointWithUnsyncedDataDropped) {
  // The checkpoint must stay openable and complete even if all unsynced
  // data in the source DB is dropped right after checkpoint creation.
  TitanOptions options = CurrentOptions();
  std::unique_ptr<FaultInjectionTestEnv> fault_env(
      new FaultInjectionTestEnv(env_));
  options.env = fault_env.get();
  Reopen(options);
  ASSERT_OK(Put("key1", "val1"));
  std::string blob_value = GenLargeValue(options.min_blob_size, 'v');
  ASSERT_OK(Put("key2", blob_value));
  Checkpoint* ckpt = nullptr;
  ASSERT_OK(Checkpoint::Create(db_, &ckpt));
  ASSERT_OK(ckpt->CreateCheckpoint(snapshot_name_));
  delete ckpt;
  // Simulate a crash: throw away everything that was never synced.
  fault_env->DropUnsyncedFileData();
  // The checkpoint must still open and serve both values.
  options.env = env_;
  TitanDB* checkpoint_db = nullptr;
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, &checkpoint_db));
  ReadOptions read_opts;
  std::string fetched;
  ASSERT_OK(checkpoint_db->Get(read_opts, "key1", &fetched));
  ASSERT_EQ("val1", fetched);
  ASSERT_OK(checkpoint_db->Get(read_opts, "key2", &fetched));
  ASSERT_EQ(blob_value, fetched);
  delete checkpoint_db;
  delete db_;
  db_ = nullptr;
}
TEST_F(CheckpointTest, GCWhileCheckpointing) {
  // Drop a column family and let a blob GC job run while a checkpoint is
  // in progress; the checkpoint must still open with all CFs and serve
  // every pre-checkpoint value, including the blob-stored ones.
  TitanOptions options = CurrentOptions();
  options.max_background_gc = 1;
  // Background GC stays disabled so GC timing is controlled by the sync
  // points / CompactAll below rather than by a background trigger.
  options.disable_background_gc = true;
  // Very low discardable ratio so blob files become GC candidates easily.
  options.blob_file_discardable_ratio = 0.01;
  CreateAndReopenWithCF({"one", "two", "three"}, options);
  std::string large_value_1 = GenLargeValue(options.min_blob_size, '1');
  std::string large_value_2 = GenLargeValue(options.min_blob_size, '2');
  ASSERT_OK(Put(0, "Default", "Default"));
  ASSERT_OK(Put(1, "one", "one"));
  ASSERT_OK(Put(2, "two", large_value_1));
  ASSERT_OK(Put(3, "three", large_value_2));
  ASSERT_OK(Flush());
  // Ordering enforced below:
  //   1. the CF drop waits until the checkpoint thread has collected the
  //      Titan file list,
  //   2. a GC job reaches its rewrite-to-LSM step only after WaitGC,
  //   3. the checkpoint proceeds to the base DB checkpoint only after
  //      GCFinish — so GC runs squarely in the middle of checkpointing.
  SyncPoint::GetInstance()->LoadDependency(
      {{"TitanCheckpointImpl::CreateCustomCheckpoint::AfterGetAllTitanFiles",
        // Drop CF and run GC only after the checkpoint captured its file
        // list.
        "CheckpointTest::DeleteBlobWhileCheckpointing::DropCF"},
       {"CheckpointTest::DeleteBlobWhileCheckpointing::WaitGC",
        "BlobGCJob::Finish::AfterRewriteValidKeyToLSM"},
       {"CheckpointTest::DeleteBlobWhileCheckpointing::GCFinish",
        "TitanCheckpointImpl::CreateCustomCheckpoint::"
        "BeforeTitanDBCheckpoint1"}});
  SyncPoint::GetInstance()->EnableProcessing();
  // Take the checkpoint in the background.
  rocksdb::port::Thread t([&]() {
    Checkpoint* checkpoint;
    ASSERT_OK(Checkpoint::Create(db_, &checkpoint));
    ASSERT_OK(checkpoint->CreateCheckpoint(snapshot_name_));
    delete checkpoint;
  });
  TEST_SYNC_POINT("CheckpointTest::DeleteBlobWhileCheckpointing::DropCF");
  DropColumnFamily(2);
  TEST_SYNC_POINT("CheckpointTest::DeleteBlobWhileCheckpointing::WaitGC");
  CompactAll();
  TEST_SYNC_POINT("CheckpointTest::DeleteBlobWhileCheckpointing::GCFinish");
  t.join();
  TitanDB* snapshotDB;
  ReadOptions roptions;
  std::string result;
  std::vector<ColumnFamilyHandle*> cphandles;
  options.create_if_missing = false;
  std::vector<std::string> cfs;
  cfs = {kDefaultColumnFamilyName, "one", "two", "three"};
  std::vector<TitanCFDescriptor> column_families;
  for (size_t i = 0; i < cfs.size(); ++i) {
    column_families.push_back(TitanCFDescriptor(cfs[i], options));
  }
  // The checkpoint must open with all four CFs, including "two", which
  // was dropped from the source DB after the file list was captured.
  ASSERT_OK(TitanDB::Open(options, snapshot_name_, column_families, &cphandles,
                          &snapshotDB));
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[0], "Default", &result));
  ASSERT_EQ("Default", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[1], "one", &result));
  ASSERT_EQ("one", result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
  ASSERT_EQ(large_value_1, result);
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[3], "three", &result));
  ASSERT_EQ(large_value_2, result);
  // Compact again (presumably on the fixture's source DB — confirm
  // CompactAll target); the checkpoint's blob data must remain readable.
  CompactAll();
  ASSERT_OK(snapshotDB->Get(roptions, cphandles[2], "two", &result));
  ASSERT_EQ(large_value_1, result);
  for (auto h : cphandles) {
    delete h;
  }
  cphandles.clear();
  delete snapshotDB;
  snapshotDB = nullptr;
}
} // namespace titandb
} // namespace rocksdb
// Test entry point: run all registered gtest cases.
int main(int argc, char** argv) {
  // Print a stack trace on crashes to make test failures easier to debug.
  rocksdb::port::InstallStackTraceHandler();
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment