Unverified Commit d2fe0a96 authored by dorianzheng's avatar dorianzheng Committed by GitHub

add func: load_latest_options (#214)

* add func: load_latest_options

* modify: return value

* modify: expose func

* modify: immutable borrow

* modify: remove warning

* modify: add assert

* modify: s/cf_dec/cf_desc, s/cf_decs/cf_descs

* modify: refactor

* modify: refactor

* modify: expose CColumnFamilyDescriptor

* modify: func name

* modify: cargo fmt the whole project

* fix: ci error
parent 2f868cb7
......@@ -21,7 +21,7 @@ matrix:
install:
- rustup component add rustfmt-preview
before_script:
- cargo fmt --all -- --write-mode diff
- cargo fmt --all -- --check
script:
- cargo build
......
......@@ -40,7 +40,8 @@ fn link_cpp(build: &mut Build) {
// Don't link to c++ statically on windows.
return;
};
let output = tool.to_command()
let output = tool
.to_command()
.arg("--print-file-name")
.arg(stdlib)
.output()
......@@ -81,7 +82,8 @@ fn build_rocksdb() -> Build {
if cfg!(feature = "sse") {
cfg.define("FORCE_SSE42", "ON");
}
let dst = cfg.register_dep("Z")
let dst = cfg
.register_dep("Z")
.define("WITH_ZLIB", "ON")
.register_dep("BZIP2")
.define("WITH_BZ2", "ON")
......
......@@ -21,6 +21,7 @@
#include "rocksdb/memtablerep.h"
#include "rocksdb/merge_operator.h"
#include "rocksdb/options.h"
#include "rocksdb/perf_context.h"
#include "rocksdb/rate_limiter.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/statistics.h"
......@@ -30,8 +31,8 @@
#include "rocksdb/universal_compaction.h"
#include "rocksdb/utilities/backupable_db.h"
#include "rocksdb/utilities/debug.h"
#include "rocksdb/utilities/options_util.h"
#include "rocksdb/write_batch.h"
#include "rocksdb/perf_context.h"
#include "db/column_family.h"
#include "table/sst_file_writer_collectors.h"
......@@ -162,6 +163,9 @@ struct crocksdb_readoptions_t {
};
struct crocksdb_writeoptions_t { WriteOptions rep; };
struct crocksdb_options_t { Options rep; };
// Opaque C wrapper around rocksdb::ColumnFamilyDescriptor (a column family
// name plus its ColumnFamilyOptions), as handed out by
// crocksdb_load_latest_options.
struct crocksdb_column_family_descriptor {
  ColumnFamilyDescriptor rep;
};
struct crocksdb_compactoptions_t {
CompactRangeOptions rep;
};
......@@ -1944,6 +1948,23 @@ void crocksdb_options_destroy(crocksdb_options_t* options) {
delete options;
}
// Releases a descriptor allocated by crocksdb_load_latest_options.
// Any pointer previously returned by
// crocksdb_name_from_column_family_descriptor for this descriptor is
// invalidated.
void crocksdb_column_family_descriptor_destroy(
    crocksdb_column_family_descriptor* cf_desc) {
  delete cf_desc;
}
// Returns the column family name as a NUL-terminated string. The returned
// pointer borrows from `cf_desc`'s internal std::string and must not be
// used after the descriptor is destroyed.
const char* crocksdb_name_from_column_family_descriptor(
    const crocksdb_column_family_descriptor* cf_desc) {
  return cf_desc->rep.name.c_str();
}
// Copies the descriptor's ColumnFamilyOptions into a freshly allocated
// crocksdb_options_t. Only the ColumnFamilyOptions portion of Options is
// overwritten; DB-level fields keep their defaults. The caller owns the
// result and must release it with crocksdb_options_destroy.
crocksdb_options_t* crocksdb_options_from_column_family_descriptor(
    const crocksdb_column_family_descriptor* cf_desc) {
  crocksdb_options_t* options = new crocksdb_options_t;

  // rocksdb::Options derives from ColumnFamilyOptions; assigning through
  // the base-class subobject updates only the CF-level fields.
  *static_cast<ColumnFamilyOptions*>(&options->rep) = cf_desc->rep.options;

  return options;
}
void crocksdb_options_increase_parallelism(
crocksdb_options_t* opt, int total_threads) {
opt->rep.IncreaseParallelism(total_threads);
......@@ -2071,6 +2092,11 @@ void crocksdb_options_set_level_compaction_dynamic_level_bytes(
opt->rep.level_compaction_dynamic_level_bytes = v;
}
// Getter paired with
// crocksdb_options_set_level_compaction_dynamic_level_bytes; returns the
// current value of the option as 0/1.
unsigned char crocksdb_options_get_level_compaction_dynamic_level_bytes(
    const crocksdb_options_t* options) {
  return options->rep.level_compaction_dynamic_level_bytes;
}
void crocksdb_options_set_max_bytes_for_level_multiplier(crocksdb_options_t* opt,
double n) {
opt->rep.max_bytes_for_level_multiplier = n;
......@@ -2595,6 +2621,30 @@ void crocksdb_options_set_vector_memtable_factory(crocksdb_options_t* opt, uint6
opt->rep.memtable_factory.reset(new VectorRepFactory(reserved_bytes));
}
// Loads the most recently persisted OPTIONS file under `dbpath`.
//
// Return protocol:
//   - false with *errptr left nullptr: no OPTIONS file exists (NotFound);
//   - false with *errptr set: some other error occurred;
//   - true: success. *db_options is filled in, and *cf_descs points to a
//     malloc()-ed array of *cf_descs_len descriptor pointers.
//
// Ownership on success: each array element must be released with
// crocksdb_column_family_descriptor_destroy, and the array itself with
// free() (it was allocated with malloc, the elements with new).
bool crocksdb_load_latest_options(const char* dbpath, crocksdb_env_t* env,
                                  crocksdb_options_t* db_options,
                                  crocksdb_column_family_descriptor*** cf_descs,
                                  size_t* cf_descs_len,
                                  bool ignore_unknown_options, char** errptr) {
  std::vector<ColumnFamilyDescriptor> tmp_cf_descs;
  Status s = rocksdb::LoadLatestOptions(dbpath, env->rep, &db_options->rep,
                                        &tmp_cf_descs, ignore_unknown_options);

  // Clear the error slot so a NotFound "miss" is distinguishable from a
  // real error by the caller.
  *errptr = nullptr;
  if (s.IsNotFound()) return false;
  if (SaveError(errptr, s)) return false;

  *cf_descs_len = tmp_cf_descs.size();
  (*cf_descs) = (crocksdb_column_family_descriptor**)malloc(
      sizeof(crocksdb_column_family_descriptor*) * (*cf_descs_len));
  for (std::size_t i = 0; i < *cf_descs_len; ++i) {
    // Move each descriptor out of the vector into an individually
    // heap-allocated wrapper owned by the caller.
    (*cf_descs)[i] =
        new crocksdb_column_family_descriptor{std::move(tmp_cf_descs[i])};
  }

  return true;
}
crocksdb_ratelimiter_t* crocksdb_ratelimiter_create(
int64_t rate_bytes_per_sec,
int64_t refill_period_us,
......
......@@ -90,6 +90,8 @@ typedef struct crocksdb_iterator_t crocksdb_iterator_t;
typedef struct crocksdb_logger_t crocksdb_logger_t;
typedef struct crocksdb_mergeoperator_t crocksdb_mergeoperator_t;
typedef struct crocksdb_options_t crocksdb_options_t;
typedef struct crocksdb_column_family_descriptor
crocksdb_column_family_descriptor;
typedef struct crocksdb_compactoptions_t crocksdb_compactoptions_t;
typedef struct crocksdb_block_based_table_options_t
crocksdb_block_based_table_options_t;
......@@ -741,6 +743,14 @@ extern C_ROCKSDB_LIBRARY_API void crocksdb_options_set_cuckoo_table_factory(
extern C_ROCKSDB_LIBRARY_API crocksdb_options_t* crocksdb_options_create();
extern C_ROCKSDB_LIBRARY_API crocksdb_options_t* crocksdb_options_copy(const crocksdb_options_t*);
extern C_ROCKSDB_LIBRARY_API void crocksdb_options_destroy(crocksdb_options_t*);
extern C_ROCKSDB_LIBRARY_API void crocksdb_column_family_descriptor_destroy(
crocksdb_column_family_descriptor* cf_desc);
extern C_ROCKSDB_LIBRARY_API const char*
crocksdb_name_from_column_family_descriptor(
const crocksdb_column_family_descriptor* cf_desc);
extern C_ROCKSDB_LIBRARY_API crocksdb_options_t*
crocksdb_options_from_column_family_descriptor(
const crocksdb_column_family_descriptor* cf_desc);
extern C_ROCKSDB_LIBRARY_API void crocksdb_options_increase_parallelism(
crocksdb_options_t* opt, int total_threads);
extern C_ROCKSDB_LIBRARY_API void crocksdb_options_optimize_for_point_lookup(
......@@ -830,6 +840,9 @@ extern C_ROCKSDB_LIBRARY_API void crocksdb_options_set_optimize_filters_for_hits
extern C_ROCKSDB_LIBRARY_API void
crocksdb_options_set_level_compaction_dynamic_level_bytes(crocksdb_options_t*,
unsigned char);
extern C_ROCKSDB_LIBRARY_API unsigned char
crocksdb_options_get_level_compaction_dynamic_level_bytes(
const crocksdb_options_t* const options);
extern C_ROCKSDB_LIBRARY_API void
crocksdb_options_set_max_bytes_for_level_multiplier(crocksdb_options_t*, double);
extern C_ROCKSDB_LIBRARY_API double
......@@ -841,6 +854,10 @@ extern C_ROCKSDB_LIBRARY_API void crocksdb_options_enable_statistics(
crocksdb_options_t*, unsigned char);
extern C_ROCKSDB_LIBRARY_API void crocksdb_options_reset_statistics(
crocksdb_options_t*);
extern C_ROCKSDB_LIBRARY_API bool crocksdb_load_latest_options(
const char* dbpath, crocksdb_env_t* env, crocksdb_options_t* db_options,
crocksdb_column_family_descriptor*** cf_descs, size_t* cf_descs_len,
bool ignore_unknown_options, char** errptr);
/* returns a pointer to a malloc()-ed, null terminated string */
extern C_ROCKSDB_LIBRARY_API char* crocksdb_options_statistics_get_string(
......
......@@ -22,6 +22,7 @@ use libc::{c_char, c_double, c_int, c_uchar, c_void, size_t, uint32_t, uint64_t,
use std::ffi::CStr;
pub enum Options {}
pub enum ColumnFamilyDescriptor {}
pub enum DBInstance {}
pub enum DBWriteOptions {}
pub enum DBReadOptions {}
......@@ -349,6 +350,13 @@ extern "C" {
pub fn crocksdb_options_create() -> *mut Options;
pub fn crocksdb_options_copy(opts: *const Options) -> *mut Options;
pub fn crocksdb_options_destroy(opts: *mut Options);
pub fn crocksdb_column_family_descriptor_destroy(cf_desc: *mut ColumnFamilyDescriptor);
pub fn crocksdb_name_from_column_family_descriptor(
cf_descs: *const ColumnFamilyDescriptor,
) -> *const c_char;
pub fn crocksdb_options_from_column_family_descriptor(
cf_descs: *const ColumnFamilyDescriptor,
) -> *mut Options;
pub fn crocksdb_cache_create_lru(
capacity: size_t,
shard_bits: c_int,
......@@ -542,6 +550,9 @@ extern "C" {
options: *mut Options,
v: bool,
);
pub fn crocksdb_options_get_level_compaction_dynamic_level_bytes(
options: *const Options,
) -> bool;
pub fn crocksdb_options_set_memtable_insert_with_hint_prefix_extractor(
options: *mut Options,
prefix_extractor: *mut DBSliceTransform,
......@@ -560,6 +571,15 @@ extern "C" {
err: *mut *mut c_char,
);
pub fn crocksdb_options_get_block_cache_capacity(options: *const Options) -> usize;
pub fn crocksdb_load_latest_options(
dbpath: *const c_char,
env: *mut DBEnv,
db_options: *const Options,
cf_descs: *const *mut *mut ColumnFamilyDescriptor,
cf_descs_len: *mut size_t,
ignore_unknown_options: bool,
errptr: *const *mut c_char,
) -> bool;
pub fn crocksdb_ratelimiter_create(
rate_bytes_per_sec: i64,
refill_period_us: i64,
......
......@@ -11,12 +11,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{
self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo, DBInstance,
};
use libc::c_void;
use std::path::Path;
use std::{mem, slice, str};
use crocksdb_ffi::{self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
DBInstance};
use {TableProperties, TablePropertiesCollectionView};
macro_rules! fetch_str {
......
......@@ -13,12 +13,41 @@
// limitations under the License.
//
extern crate core;
extern crate libc;
#[cfg(test)]
extern crate tempdir;
#[macro_use]
pub extern crate librocksdb_sys;
#[cfg(test)]
extern crate tempdir;
pub use compaction_filter::CompactionFilter;
pub use event_listener::{CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo};
pub use librocksdb_sys::{
self as crocksdb_ffi, new_bloom_filter, CompactionPriority, DBBottommostLevelCompaction,
DBCompactionStyle, DBCompressionType, DBEntryType, DBInfoLogLevel, DBRecoveryMode,
DBStatisticsHistogramType, DBStatisticsTickerType,
};
pub use merge_operator::MergeOperands;
pub use metadata::{ColumnFamilyMetaData, LevelMetaData, SstFileMetaData};
pub use perf_context::{get_perf_level, set_perf_level, PerfContext, PerfLevel};
pub use rocksdb::{
load_latest_options, set_external_sst_file_global_seq_no, BackupEngine, CFHandle, DBIterator,
DBVector, Env, ExternalSstFileInfo, Kv, Range, SeekKey, SequentialFile, SstFileWriter,
Writable, WriteBatch, DB,
};
pub use rocksdb_options::{
BlockBasedOptions, CColumnFamilyDescriptor, ColumnFamilyOptions, CompactOptions,
CompactionOptions, DBOptions, EnvOptions, FifoCompactionOptions, HistogramData,
IngestExternalFileOptions, RateLimiter, ReadOptions, RestoreOptions, WriteOptions,
};
pub use slice_transform::SliceTransform;
pub use table_filter::TableFilter;
pub use table_properties::{
TableProperties, TablePropertiesCollection, TablePropertiesCollectionView,
UserCollectedProperties,
};
pub use table_properties_collector::TablePropertiesCollector;
pub use table_properties_collector_factory::TablePropertiesCollectorFactory;
mod compaction_filter;
pub mod comparator;
......@@ -33,26 +62,3 @@ mod table_filter;
mod table_properties;
mod table_properties_collector;
mod table_properties_collector_factory;
pub use compaction_filter::CompactionFilter;
pub use event_listener::{CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo};
pub use librocksdb_sys::{self as crocksdb_ffi, new_bloom_filter, CompactionPriority,
DBBottommostLevelCompaction, DBCompactionStyle, DBCompressionType,
DBEntryType, DBInfoLogLevel, DBRecoveryMode, DBStatisticsHistogramType,
DBStatisticsTickerType};
pub use merge_operator::MergeOperands;
pub use metadata::{ColumnFamilyMetaData, LevelMetaData, SstFileMetaData};
pub use perf_context::{get_perf_level, set_perf_level, PerfContext, PerfLevel};
pub use rocksdb::{set_external_sst_file_global_seq_no, BackupEngine, CFHandle, DBIterator,
DBVector, Env, ExternalSstFileInfo, Kv, Range, SeekKey, SequentialFile,
SstFileWriter, Writable, WriteBatch, DB};
pub use rocksdb_options::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions,
CompactionOptions, DBOptions, EnvOptions, FifoCompactionOptions,
HistogramData, IngestExternalFileOptions, RateLimiter, ReadOptions,
RestoreOptions, WriteOptions};
pub use slice_transform::SliceTransform;
pub use table_filter::TableFilter;
pub use table_properties::{TableProperties, TablePropertiesCollection,
TablePropertiesCollectionView, UserCollectedProperties};
pub use table_properties_collector::TablePropertiesCollector;
pub use table_properties_collector_factory::TablePropertiesCollectorFactory;
......@@ -12,15 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBBackupEngine, DBCFHandle, DBCompressionType, DBEnv, DBInstance,
DBPinnableSlice, DBSequentialFile, DBStatisticsHistogramType,
DBStatisticsTickerType, DBWriteBatch};
use crocksdb_ffi::{
self, DBBackupEngine, DBCFHandle, DBCompressionType, DBEnv, DBInstance, DBPinnableSlice,
DBSequentialFile, DBStatisticsHistogramType, DBStatisticsTickerType, DBWriteBatch,
};
use libc::{self, c_char, c_int, c_void, size_t};
use metadata::ColumnFamilyMetaData;
use rocksdb_options::{ColumnFamilyDescriptor, ColumnFamilyOptions, CompactOptions,
use rocksdb_options::{
CColumnFamilyDescriptor, ColumnFamilyDescriptor, ColumnFamilyOptions, CompactOptions,
CompactionOptions, DBOptions, EnvOptions, FlushOptions, HistogramData,
IngestExternalFileOptions, ReadOptions, RestoreOptions, UnsafeSnap,
WriteOptions};
IngestExternalFileOptions, ReadOptions, RestoreOptions, UnsafeSnap, WriteOptions,
};
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
......@@ -251,6 +253,7 @@ impl<D: Deref<Target = DB>> Drop for DBIterator<D> {
unsafe impl<D: Deref<Target = DB> + Send> Send for DBIterator<D> {}
unsafe impl<D: Deref<Target = DB> + Send + Sync> Send for Snapshot<D> {}
unsafe impl<D: Deref<Target = DB> + Send + Sync> Sync for Snapshot<D> {}
impl<D: Deref<Target = DB> + Clone> Snapshot<D> {
......@@ -515,7 +518,7 @@ impl DB {
Err(_) => {
return Err("Failed to convert path to CString when list \
column families"
.to_owned())
.to_owned());
}
};
......@@ -628,7 +631,7 @@ impl DB {
let cname = match CString::new(cfd.name.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert path to CString when opening rocksdb".to_owned())
return Err("Failed to convert path to CString when opening rocksdb".to_owned());
}
};
let cname_ptr = cname.as_ptr();
......@@ -1317,7 +1320,7 @@ impl DB {
return Err(
"Failed to convert restore_db_path to CString when restoring rocksdb"
.to_owned(),
)
);
}
};
......@@ -1327,7 +1330,7 @@ impl DB {
return Err(
"Failed to convert restore_wal_path to CString when restoring rocksdb"
.to_owned(),
)
);
}
};
......@@ -1769,7 +1772,7 @@ impl BackupEngine {
return Err(
"Failed to convert path to CString when opening rocksdb backup engine"
.to_owned(),
)
);
}
};
......@@ -1980,6 +1983,7 @@ pub struct Env {
}
unsafe impl Send for Env {}
unsafe impl Sync for Env {}
impl Default for Env {
......@@ -2106,6 +2110,42 @@ pub fn set_external_sst_file_global_seq_no(
}
}
/// Loads the most recently persisted OPTIONS file for the database at
/// `dbpath`.
///
/// Returns `Ok(None)` when no OPTIONS file exists yet (e.g. the database
/// was never created), `Ok(Some((db_options, cf_descs)))` on success, and
/// `Err` with a message on any other failure (including a path that
/// contains an interior NUL byte).
pub fn load_latest_options(
    dbpath: &str,
    env: &Env,
    ignore_unknown_options: bool,
) -> Result<Option<(DBOptions, Vec<CColumnFamilyDescriptor>)>, String> {
    const ERR_CONVERT_PATH: &str = "Failed to convert path to CString when load latest options";
    let dbpath = CString::new(dbpath.as_bytes()).map_err(|_| ERR_CONVERT_PATH.to_owned())?;
    let db_options = DBOptions::new();
    unsafe {
        // Out-parameters filled by the C side on success: a malloc()-ed
        // array of descriptor pointers and its length.
        let mut raw_cf_descs: *mut *mut crocksdb_ffi::ColumnFamilyDescriptor = ptr::null_mut();
        let mut cf_descs_len: size_t = 0;

        let ok = ffi_try!(crocksdb_load_latest_options(
            dbpath.as_ptr(),
            env.inner,
            db_options.inner,
            &mut raw_cf_descs,
            &mut cf_descs_len,
            ignore_unknown_options
        ));
        // `false` without an error (ffi_try would have returned Err)
        // means the OPTIONS file was simply not found.
        if !ok {
            return Ok(None);
        }

        // SAFETY: on success the C side guarantees `raw_cf_descs` points
        // to `cf_descs_len` valid descriptor pointers.
        let cf_descs_list = slice::from_raw_parts(raw_cf_descs, cf_descs_len);
        // Each wrapper takes ownership of one element; Drop later calls
        // crocksdb_column_family_descriptor_destroy on it.
        let cf_descs = cf_descs_list
            .into_iter()
            .map(|raw_cf_desc| CColumnFamilyDescriptor::from_raw(*raw_cf_desc))
            .collect();

        // The array itself was malloc()-ed by the C side, so free() it
        // here; the elements are now owned by the wrappers above.
        libc::free(raw_cf_descs as *mut c_void);

        Ok(Some((db_options, cf_descs)))
    }
}
#[cfg(test)]
mod test {
use super::*;
......@@ -2489,8 +2529,8 @@ mod test {
}
db.flush_cf(cf_handle, true).unwrap();
let total_sst_files_size =
db.get_property_int_cf(cf_handle, "rocksdb.total-sst-files-size")
let total_sst_files_size = db
.get_property_int_cf(cf_handle, "rocksdb.total-sst-files-size")
.unwrap();
assert!(total_sst_files_size > 0);
}
......@@ -2590,4 +2630,41 @@ mod test {
let cf_opts = db.get_options_cf(cf);
assert_eq!(cf_opts.get_disable_auto_compactions(), true);
}
#[test]
fn test_load_latest_options() {
    // Round-trips options through an OPTIONS file: open a DB, create a CF
    // with a non-default setting, then verify load_latest_options reports
    // that setting back for exactly that CF.
    let path = TempDir::new("_rust_rocksdb_load_latest_option").expect("");
    // `&str` is Copy; the previous `.clone()` here was a no-op
    // (clippy::clone_on_copy).
    let dbpath = path.path().to_str().unwrap();
    let cf_name: &str = "cf_dynamic_level_bytes";

    // Before the DB exists there is no OPTIONS file, so the loader must
    // report "not found" rather than an error.
    assert!(
        load_latest_options(dbpath, &Env::default(), false)
            .unwrap()
            .is_none()
    );

    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let mut db = DB::open(opts, dbpath).unwrap();

    let mut cf_opts = ColumnFamilyOptions::new();
    cf_opts.set_level_compaction_dynamic_level_bytes(true);
    db.create_cf((cf_name, cf_opts)).unwrap();
    let cf_handle = db.cf_handle(cf_name).unwrap();
    let cf_opts = db.get_options_cf(cf_handle);
    assert!(cf_opts.get_level_compaction_dynamic_level_bytes());

    let (_, cf_descs) = load_latest_options(dbpath, &Env::default(), false)
        .unwrap()
        .unwrap();
    for cf_desc in cf_descs {
        if cf_desc.name() == cf_name {
            assert!(cf_desc.options().get_level_compaction_dynamic_level_bytes());
        } else {
            // The default CF was never given the option, so it must stay
            // disabled.
            assert!(!cf_desc.options().get_level_compaction_dynamic_level_bytes());
        }
    }
}
}
......@@ -15,11 +15,12 @@
use compaction_filter::{new_compaction_filter, CompactionFilter, CompactionFilterHandle};
use comparator::{self, compare_callback, ComparatorCallback};
use crocksdb_ffi::{self, DBBlockBasedTableOptions, DBBottommostLevelCompaction, DBCompactOptions,
DBCompactionOptions, DBCompressionType, DBFifoCompactionOptions,
DBFlushOptions, DBInfoLogLevel, DBInstance, DBRateLimiter, DBReadOptions,
DBRecoveryMode, DBRestoreOptions, DBSnapshot, DBStatisticsHistogramType,
DBStatisticsTickerType, DBWriteOptions, Options};
use crocksdb_ffi::{
self, DBBlockBasedTableOptions, DBBottommostLevelCompaction, DBCompactOptions,
DBCompactionOptions, DBCompressionType, DBFifoCompactionOptions, DBFlushOptions,
DBInfoLogLevel, DBInstance, DBRateLimiter, DBReadOptions, DBRecoveryMode, DBRestoreOptions,
DBSnapshot, DBStatisticsHistogramType, DBStatisticsTickerType, DBWriteOptions, Options,
};
use event_listener::{new_event_listener, EventListener};
use libc::{self, c_double, c_int, c_uchar, c_void, size_t};
use merge_operator::MergeFn;
......@@ -31,8 +32,9 @@ use std::mem;
use std::path::Path;
use std::sync::Arc;
use table_filter::{destroy_table_filter, table_filter, TableFilter};
use table_properties_collector_factory::{new_table_properties_collector_factory,
TablePropertiesCollectorFactory};
use table_properties_collector_factory::{
new_table_properties_collector_factory, TablePropertiesCollectorFactory,
};
#[derive(Default, Debug)]
pub struct HistogramData {
......@@ -1176,6 +1178,12 @@ impl ColumnFamilyOptions {
}
}
/// Returns whether `level_compaction_dynamic_level_bytes` is currently
/// enabled for this column family (paired with
/// `set_level_compaction_dynamic_level_bytes`).
pub fn get_level_compaction_dynamic_level_bytes(&self) -> bool {
    unsafe {
        // SAFETY: self.inner is a valid options handle for the lifetime
        // of this ColumnFamilyOptions.
        crocksdb_ffi::crocksdb_options_get_level_compaction_dynamic_level_bytes(self.inner)
    }
}
pub fn set_soft_pending_compaction_bytes_limit(&mut self, size: u64) {
unsafe {
crocksdb_ffi::crocksdb_options_set_soft_pending_compaction_bytes_limit(
......@@ -1425,6 +1433,45 @@ impl<'a> From<(&'a str, ColumnFamilyOptions)> for ColumnFamilyDescriptor<'a> {
}
}
/// Owning Rust wrapper around a C `crocksdb_column_family_descriptor`
/// (a column family name + options pair), as produced by
/// `load_latest_options`. The underlying descriptor is destroyed on drop.
pub struct CColumnFamilyDescriptor {
    // Raw pointer owned by this wrapper; freed in Drop via
    // crocksdb_column_family_descriptor_destroy.
    inner: *mut crocksdb_ffi::ColumnFamilyDescriptor,
}

impl CColumnFamilyDescriptor {
    /// Takes ownership of a raw descriptor pointer.
    ///
    /// # Safety
    /// `inner` must be a valid pointer obtained from the C API (e.g.
    /// `crocksdb_load_latest_options`) and must not be freed elsewhere —
    /// this wrapper destroys it when dropped.
    pub unsafe fn from_raw(
        inner: *mut crocksdb_ffi::ColumnFamilyDescriptor,
    ) -> CColumnFamilyDescriptor {
        assert!(
            !inner.is_null(),
            "could not new rocksdb column_family_descriptor with null inner"
        );

        CColumnFamilyDescriptor { inner }
    }

    /// Column family name. Borrows from the descriptor; panics if the
    /// name is not valid UTF-8.
    pub fn name<'a>(&'a self) -> &'a str {
        unsafe {
            let raw_cf_name = crocksdb_ffi::crocksdb_name_from_column_family_descriptor(self.inner);
            CStr::from_ptr(raw_cf_name).to_str().unwrap()
        }
    }

    /// Copies this descriptor's column family options into a new owned
    /// `ColumnFamilyOptions` (the C side allocates a fresh options
    /// object, which `from_raw` takes ownership of).
    pub fn options(&self) -> ColumnFamilyOptions {
        unsafe {
            let raw_cf_options =
                crocksdb_ffi::crocksdb_options_from_column_family_descriptor(self.inner);
            ColumnFamilyOptions::from_raw(raw_cf_options)
        }
    }
}

impl Drop for CColumnFamilyDescriptor {
    fn drop(&mut self) {
        unsafe {
            // Releases the C-side descriptor this wrapper owns.
            crocksdb_ffi::crocksdb_column_family_descriptor_destroy(self.inner);
        }
    }
}
pub struct FlushOptions {
pub inner: *mut DBFlushOptions,
}
......
......@@ -11,9 +11,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBTableProperties, DBTablePropertiesCollection,
DBTablePropertiesCollectionIterator, DBTableProperty,
DBUserCollectedProperties, DBUserCollectedPropertiesIterator};
use crocksdb_ffi::{
self, DBTableProperties, DBTablePropertiesCollection, DBTablePropertiesCollectionIterator,
DBTableProperty, DBUserCollectedProperties, DBUserCollectedPropertiesIterator,
};
use libc::size_t;
use std::marker::PhantomData;
use std::ops::{Deref, Index};
......
......@@ -11,8 +11,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{ColumnFamilyOptions, CompactOptions, DBBottommostLevelCompaction, DBOptions, Range,
Writable, DB};
use rocksdb::{
ColumnFamilyOptions, CompactOptions, DBBottommostLevelCompaction, DBOptions, Range, Writable,
DB,
};
use tempdir::TempDir;
#[test]
......@@ -99,7 +101,8 @@ fn test_compact_range_bottommost_level_compaction() {
compact_opts.set_target_level(bottommost_level);
db.compact_range_cf_opt(cf_handle, &compact_opts, None, None);
let bottommost_files = db.get_column_family_meta_data(cf_handle)
let bottommost_files = db
.get_column_family_meta_data(cf_handle)
.get_levels()
.last()
.unwrap()
......@@ -110,7 +113,8 @@ fn test_compact_range_bottommost_level_compaction() {
// Skip bottommost level compaction
compact_opts.set_bottommost_level_compaction(DBBottommostLevelCompaction::Skip);
db.compact_range_cf_opt(cf_handle, &compact_opts, None, None);
let bottommost_files = db.get_column_family_meta_data(cf_handle)
let bottommost_files = db
.get_column_family_meta_data(cf_handle)
.get_levels()
.last()
.unwrap()
......@@ -120,7 +124,8 @@ fn test_compact_range_bottommost_level_compaction() {
// Force bottommost level compaction
compact_opts.set_bottommost_level_compaction(DBBottommostLevelCompaction::Force);
db.compact_range_cf_opt(cf_handle, &compact_opts, None, None);
let bottommost_files = db.get_column_family_meta_data(cf_handle)
let bottommost_files = db
.get_column_family_meta_data(cf_handle)
.get_levels()
.last()
.unwrap()
......
......@@ -404,7 +404,8 @@ fn test_mem_sst_file_writer() {
assert!(!mem_sst_path.exists());
let mut buf = Vec::new();
let mut sst = env.new_sequential_file(mem_sst_str, EnvOptions::new())
let mut sst = env
.new_sequential_file(mem_sst_str, EnvOptions::new())
.unwrap();
sst.read_to_end(&mut buf).unwrap();
......
......@@ -11,8 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{CFHandle, ColumnFamilyOptions, CompactionOptions, DBCompressionType, DBOptions,
Writable, DB};
use rocksdb::{
CFHandle, ColumnFamilyOptions, CompactionOptions, DBCompressionType, DBOptions, Writable, DB,
};
use tempdir::TempDir;
#[test]
......
......@@ -11,12 +11,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::crocksdb_ffi::{CompactionPriority, DBCompressionType, DBInfoLogLevel as InfoLogLevel,
DBStatisticsHistogramType as HistogramType,
DBStatisticsTickerType as TickerType};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions,
FifoCompactionOptions, ReadOptions, SeekKey, SliceTransform, Writable, WriteOptions,
DB};
use rocksdb::crocksdb_ffi::{
CompactionPriority, DBCompressionType, DBInfoLogLevel as InfoLogLevel,
DBStatisticsHistogramType as HistogramType, DBStatisticsTickerType as TickerType,
};
use rocksdb::{
BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions, FifoCompactionOptions,
ReadOptions, SeekKey, SliceTransform, Writable, WriteOptions, DB,
};
use std::path::Path;
use std::thread;
use std::time::Duration;
......
......@@ -11,8 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBOptions, SeekKey, SliceTransform,
Writable, DB};
use rocksdb::{
BlockBasedOptions, ColumnFamilyOptions, DBOptions, SeekKey, SliceTransform, Writable, DB,
};
use tempdir::TempDir;
struct FixedPostfixTransform {
......
......@@ -44,11 +44,13 @@ fn test_db_statistics() {
.is_some()
);
let get_micros = db.get_statistics_histogram(HistogramType::GetMicros)
let get_micros = db
.get_statistics_histogram(HistogramType::GetMicros)
.unwrap();
assert!(get_micros.max > 0.0);
db.reset_statistics();
let get_micros = db.get_statistics_histogram(HistogramType::GetMicros)
let get_micros = db
.get_statistics_histogram(HistogramType::GetMicros)
.unwrap();
assert_eq!(get_micros.max, 0.0);
}
......
......@@ -12,9 +12,11 @@
// limitations under the License.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rocksdb::{ColumnFamilyOptions, DBEntryType, DBOptions, Range, ReadOptions, SeekKey,
TableFilter, TableProperties, TablePropertiesCollection, TablePropertiesCollector,
TablePropertiesCollectorFactory, UserCollectedProperties, Writable, DB};
use rocksdb::{
ColumnFamilyOptions, DBEntryType, DBOptions, Range, ReadOptions, SeekKey, TableFilter,
TableProperties, TablePropertiesCollection, TablePropertiesCollector,
TablePropertiesCollectorFactory, UserCollectedProperties, Writable, DB,
};
use std::collections::HashMap;
use std::fmt;
use tempdir::TempDir;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment