Commit 5bddde53 authored by Huachao Huang, committed by A. Hobden

*: reformat code (#208)

* *: reformat code

Just reformat the code, with no other changes.

* travis: add cargo fmt

* travis: update rust version

* travis: use stable version
parent 2a150c32
@@ -15,8 +15,13 @@ os:
matrix:
include:
- os: osx
rust: stable
- os: osx
rust: stable
- rust: stable
install:
- rustup component add rustfmt-preview
before_script:
- cargo fmt --all -- --write-mode diff
script:
- cargo build
......
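A note on the CI gate added above: `cargo fmt --all -- --write-mode diff` (rustfmt-preview's rough equivalent of today's `cargo fmt -- --check`) prints a diff and exits non-zero when any file deviates from rustfmt's output, so a pure-reformat commit like this one stays enforced. Two rules, whether from rustfmt defaults or the project's rustfmt config, do most of the work in the hunks below; a minimal illustrative sketch (the values are placeholders, not from this commit):

```rust
// Rule 1: `use` items are sorted alphabetically, which is why many hunks
// below only shuffle imports around:
use std::ffi::{CStr, CString};
use std::{fs, ptr, slice}; // `ffi` sorts before `fs`

// Rule 2: anything past the line-width limit is broken one element per line:
fn demo() {
    println!(
        "cargo:rustc-link-lib=static={}",
        "stdc++" // placeholder value, just to show the wrapping
    );
}
```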
@@ -14,10 +14,10 @@
extern crate cc;
extern crate cmake;
use std::path::PathBuf;
use cc::Build;
use std::{env, str};
use cmake::Config;
use std::path::PathBuf;
use std::{env, str};
fn main() {
let mut build = build_rocksdb();
@@ -40,7 +40,11 @@ fn link_cpp(build: &mut Build) {
// Don't link to c++ statically on windows.
return;
};
let output = tool.to_command().arg("--print-file-name").arg(stdlib).output().unwrap();
let output = tool.to_command()
.arg("--print-file-name")
.arg(stdlib)
.output()
.unwrap();
if !output.status.success() || output.stdout.is_empty() {
        // fall back to dynamic linking
return;
@@ -53,8 +57,14 @@ fn link_cpp(build: &mut Build) {
return;
}
// remove lib prefix and .a postfix.
println!("cargo:rustc-link-lib=static={}", &stdlib[3..stdlib.len() - 2]);
println!("cargo:rustc-link-search=native={}", path.parent().unwrap().display());
println!(
"cargo:rustc-link-lib=static={}",
&stdlib[3..stdlib.len() - 2]
);
println!(
"cargo:rustc-link-search=native={}",
path.parent().unwrap().display()
);
build.cpp_link_stdlib(None);
}
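The two hunks above are pure rewrapping, but the probe they touch is worth a gloss: asking a gcc/clang-style driver for `--print-file-name <archive>` yields a full path when the static C++ runtime exists, and just echoes the name back otherwise. A standalone sketch of that idea (the helper name and the echo-back behavior are assumptions about the toolchain, not part of this commit):

```rust
use std::path::PathBuf;
use std::process::Command;

// Assumed helper mirroring link_cpp's probe: ask the compiler driver where
// the static C++ runtime lives, e.g. "libstdc++.a" or "libc++.a".
fn find_static_stdlib(compiler: &str, archive: &str) -> Option<PathBuf> {
    let output = Command::new(compiler)
        .arg("--print-file-name")
        .arg(archive)
        .output()
        .ok()?;
    if !output.status.success() || output.stdout.is_empty() {
        return None; // probe failed; fall back to dynamic linking
    }
    let path = PathBuf::from(String::from_utf8_lossy(&output.stdout).trim_end());
    // gcc prints the bare name back when the archive is not found, so only
    // an absolute path counts as a hit.
    if path.is_absolute() {
        Some(path)
    } else {
        None
    }
}
```

That result is also why the hunk above strips the `lib` prefix and `.a` suffix (`&stdlib[3..stdlib.len() - 2]`) before emitting `cargo:rustc-link-lib=static=`.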
@@ -71,12 +81,18 @@ fn build_rocksdb() -> Build {
if cfg!(feature = "sse") {
cfg.define("FORCE_SSE42", "ON");
}
let dst = cfg.register_dep("Z").define("WITH_ZLIB", "ON")
.register_dep("BZIP2").define("WITH_BZ2", "ON")
.register_dep("LZ4").define("WITH_LZ4", "ON")
.register_dep("ZSTD").define("WITH_ZSTD", "ON")
.register_dep("SNAPPY").define("WITH_SNAPPY", "ON")
.build_target("rocksdb").build();
let dst = cfg.register_dep("Z")
.define("WITH_ZLIB", "ON")
.register_dep("BZIP2")
.define("WITH_BZ2", "ON")
.register_dep("LZ4")
.define("WITH_LZ4", "ON")
.register_dep("ZSTD")
.define("WITH_ZSTD", "ON")
.register_dep("SNAPPY")
.define("WITH_SNAPPY", "ON")
.build_target("rocksdb")
.build();
let build_dir = format!("{}/build", dst.display());
if cfg!(target_os = "windows") {
let profile = match &*env::var("PROFILE").unwrap_or("debug".to_owned()) {
......
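Since the rewrapped chain above is the heart of the build script, a short gloss: with the `cmake` crate, each `register_dep("X")` roughly forwards the `DEP_X_*` metadata that Cargo collected from a `links`-declaring dependency into the CMake run's search paths, and each `define` becomes a `-D` flag on the cmake command line. A minimal sketch of the same pattern (the source path and link lines are illustrative assumptions):

```rust
extern crate cmake;

fn main() {
    // Each register_dep/define pair turns a Cargo-built compression library
    // into a -DWITH_*=ON CMake option backed by the dependency's search paths.
    let dst = cmake::Config::new("rocksdb") // path to the CMake source tree
        .register_dep("Z")
        .define("WITH_ZLIB", "ON")
        .build_target("rocksdb") // build just this target, skip `install`
        .build();
    println!("cargo:rustc-link-search=native={}/build", dst.display());
    println!("cargo:rustc-link-lib=static=rocksdb");
}
```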
@@ -181,14 +181,14 @@ pub enum DBStatisticsTickerType {
GetHitL2AndUp = 29, // Get() queries served by L2 and up
CompactionKeyDropNewerEntry = 30, /* key was written with a newer value.
* Also includes keys dropped for range del. */
CompactionKeyDropObsolete = 31, // The key is obsolete.
CompactionKeyDropRangeDel = 32, // key was covered by a range tombstone.
CompactionRangeDelDropObsolete = 34, // all keys in range were deleted.
CompactionKeyDropObsolete = 31, // The key is obsolete.
CompactionKeyDropRangeDel = 32, // key was covered by a range tombstone.
CompactionRangeDelDropObsolete = 34, // all keys in range were deleted.
CompactionOptimizedDelDropObsolete = 35, // Deletions obsoleted before bottom level due to file gap optimization.
NumberKeysWritten = 36, // number of keys written to the database via the Put and Write calls
NumberKeysRead = 37, // number of keys read
NumberKeysUpdated = 38,
BytesWritten = 39, // the number of uncompressed bytes read from DB::Put, DB::Delete,
BytesWritten = 39, // the number of uncompressed bytes read from DB::Put, DB::Delete,
// DB::Merge and DB::Write
BytesRead = 40, // the number of uncompressed bytes read from DB::Get()
NumberDbSeek = 41, // the number of calls to seek/next/prev
@@ -541,7 +541,11 @@ extern "C" {
pub fn crocksdb_options_set_ratelimiter(options: *mut Options, limiter: *mut DBRateLimiter);
pub fn crocksdb_options_set_info_log(options: *mut Options, logger: *mut DBLogger);
pub fn crocksdb_options_get_block_cache_usage(options: *const Options) -> usize;
pub fn crocksdb_options_set_block_cache_capacity(options: *const Options, capacity: usize, err: *mut *mut c_char);
pub fn crocksdb_options_set_block_cache_capacity(
options: *const Options,
capacity: usize,
err: *mut *mut c_char,
);
pub fn crocksdb_options_get_block_cache_capacity(options: *const Options) -> usize;
pub fn crocksdb_ratelimiter_create(
rate_bytes_per_sec: i64,
@@ -1284,8 +1288,7 @@ extern "C" {
pub fn crocksdb_slicetransform_create(
state: *mut c_void,
destructor: extern "C" fn(*mut c_void),
transform: extern "C" fn(*mut c_void, *const u8, size_t, *mut size_t)
-> *const u8,
transform: extern "C" fn(*mut c_void, *const u8, size_t, *mut size_t) -> *const u8,
in_domain: extern "C" fn(*mut c_void, *const u8, size_t) -> u8,
in_range: extern "C" fn(*mut c_void, *const u8, size_t) -> u8,
name: extern "C" fn(*mut c_void) -> *const c_char,
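The signature rewrap above is a good excuse to spell out what the callbacks of `crocksdb_slicetransform_create` do (`in_range` is analogous to `in_domain`, and `name` just returns an identifying C string). A hypothetical trio for a fixed-length prefix transform; the boxed-state layout and prefix length are illustrative only, not this crate's implementation:

```rust
use libc::{c_void, size_t};

// Assume `state` was produced as Box::into_raw(Box::new(3usize)) as *mut c_void,
// i.e. a boxed prefix length.

extern "C" fn destructor(state: *mut c_void) {
    // Reclaim the boxed state so it is freed exactly once.
    unsafe { drop(Box::from_raw(state as *mut usize)) };
}

extern "C" fn transform(
    state: *mut c_void,
    key: *const u8,
    key_len: size_t,
    dst_len: *mut size_t,
) -> *const u8 {
    // Report the prefix length through the out-parameter and return a
    // pointer into the original key; no allocation changes hands.
    let len = unsafe { *(state as *const usize) };
    unsafe { *dst_len = key_len.min(len) };
    key
}

extern "C" fn in_domain(state: *mut c_void, _key: *const u8, key_len: size_t) -> u8 {
    // Only keys long enough to carry a full prefix are in the domain.
    let len = unsafe { *(state as *const usize) };
    (key_len >= len) as u8
}
```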
@@ -1509,8 +1512,12 @@ extern "C" {
pub fn crocksdb_compactionjobinfo_output_level(info: *const DBCompactionJobInfo) -> c_int;
pub fn crocksdb_compactionjobinfo_input_records(info: *const DBCompactionJobInfo) -> uint64_t;
pub fn crocksdb_compactionjobinfo_output_records(info: *const DBCompactionJobInfo) -> uint64_t;
pub fn crocksdb_compactionjobinfo_total_input_bytes(info: *const DBCompactionJobInfo) -> uint64_t;
pub fn crocksdb_compactionjobinfo_total_output_bytes(info: *const DBCompactionJobInfo) -> uint64_t;
pub fn crocksdb_compactionjobinfo_total_input_bytes(
info: *const DBCompactionJobInfo,
) -> uint64_t;
pub fn crocksdb_compactionjobinfo_total_output_bytes(
info: *const DBCompactionJobInfo,
) -> uint64_t;
pub fn crocksdb_externalfileingestioninfo_cf_name(
info: *const DBIngestionInfo,
@@ -1626,8 +1633,8 @@ extern "C" {
mod test {
use super::*;
use libc::{self, c_void};
use std::{fs, ptr, slice};
use std::ffi::{CStr, CString};
use std::{fs, ptr, slice};
use tempdir::TempDir;
#[test]
......
@@ -11,14 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use {TableProperties, TablePropertiesCollectionView};
use crocksdb_ffi::{self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
DBInstance};
use libc::c_void;
use std::{mem, slice, str};
use std::path::Path;
use std::{mem, slice, str};
use crocksdb_ffi::{self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
DBInstance};
use {TableProperties, TablePropertiesCollectionView};
macro_rules! fetch_str {
($func:ident($($arg:expr),*)) => ({
......
@@ -20,18 +20,18 @@ extern crate tempdir;
#[macro_use]
pub extern crate librocksdb_sys;
mod compaction_filter;
pub mod comparator;
mod event_listener;
pub mod merge_operator;
mod metadata;
pub mod rocksdb;
pub mod rocksdb_options;
pub mod merge_operator;
pub mod comparator;
mod compaction_filter;
mod slice_transform;
mod table_filter;
mod table_properties;
mod table_properties_collector;
mod table_properties_collector_factory;
mod event_listener;
mod table_filter;
mod metadata;
pub use compaction_filter::CompactionFilter;
pub use event_listener::{CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo};
......
@@ -110,9 +110,9 @@ fn custom_merge() {
#[cfg(test)]
mod tests {
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBCompressionType, DBOptions, DB};
use rocksdb::DBCompactionStyle;
use rocksdb::DBRecoveryMode;
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBCompressionType, DBOptions, DB};
#[allow(dead_code)]
fn tuned_for_somebody_elses_disk(
......
@@ -19,7 +19,6 @@ use std::mem;
use std::ptr;
use std::slice;
pub type MergeFn = fn(&[u8], Option<&[u8]>, &mut MergeOperands) -> Vec<u8>;
pub struct MergeOperatorCallback {
@@ -96,7 +95,6 @@ pub extern "C" fn partial_merge_callback(
}
}
pub struct MergeOperands {
operands_list: *const *const c_char,
operands_list_len: *const size_t,
......
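For readers new to this module, the `MergeFn` alias that survives the reformat above is the whole user-facing contract: given the key, the existing value (if any), and the pending operands, produce the merged value. A minimal concatenating merge in that shape, assuming `MergeOperands` iterates over `&[u8]` operands as in upstream rust-rocksdb:

```rust
// Sketch of a MergeFn: join the existing value and every operand with commas.
fn concat_merge(
    _new_key: &[u8],
    existing_val: Option<&[u8]>,
    operands: &mut MergeOperands,
) -> Vec<u8> {
    let mut result: Vec<u8> = existing_val.map_or_else(Vec::new, |v| v.to_vec());
    for op in operands {
        if !result.is_empty() {
            result.push(b',');
        }
        result.extend_from_slice(op);
    }
    result
}
```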
@@ -21,15 +21,15 @@ use rocksdb_options::{ColumnFamilyDescriptor, ColumnFamilyOptions, CompactOption
CompactionOptions, DBOptions, EnvOptions, FlushOptions, HistogramData,
IngestExternalFileOptions, ReadOptions, RestoreOptions, UnsafeSnap,
WriteOptions};
use std::{fs, ptr, slice};
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::ffi::{CStr, CString};
use std::fmt::{self, Debug, Formatter};
use std::io;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::str::from_utf8;
use std::{fs, ptr, slice};
use table_properties::TablePropertiesCollection;
pub struct CFHandle {
@@ -410,8 +410,7 @@ impl DB {
const ERR_NULL_DB_ONINIT: &str = "Could not initialize database";
const ERR_NULL_CF_HANDLE: &str = "Received null column family handle from DB";
let cpath = CString::new(path.as_bytes())
.map_err(|_| ERR_CONVERT_PATH.to_owned())?;
let cpath = CString::new(path.as_bytes()).map_err(|_| ERR_CONVERT_PATH.to_owned())?;
fs::create_dir_all(&Path::new(path)).map_err(|e| {
format!(
"Failed to create rocksdb directory: \
@@ -514,11 +513,9 @@ impl DB {
let cpath = match CString::new(path.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(
"Failed to convert path to CString when list \
column families"
.to_owned(),
)
return Err("Failed to convert path to CString when list \
column families"
.to_owned())
}
};
@@ -631,9 +628,7 @@ impl DB {
let cname = match CString::new(cfd.name.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err(
"Failed to convert path to CString when opening rocksdb".to_owned(),
)
return Err("Failed to convert path to CString when opening rocksdb".to_owned())
}
};
let cname_ptr = cname.as_ptr();
@@ -1369,8 +1364,7 @@ impl DB {
) -> Result<TablePropertiesCollection, String> {
unsafe {
let props = ffi_try!(crocksdb_get_properties_of_all_tables_cf(
self.inner,
cf.inner
self.inner, cf.inner
));
Ok(TablePropertiesCollection::from_raw(props))
}
@@ -1845,9 +1839,10 @@ impl SstFileWriter {
Ok(p) => p,
};
unsafe {
Ok(ffi_try!(
crocksdb_sstfilewriter_open(self.inner, path.as_ptr())
))
Ok(ffi_try!(crocksdb_sstfilewriter_open(
self.inner,
path.as_ptr()
)))
}
}
......
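Several hunks in this file only rewrap `ffi_try!` invocations, so a sketch of what such a macro expands to may help. This is a hypothetical reconstruction (the real definition lives outside this diff, and the `error_message` helper is an assumption): it appends an error out-parameter to the FFI call and converts a non-null error into an early return.

```rust
// Hypothetical shape of ffi_try!: add a *mut *mut c_char error slot to the
// crocksdb FFI call and turn a reported error into Err(String).
macro_rules! ffi_try {
    ($func:ident($($arg:expr),*)) => ({
        let mut err: *mut ::libc::c_char = ::std::ptr::null_mut();
        let res = crocksdb_ffi::$func($($arg),*, &mut err);
        if !err.is_null() {
            return Err(crocksdb_ffi::error_message(err));
        }
        res
    });
}
```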
@@ -22,8 +22,8 @@ use crocksdb_ffi::{self, DBBlockBasedTableOptions, DBCompactOptions, DBCompactio
Options};
use event_listener::{new_event_listener, EventListener};
use libc::{self, c_double, c_int, c_uchar, c_void, size_t};
use merge_operator::{self, full_merge_callback, partial_merge_callback, MergeOperatorCallback};
use merge_operator::MergeFn;
use merge_operator::{self, full_merge_callback, partial_merge_callback, MergeOperatorCallback};
use rocksdb::Env;
use slice_transform::{new_slice_transform, SliceTransform};
use std::ffi::{CStr, CString};
@@ -119,8 +119,7 @@ impl BlockBasedOptions {
pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
unsafe {
crocksdb_ffi::crocksdb_block_based_options_set_cache_index_and_filter_blocks(
self.inner,
v as u8,
self.inner, v as u8,
);
}
}
@@ -144,16 +143,15 @@ impl BlockBasedOptions {
pub fn set_pin_l0_filter_and_index_blocks_in_cache(&mut self, v: bool) {
unsafe {
crocksdb_ffi::crocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(
self.inner,
v as u8);
self.inner, v as u8,
);
}
}
pub fn set_read_amp_bytes_per_bit(&mut self, v: u32) {
unsafe {
crocksdb_ffi::crocksdb_block_based_options_set_read_amp_bytes_per_bit(
self.inner,
v as c_int,
self.inner, v as c_int,
)
}
}
@@ -362,8 +360,7 @@ impl ReadOptions {
pub fn set_background_purge_on_iterator_cleanup(&mut self, v: bool) {
unsafe {
crocksdb_ffi::crocksdb_readoptions_set_background_purge_on_iterator_cleanup(
self.inner,
v,
self.inner, v,
);
}
}
@@ -647,8 +644,7 @@ impl DBOptions {
pub fn set_use_direct_io_for_flush_and_compaction(&mut self, v: bool) {
unsafe {
crocksdb_ffi::crocksdb_options_set_use_direct_io_for_flush_and_compaction(
self.inner,
v,
self.inner, v,
);
}
}
@@ -746,8 +742,7 @@ impl DBOptions {
) -> Option<String> {
unsafe {
let value = crocksdb_ffi::crocksdb_options_statistics_get_histogram_string(
self.inner,
hist_type,
self.inner, hist_type,
);
if value.is_null() {
@@ -1151,7 +1146,9 @@ impl ColumnFamilyOptions {
pub fn set_max_bytes_for_level_multiplier(&mut self, mul: i32) {
unsafe {
crocksdb_ffi::crocksdb_options_set_max_bytes_for_level_multiplier(self.inner, mul as f64);
crocksdb_ffi::crocksdb_options_set_max_bytes_for_level_multiplier(
self.inner, mul as f64,
);
}
}
@@ -1176,8 +1173,7 @@ impl ColumnFamilyOptions {
pub fn set_soft_pending_compaction_bytes_limit(&mut self, size: u64) {
unsafe {
crocksdb_ffi::crocksdb_options_set_soft_pending_compaction_bytes_limit(
self.inner,
size,
self.inner, size,
);
}
}
@@ -1191,8 +1187,7 @@ impl ColumnFamilyOptions {
pub fn set_hard_pending_compaction_bytes_limit(&mut self, size: u64) {
unsafe {
crocksdb_ffi::crocksdb_options_set_hard_pending_compaction_bytes_limit(
self.inner,
size,
self.inner, size,
);
}
}
@@ -1216,8 +1211,7 @@ impl ColumnFamilyOptions {
pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
unsafe {
crocksdb_ffi::crocksdb_options_set_min_write_buffer_number_to_merge(
self.inner,
to_merge,
self.inner, to_merge,
);
}
}
@@ -1342,8 +1336,7 @@ impl ColumnFamilyOptions {
};
let transform = new_slice_transform(c_name, transform)?;
crocksdb_ffi::crocksdb_options_set_memtable_insert_with_hint_prefix_extractor(
self.inner,
transform,
self.inner, transform,
);
Ok(())
}
@@ -1361,7 +1354,7 @@ impl ColumnFamilyOptions {
pub fn set_block_cache_capacity(&self, capacity: u64) -> Result<(), String> {
unsafe {
ffi_try!(crocksdb_options_set_block_cache_capacity(
ffi_try!(crocksdb_options_set_block_cache_capacity(
self.inner,
capacity as usize
));
......
@@ -11,7 +11,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBSliceTransform};
use libc::{c_char, c_void, size_t};
use std::ffi::CString;
@@ -85,7 +84,6 @@ extern "C" fn in_range(transform: *mut c_void, key: *const u8, key_len: size_t)
}
}
pub unsafe fn new_slice_transform(
c_name: CString,
f: Box<SliceTransform>,
......
@@ -11,15 +11,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBTableProperties, DBTablePropertiesCollection,
DBTablePropertiesCollectionIterator, DBTableProperty,
DBUserCollectedProperties, DBUserCollectedPropertiesIterator};
use libc::size_t;
use std::{mem, slice, str};
use std::marker::PhantomData;
use std::ops::{Deref, Index};
use std::{mem, slice, str};
pub struct TablePropertiesCollectionView(DBTablePropertiesCollection);
......
@@ -4,20 +4,20 @@ extern crate rand;
extern crate rocksdb;
extern crate tempdir;
mod test_iterator;
mod test_multithreaded;
mod test_column_family;
mod test_compaction_filter;
mod test_compact_range;
mod test_rocksdb_options;
mod test_compaction_filter;
mod test_delete_files_in_range;
mod test_delete_range;
mod test_event_listener;
mod test_ingest_external_file;
mod test_slice_transform;
mod test_iterator;
mod test_metadata;
mod test_multithreaded;
mod test_prefix_extractor;
mod test_rate_limiter;
mod test_read_only;
mod test_rocksdb_options;
mod test_slice_transform;
mod test_statistics;
mod test_table_properties;
mod test_event_listener;
mod test_delete_range;
mod test_delete_files_in_range;
mod test_read_only;
mod test_rate_limiter;
mod test_metadata;
@@ -11,10 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DBOptions, Range, Writable, DB, CompactOptions, ColumnFamilyOptions};
use rocksdb::{ColumnFamilyOptions, CompactOptions, DBOptions, Range, Writable, DB};
use tempdir::TempDir;
#[test]
fn test_compact_range() {
let path = TempDir::new("_rust_rocksdb_test_compact_range").expect("");
@@ -53,7 +52,11 @@ fn test_compact_range_change_level() {
opts.create_if_missing(true);
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.set_level_zero_file_num_compaction_trigger(10);
let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec![("default", cf_opts)]).unwrap();
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec![("default", cf_opts)],
).unwrap();
let samples = vec![
(b"k1".to_vec(), b"value--------1".to_vec()),
(b"k2".to_vec(), b"value--------2".to_vec()),
......
@@ -12,8 +12,8 @@
// limitations under the License.
use rocksdb::{ColumnFamilyOptions, CompactionFilter, DBOptions, Writable, DB};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, RwLock};
use tempdir::TempDir;
struct Filter {
......
@@ -512,7 +512,6 @@ fn test_delete_range_case_6() {
assert_eq!(before, after);
}
#[test]
fn test_delete_range_compact() {
let path = TempDir::new("_rust_rocksdb_test_delete_range_case_6").expect("");
@@ -958,7 +957,6 @@ fn test_delete_range_prefix_bloom_case_4() {
let db2 = DB::open_cf(opts, path_str, vec![(cf, cf_opts)]).unwrap();
let handle2 = get_cf_handle(&db2, cf).unwrap();
let samples_b = vec![(b"keyd44444", b"value4"), (b"keye55555", b"value5")];
for (k, v) in samples_b {
db2.put_cf(handle2, k, v).unwrap();
@@ -991,7 +989,6 @@ fn test_delete_range_prefix_bloom_case_4() {
assert_eq!(before, after);
}
#[test]
fn test_delete_range_prefix_bloom_case_5() {
let path = TempDir::new("_rust_rocksdb_test_delete_range_prefix_bloom_case_5").expect("");
@@ -1418,7 +1415,6 @@ fn test_delete_range_sst_files() {
);
}
#[test]
fn test_delete_range_ingest_file() {
let path = TempDir::new("_rust_rocksdb_test_delete_range_ingest_file").expect("");
......
@@ -11,11 +11,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::*;
use std::sync::Arc;
use std::sync::atomic::*;
use std::sync::Arc;
use tempdir::TempDir;
use test_ingest_external_file::gen_sst;
@@ -122,8 +120,8 @@ fn test_event_listener_basic() {
drop(db);
assert_eq!(counter.drop_count.load(Ordering::SeqCst), 1);
assert!(
counter.input_records.load(Ordering::SeqCst) >
counter.output_records.load(Ordering::SeqCst)
counter.input_records.load(Ordering::SeqCst)
> counter.output_records.load(Ordering::SeqCst)
);
assert!(
counter.input_bytes.load(Ordering::SeqCst) > counter.output_bytes.load(Ordering::SeqCst)
......
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::*;
use rocksdb::rocksdb::Snapshot;
use rocksdb::*;
use std::ops::Deref;
use std::sync::*;
use std::thread;
@@ -306,15 +306,7 @@ fn test_total_order_seek() {
cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
let keys = vec![
b"k1-1",
b"k1-2",
b"k1-3",
b"k2-1",
b"k2-2",
b"k2-3",
b"k3-1",
b"k3-2",
b"k3-3",
b"k1-1", b"k1-2", b"k1-3", b"k2-1", b"k2-2", b"k2-3", b"k3-1", b"k3-2", b"k3-3",
];
let db = DB::open_cf(
opts,
......
@@ -28,23 +28,29 @@ pub fn test_multithreaded() {
db.put(b"key", b"value1").unwrap();
let db1 = db.clone();
let j1 = thread::spawn(move || for _ in 1..N {
db1.put(b"key", b"value1").unwrap();
let j1 = thread::spawn(move || {
for _ in 1..N {
db1.put(b"key", b"value1").unwrap();
}
});
let db2 = db.clone();
let j2 = thread::spawn(move || for _ in 1..N {
db2.put(b"key", b"value2").unwrap();
let j2 = thread::spawn(move || {
for _ in 1..N {
db2.put(b"key", b"value2").unwrap();
}
});
let db3 = db.clone();
let j3 = thread::spawn(move || for _ in 1..N {
match db3.get(b"key") {
Ok(Some(v)) => if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
},
_ => {
assert!(false);
let j3 = thread::spawn(move || {
for _ in 1..N {
match db3.get(b"key") {
Ok(Some(v)) => if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
},
_ => {
assert!(false);
}
}
}
});
......
@@ -32,15 +32,7 @@ impl SliceTransform for FixedPrefixTransform {
fn test_prefix_extractor_compatibility() {
let path = TempDir::new("_rust_rocksdb_prefix_extractor_compatibility").expect("");
let keys = vec![
b"k1-0",
b"k1-1",
b"k1-2",
b"k1-3",
b"k1-4",
b"k1-5",
b"k1-6",
b"k1-7",
b"k1-8",
b"k1-0", b"k1-1", b"k1-2", b"k1-3", b"k1-4", b"k1-5", b"k1-6", b"k1-7", b"k1-8",
];
// create db with no prefix extractor, and insert data
......
@@ -43,7 +43,9 @@ fn test_rate_limiter() {
fn test_rate_limiter_sendable() {
let rate_limiter = RateLimiter::new(10 * 1024 * 1024, 100 * 1000, 10);
let handle = thread::spawn(move || { rate_limiter.request(1024, 0); });
let handle = thread::spawn(move || {
rate_limiter.request(1024, 0);
});
handle.join().unwrap();
}
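Beyond the closure rewrap, the constructor arguments in this test deserve a gloss: they mirror RocksDB's `NewGenericRateLimiter(rate_bytes_per_sec, refill_period_us, fairness)`. A usage sketch, grounded in the values above; the priority encoding in the comment is an assumption from RocksDB's `IO_LOW = 0`:

```rust
use rocksdb::RateLimiter;

fn main() {
    let limiter = RateLimiter::new(
        10 * 1024 * 1024, // budget: 10 MiB of writes per second
        100 * 1000,       // refill the token bucket every 100 ms
        10,               // fairness: low-pri requests win 1/10 of contended slots
    );
    limiter.request(1024, 0); // charge 1 KiB against the budget at low priority
}
```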
@@ -2,12 +2,12 @@ use rocksdb::{DBOptions, Writable, DB}
use tempdir::TempDir;
macro_rules! check_kv {
( $db:expr, $key:expr, $val:expr ) => {
($db:expr, $key:expr, $val:expr) => {
assert_eq!($db.get($key).unwrap().unwrap(), $val);
};
( $db:expr, $cf:expr, $key:expr, $val:expr ) => {
($db:expr, $cf:expr, $key:expr, $val:expr) => {
assert_eq!($db.get_cf($cf, $key).unwrap().unwrap(), $val);
}
};
}
#[test]
......
@@ -11,18 +11,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions,
FifoCompactionOptions, ReadOptions, SeekKey, SliceTransform, Writable, WriteOptions,
DB};
use rocksdb::crocksdb_ffi::{CompactionPriority, DBCompressionType, DBInfoLogLevel as InfoLogLevel,
DBStatisticsHistogramType as HistogramType,
DBStatisticsTickerType as TickerType};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions,
FifoCompactionOptions, ReadOptions, SeekKey, SliceTransform, Writable, WriteOptions,
DB};
use std::path::Path;
use std::thread;
use std::time::Duration;
use tempdir::TempDir;
#[test]
fn test_set_num_levels() {
let path = TempDir::new("_rust_rocksdb_test_set_num_levels").expect("");
......
@@ -98,10 +98,7 @@ impl fmt::Display for ExampleCollector {
write!(
f,
"keys={}, puts={}, merges={}, deletes={}",
self.num_keys,
self.num_puts,
self.num_merges,
self.num_deletes
self.num_keys, self.num_puts, self.num_merges, self.num_deletes
)
}
}
......