Commit 1953d1fa authored by Jay, committed by GitHub

change width to default 100 (#47)

parent 4c1b1655
......@@ -135,75 +135,58 @@ extern "C" {
pub fn rocksdb_options_set_block_based_table_factory(
options: *mut DBOptions,
block_options: *mut DBBlockBasedTableOptions);
pub fn rocksdb_options_increase_parallelism(options: *mut DBOptions,
threads: c_int);
pub fn rocksdb_options_optimize_level_style_compaction(
options: *mut DBOptions, memtable_memory_budget: c_int);
pub fn rocksdb_options_increase_parallelism(options: *mut DBOptions, threads: c_int);
pub fn rocksdb_options_optimize_level_style_compaction(options: *mut DBOptions,
memtable_memory_budget: c_int);
pub fn rocksdb_options_set_compaction_filter(options: *mut DBOptions,
filter: *mut DBCompactionFilter);
pub fn rocksdb_options_set_create_if_missing(options: *mut DBOptions, v: bool);
pub fn rocksdb_options_set_max_open_files(options: *mut DBOptions,
files: c_int);
pub fn rocksdb_options_set_max_open_files(options: *mut DBOptions, files: c_int);
pub fn rocksdb_options_set_use_fsync(options: *mut DBOptions, v: c_int);
pub fn rocksdb_options_set_bytes_per_sync(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_disable_data_sync(options: *mut DBOptions,
v: c_int);
pub fn rocksdb_options_set_allow_os_buffer(options: *mut DBOptions,
is_allow: bool);
pub fn rocksdb_options_set_disable_data_sync(options: *mut DBOptions, v: c_int);
pub fn rocksdb_options_set_allow_os_buffer(options: *mut DBOptions, is_allow: bool);
pub fn rocksdb_options_optimize_for_point_lookup(options: *mut DBOptions,
block_cache_size_mb: u64);
pub fn rocksdb_options_set_table_cache_numshardbits(options: *mut DBOptions,
bits: c_int);
pub fn rocksdb_options_set_max_write_buffer_number(options: *mut DBOptions,
bufno: c_int);
pub fn rocksdb_options_set_min_write_buffer_number_to_merge(
options: *mut DBOptions, bufno: c_int);
pub fn rocksdb_options_set_level0_file_num_compaction_trigger(
options: *mut DBOptions, no: c_int);
pub fn rocksdb_options_set_level0_slowdown_writes_trigger(
options: *mut DBOptions, no: c_int);
pub fn rocksdb_options_set_level0_stop_writes_trigger(options: *mut DBOptions,
no: c_int);
pub fn rocksdb_options_set_write_buffer_size(options: *mut DBOptions,
bytes: u64);
pub fn rocksdb_options_set_target_file_size_base(options: *mut DBOptions,
bytes: u64);
pub fn rocksdb_options_set_target_file_size_multiplier(options: *mut DBOptions,
mul: c_int);
pub fn rocksdb_options_set_max_bytes_for_level_base(options: *mut DBOptions,
bytes: u64);
pub fn rocksdb_options_set_max_bytes_for_level_multiplier(options: *mut DBOptions, mul: c_int);
pub fn rocksdb_options_set_max_log_file_size(options: *mut DBOptions,
bytes: u64);
pub fn rocksdb_options_set_max_manifest_file_size(options: *mut DBOptions,
bytes: u64);
pub fn rocksdb_options_set_table_cache_numshardbits(options: *mut DBOptions, bits: c_int);
pub fn rocksdb_options_set_max_write_buffer_number(options: *mut DBOptions, bufno: c_int);
pub fn rocksdb_options_set_min_write_buffer_number_to_merge(options: *mut DBOptions,
bufno: c_int);
pub fn rocksdb_options_set_level0_file_num_compaction_trigger(options: *mut DBOptions,
no: c_int);
pub fn rocksdb_options_set_level0_slowdown_writes_trigger(options: *mut DBOptions, no: c_int);
pub fn rocksdb_options_set_level0_stop_writes_trigger(options: *mut DBOptions, no: c_int);
pub fn rocksdb_options_set_write_buffer_size(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_target_file_size_base(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_target_file_size_multiplier(options: *mut DBOptions, mul: c_int);
pub fn rocksdb_options_set_max_bytes_for_level_base(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_max_bytes_for_level_multiplier(options: *mut DBOptions,
mul: c_int);
pub fn rocksdb_options_set_max_log_file_size(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_max_manifest_file_size(options: *mut DBOptions, bytes: u64);
pub fn rocksdb_options_set_hash_skip_list_rep(options: *mut DBOptions,
bytes: u64,
a1: i32,
a2: i32);
pub fn rocksdb_options_set_compaction_style(options: *mut DBOptions,
cs: DBCompactionStyle);
pub fn rocksdb_options_set_compaction_style(options: *mut DBOptions, cs: DBCompactionStyle);
pub fn rocksdb_options_set_compression(options: *mut DBOptions,
compression_style_no: DBCompressionType);
pub fn rocksdb_options_set_compression_per_level(options: *mut DBOptions,
level_values: *const DBCompressionType,
num_levels: size_t);
pub fn rocksdb_options_set_max_background_compactions(
options: *mut DBOptions, max_bg_compactions: c_int);
level_values: *const DBCompressionType,
num_levels: size_t);
pub fn rocksdb_options_set_max_background_compactions(options: *mut DBOptions,
max_bg_compactions: c_int);
pub fn rocksdb_options_set_max_background_flushes(options: *mut DBOptions,
max_bg_flushes: c_int);
pub fn rocksdb_options_set_filter_deletes(options: *mut DBOptions, v: bool);
pub fn rocksdb_options_set_disable_auto_compactions(options: *mut DBOptions,
v: c_int);
pub fn rocksdb_options_set_disable_auto_compactions(options: *mut DBOptions, v: c_int);
pub fn rocksdb_options_set_report_bg_io_stats(options: *mut DBOptions, v: c_int);
pub fn rocksdb_options_set_wal_recovery_mode(options: *mut DBOptions, mode: DBRecoveryMode);
pub fn rocksdb_options_enable_statistics(options: *mut DBOptions);
pub fn rocksdb_options_set_stats_dump_period_sec(options: *mut DBOptions, v: usize);
pub fn rocksdb_options_set_num_levels(options: *mut DBOptions, v: c_int);
pub fn rocksdb_filterpolicy_create_bloom_full(bits_per_key: c_int)
-> *mut DBFilterPolicy;
pub fn rocksdb_filterpolicy_create_bloom(bits_per_key: c_int)
-> *mut DBFilterPolicy;
pub fn rocksdb_filterpolicy_create_bloom_full(bits_per_key: c_int) -> *mut DBFilterPolicy;
pub fn rocksdb_filterpolicy_create_bloom(bits_per_key: c_int) -> *mut DBFilterPolicy;
pub fn rocksdb_open(options: *mut DBOptions,
path: *const c_char,
err: *mut *mut c_char)
......@@ -211,8 +194,7 @@ extern "C" {
pub fn rocksdb_writeoptions_create() -> *mut DBWriteOptions;
pub fn rocksdb_writeoptions_destroy(writeopts: *mut DBWriteOptions);
pub fn rocksdb_writeoptions_set_sync(writeopts: *mut DBWriteOptions, v: bool);
pub fn rocksdb_writeoptions_disable_WAL(writeopts: *mut DBWriteOptions,
v: c_int);
pub fn rocksdb_writeoptions_disable_WAL(writeopts: *mut DBWriteOptions, v: c_int);
pub fn rocksdb_put(db: *mut DBInstance,
writeopts: *mut DBWriteOptions,
k: *const u8,
......@@ -230,17 +212,14 @@ extern "C" {
err: *mut *mut c_char);
pub fn rocksdb_readoptions_create() -> *mut DBReadOptions;
pub fn rocksdb_readoptions_destroy(readopts: *mut DBReadOptions);
pub fn rocksdb_readoptions_set_verify_checksums(readopts: *mut DBReadOptions,
v: bool);
pub fn rocksdb_readoptions_set_fill_cache(readopts: *mut DBReadOptions,
v: bool);
pub fn rocksdb_readoptions_set_verify_checksums(readopts: *mut DBReadOptions, v: bool);
pub fn rocksdb_readoptions_set_fill_cache(readopts: *mut DBReadOptions, v: bool);
pub fn rocksdb_readoptions_set_snapshot(readopts: *mut DBReadOptions,
snapshot: *const DBSnapshot); //TODO how do I make this a const ref?
snapshot: *const DBSnapshot);
pub fn rocksdb_readoptions_set_iterate_upper_bound(readopts: *mut DBReadOptions,
k: *const u8,
kLen: size_t);
pub fn rocksdb_readoptions_set_read_tier(readopts: *mut DBReadOptions,
tier: c_int);
pub fn rocksdb_readoptions_set_read_tier(readopts: *mut DBReadOptions, tier: c_int);
pub fn rocksdb_readoptions_set_tailing(readopts: *mut DBReadOptions, v: bool);
pub fn rocksdb_get(db: *const DBInstance,
......@@ -328,8 +307,7 @@ extern "C" {
name_fn: extern fn(*mut c_void) -> *const c_char,
) -> *mut DBMergeOperator;
pub fn rocksdb_mergeoperator_destroy(mo: *mut DBMergeOperator);
pub fn rocksdb_options_set_merge_operator(options: *mut DBOptions,
mo: *mut DBMergeOperator);
pub fn rocksdb_options_set_merge_operator(options: *mut DBOptions, mo: *mut DBMergeOperator);
// Iterator
pub fn rocksdb_iter_destroy(iter: *mut DBIterator);
pub fn rocksdb_iter_valid(iter: *const DBIterator) -> bool;
......@@ -347,9 +325,7 @@ extern "C" {
batch: *mut DBWriteBatch,
err: *mut *mut c_char);
pub fn rocksdb_writebatch_create() -> *mut DBWriteBatch;
pub fn rocksdb_writebatch_create_from(rep: *const u8,
size: size_t)
-> *mut DBWriteBatch;
pub fn rocksdb_writebatch_create_from(rep: *const u8, size: size_t) -> *mut DBWriteBatch;
pub fn rocksdb_writebatch_destroy(batch: *mut DBWriteBatch);
pub fn rocksdb_writebatch_clear(batch: *mut DBWriteBatch);
pub fn rocksdb_writebatch_count(batch: *mut DBWriteBatch) -> c_int;
......@@ -375,28 +351,25 @@ extern "C" {
klen: size_t,
val: *const u8,
vlen: size_t);
pub fn rocksdb_writebatch_delete(batch: *mut DBWriteBatch,
key: *const u8,
klen: size_t);
pub fn rocksdb_writebatch_delete(batch: *mut DBWriteBatch, key: *const u8, klen: size_t);
pub fn rocksdb_writebatch_delete_cf(batch: *mut DBWriteBatch,
cf: *mut DBCFHandle,
key: *const u8,
klen: size_t);
pub fn rocksdb_writebatch_iterate(
batch: *mut DBWriteBatch,
state: *mut c_void,
put_fn: extern fn(state: *mut c_void,
k: *const u8, klen: size_t,
v: *const u8, vlen: size_t),
deleted_fn: extern fn(state: *mut c_void,
k: *const u8, klen: size_t));
pub fn rocksdb_writebatch_data(batch: *mut DBWriteBatch,
size: *mut size_t)
-> *const u8;
pub fn rocksdb_writebatch_iterate(batch: *mut DBWriteBatch,
state: *mut c_void,
put_fn: extern "C" fn(state: *mut c_void,
k: *const u8,
klen: size_t,
v: *const u8,
vlen: size_t),
deleted_fn: extern "C" fn(state: *mut c_void,
k: *const u8,
klen: size_t));
pub fn rocksdb_writebatch_data(batch: *mut DBWriteBatch, size: *mut size_t) -> *const u8;
// Comparator
pub fn rocksdb_options_set_comparator(options: *mut DBOptions,
cb: *mut DBComparator);
pub fn rocksdb_options_set_comparator(options: *mut DBOptions, cb: *mut DBComparator);
pub fn rocksdb_comparator_create(state: *mut c_void,
destroy: extern "C" fn(*mut c_void) -> (),
compare: extern "C" fn(arg: *mut c_void,
......@@ -405,8 +378,7 @@ extern "C" {
b: *const c_char,
blen: size_t)
-> c_int,
name_fn: extern "C" fn(*mut c_void)
-> *const c_char)
name_fn: extern "C" fn(*mut c_void) -> *const c_char)
-> *mut DBComparator;
pub fn rocksdb_comparator_destroy(cmp: *mut DBComparator);
......@@ -417,8 +389,8 @@ extern "C" {
column_family_names: *const *const c_char,
column_family_options: *const *const DBOptions,
column_family_handles: *const *mut DBCFHandle,
err: *mut *mut c_char
) -> *mut DBInstance;
err: *mut *mut c_char)
-> *mut DBInstance;
pub fn rocksdb_create_column_family(db: *mut DBInstance,
column_family_options: *const DBOptions,
column_family_name: *const c_char,
......@@ -431,16 +403,14 @@ extern "C" {
pub fn rocksdb_list_column_families(db: *const DBOptions,
path: *const c_char,
lencf: *mut size_t,
err: *mut *mut c_char
) -> *mut *mut c_char;
pub fn rocksdb_list_column_families_destroy(list: *mut *mut c_char,
len: size_t);
err: *mut *mut c_char)
-> *mut *mut c_char;
pub fn rocksdb_list_column_families_destroy(list: *mut *mut c_char, len: size_t);
// Flush options
pub fn rocksdb_flushoptions_create() -> *mut DBFlushOptions;
pub fn rocksdb_flushoptions_destroy(opt: *mut DBFlushOptions);
pub fn rocksdb_flushoptions_set_wait(opt: *mut DBFlushOptions,
whether_wait: bool);
pub fn rocksdb_flushoptions_set_wait(opt: *mut DBFlushOptions, whether_wait: bool);
pub fn rocksdb_flush(db: *mut DBInstance,
options: *const DBFlushOptions,
......@@ -461,8 +431,17 @@ extern "C" {
range_limit_key: *const *const u8,
range_limit_key_len: *const size_t,
sizes: *mut uint64_t);
pub fn rocksdb_compact_range(db: *mut DBInstance, start_key: *const u8, start_key_len: size_t, limit_key: *const u8, limit_key_len: size_t);
pub fn rocksdb_compact_range_cf(db: *mut DBInstance, cf: *mut DBCFHandle, start_key: *const u8, start_key_len: size_t, limit_key: *const u8, limit_key_len: size_t);
pub fn rocksdb_compact_range(db: *mut DBInstance,
start_key: *const u8,
start_key_len: size_t,
limit_key: *const u8,
limit_key_len: size_t);
pub fn rocksdb_compact_range_cf(db: *mut DBInstance,
cf: *mut DBCFHandle,
start_key: *const u8,
start_key_len: size_t,
limit_key: *const u8,
limit_key_len: size_t);
pub fn rocksdb_delete_file_in_range(db: *mut DBInstance,
range_start_key: *const u8,
range_start_key_len: size_t,
......@@ -476,28 +455,37 @@ extern "C" {
range_limit_key: *const u8,
range_limit_key_len: size_t,
err: *mut *mut c_char);
pub fn rocksdb_property_value(db: *mut DBInstance,
propname: *const c_char)
-> *mut c_char;
pub fn rocksdb_property_value(db: *mut DBInstance, propname: *const c_char) -> *mut c_char;
pub fn rocksdb_property_value_cf(db: *mut DBInstance,
cf: *mut DBCFHandle,
propname: *const c_char)
-> *mut c_char;
// Compaction filter
pub fn rocksdb_compactionfilter_create(state: *mut c_void,
destructor: extern fn(*mut c_void),
filter: extern fn(*mut c_void, c_int, *const u8, size_t, *const u8, size_t, *mut *mut u8, *mut size_t, *mut bool) -> bool,
name: extern fn(*mut c_void) -> *const c_char) -> *mut DBCompactionFilter;
pub fn rocksdb_compactionfilter_set_ignore_snapshots(filter: *mut DBCompactionFilter, ignore_snapshot: bool);
destructor: extern "C" fn(*mut c_void),
filter: extern "C" fn(*mut c_void,
c_int,
*const u8,
size_t,
*const u8,
size_t,
*mut *mut u8,
*mut size_t,
*mut bool)
-> bool,
name: extern "C" fn(*mut c_void) -> *const c_char)
-> *mut DBCompactionFilter;
pub fn rocksdb_compactionfilter_set_ignore_snapshots(filter: *mut DBCompactionFilter,
ignore_snapshot: bool);
pub fn rocksdb_compactionfilter_destroy(filter: *mut DBCompactionFilter);
}
#[cfg(test)]
mod test {
use super::*;
use libc::{self, c_void};
use std::ffi::{CStr, CString};
use std::ptr;
use libc::{self, c_void};
use super::*;
use tempdir::TempDir;
#[test]
......@@ -510,10 +498,8 @@ mod test {
rocksdb_options_optimize_level_style_compaction(opts, 0);
rocksdb_options_set_create_if_missing(opts, true);
let rustpath = TempDir::new("_rust_rocksdb_internaltest")
.expect("");
let cpath = CString::new(rustpath.path().to_str().unwrap())
.unwrap();
let rustpath = TempDir::new("_rust_rocksdb_internaltest").expect("");
let cpath = CString::new(rustpath.path().to_str().unwrap()).unwrap();
let cpath_ptr = cpath.as_ptr();
let mut err = ptr::null_mut();
......@@ -525,13 +511,7 @@ mod test {
let key = b"name\x00";
let val = b"spacejam\x00";
rocksdb_put(db,
writeopts,
key.as_ptr(),
4,
val.as_ptr(),
8,
&mut err);
rocksdb_put(db, writeopts, key.as_ptr(), 4, val.as_ptr(), 8, &mut err);
rocksdb_writeoptions_destroy(writeopts);
assert!(err.is_null(), error_message(err));
......@@ -539,12 +519,7 @@ mod test {
assert!(!readopts.is_null());
let mut val_len = 0;
rocksdb_get(db,
readopts,
key.as_ptr(),
4,
&mut val_len,
&mut err);
rocksdb_get(db, readopts, key.as_ptr(), 4, &mut val_len, &mut err);
rocksdb_readoptions_destroy(readopts);
assert!(err.is_null(), error_message(err));
......@@ -574,13 +549,11 @@ mod test {
&mut err);
assert!(err.is_null(), error_message(err));
let propname = CString::new("rocksdb.total-sst-files-size")
.unwrap();
let propname = CString::new("rocksdb.total-sst-files-size").unwrap();
let value = rocksdb_property_value(db, propname.as_ptr());
assert!(!value.is_null());
let sst_size =
CStr::from_ptr(value).to_str().unwrap().parse::<u64>().unwrap();
let sst_size = CStr::from_ptr(value).to_str().unwrap().parse::<u64>().unwrap();
assert!(sst_size > 0);
libc::free(value as *mut c_void);
......
reorder_imports = true
max_width = 80
ideal_width = 80
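(Context: the rustfmt.toml hunk above shows the old explicit 80-column settings. Per the commit title, formatting now uses a maximum width of 100, presumably by raising or dropping these keys; the new file contents are not visible in this excerpt. The re-wrapped declarations in the rest of this diff reflect that wider limit.)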
......@@ -66,20 +66,18 @@ impl Drop for CompactionFilterHandle {
}
}
pub unsafe fn new_compaction_filter
(c_name: CString,
ignore_snapshots: bool,
f: Box<CompactionFilter>)
-> Result<CompactionFilterHandle, String> {
pub unsafe fn new_compaction_filter(c_name: CString,
ignore_snapshots: bool,
f: Box<CompactionFilter>)
-> Result<CompactionFilterHandle, String> {
let proxy = Box::into_raw(Box::new(CompactionFilterProxy {
name: c_name,
filter: f,
}));
let filter =
rocksdb_ffi::rocksdb_compactionfilter_create(proxy as *mut c_void,
destructor,
filter,
name);
let filter = rocksdb_ffi::rocksdb_compactionfilter_create(proxy as *mut c_void,
destructor,
filter,
name);
rocksdb_ffi::rocksdb_compactionfilter_set_ignore_snapshots(filter, ignore_snapshots);
Ok(CompactionFilterHandle { inner: filter })
}
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::mem;
......@@ -29,8 +30,7 @@ pub extern "C" fn destructor_callback(raw_cb: *mut c_void) {
pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
unsafe {
let cb: &mut ComparatorCallback =
&mut *(raw_cb as *mut ComparatorCallback);
let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
let ptr = cb.name.as_ptr();
ptr as *const c_char
}
......@@ -43,12 +43,9 @@ pub extern "C" fn compare_callback(raw_cb: *mut c_void,
b_len: size_t)
-> c_int {
unsafe {
let cb: &mut ComparatorCallback =
&mut *(raw_cb as *mut ComparatorCallback);
let a: &[u8] = slice::from_raw_parts(a_raw as *const u8,
a_len as usize);
let b: &[u8] = slice::from_raw_parts(b_raw as *const u8,
b_len as usize);
let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len as usize);
(cb.f)(a, b)
}
}
......@@ -27,10 +27,8 @@ pub mod comparator;
mod compaction_filter;
pub use compaction_filter::CompactionFilter;
pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode,
new_bloom_filter, self as rocksdb_ffi};
pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, new_bloom_filter,
self as rocksdb_ffi};
pub use merge_operator::MergeOperands;
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch,
CFHandle, Range};
pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions,
WriteOptions};
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range};
pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions, WriteOptions};
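For orientation, a minimal usage sketch of the re-exported API, pieced together from the crate's own tests later in this diff; the scratch-directory setup via tempdir mirrors those tests, and any name not shown in the hunks above (such as the directory string) is a hypothetical choice for illustration, not part of this commit.

// Usage sketch (not part of this commit), assembled from the re-exports above
// and the tests further down in this diff.
extern crate rocksdb;
extern crate tempdir;

use rocksdb::{DB, Options, Writable};
use tempdir::TempDir;

fn main() {
    // Hypothetical scratch-directory name, in the style of the crate's tests.
    let dir = TempDir::new("_rust_rocksdb_usage_sketch").expect("temp dir");
    let mut opts = Options::new();
    opts.create_if_missing(true);

    // Open the database, write through the Writable trait, and read back.
    let db = DB::open(&opts, dir.path().to_str().unwrap()).unwrap();
    db.put(b"key", b"value").unwrap();
    assert_eq!(&*db.get(b"key").unwrap().unwrap(), &b"value"[..]);
}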
......@@ -66,10 +66,7 @@ fn main() {
custom_merge();
}
fn concat_merge(_: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands)
-> Vec<u8> {
fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
match existing_val {
Some(v) => {
......@@ -152,14 +149,13 @@ mod tests {
opts: &mut Options,
blockopts: &mut BlockBasedOptions)
-> DB {
let per_level_compression: [DBCompressionType; 7] =
[DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4];
let per_level_compression: [DBCompressionType; 7] = [DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4];
opts.create_if_missing(true);
opts.set_max_open_files(10000);
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
use libc::{self, c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::mem;
......@@ -34,8 +35,7 @@ pub extern "C" fn destructor_callback(raw_cb: *mut c_void) {
pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback =
&mut *(raw_cb as *mut MergeOperatorCallback);
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let ptr = cb.name.as_ptr();
ptr as *const c_char
}
......@@ -53,13 +53,9 @@ pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
new_value_length: *mut size_t)
-> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback =
&mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list,
operands_list_len,
num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8,
key_len as usize);
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
let oldval: &[u8] = slice::from_raw_parts(existing_value as *const u8,
existing_value_len as usize);
let mut result = (cb.merge_fn)(key, Some(oldval), operands);
......@@ -84,13 +80,9 @@ pub extern "C" fn partial_merge_callback(raw_cb: *mut c_void,
new_value_length: *mut size_t)
-> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback =
&mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list,
operands_list_len,
num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8,
key_len as usize);
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
let mut result = (cb.merge_fn)(key, None, operands);
result.shrink_to_fit();
// TODO(tan) investigate zero-copy techniques to improve performance
......@@ -137,13 +129,12 @@ impl<'a> Iterator for &'a mut MergeOperands {
let base_len = self.operands_list_len as usize;
let spacing = mem::size_of::<*const *const u8>();
let spacing_len = mem::size_of::<*const size_t>();
let len_ptr =
(base_len + (spacing_len * self.cursor)) as *const size_t;
let len_ptr = (base_len + (spacing_len * self.cursor)) as *const size_t;
let len = *len_ptr as usize;
let ptr = base + (spacing * self.cursor);
self.cursor += 1;
Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8)
as *const u8, len)))
Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8) as *const u8,
len)))
}
}
}
......@@ -156,9 +147,9 @@ impl<'a> Iterator for &'a mut MergeOperands {
#[cfg(test)]
mod test {
use super::*;
use rocksdb_options::Options;
use rocksdb::{DB, DBVector, Writable};
use rocksdb_options::Options;
use super::*;
use tempdir::TempDir;
#[allow(unused_variables)]
......
......@@ -17,8 +17,7 @@
use libc::{self, c_int, c_void, size_t};
use rocksdb_ffi::{self, DBWriteBatch, DBCFHandle, DBInstance};
use rocksdb_options::{Options, ReadOptions, UnsafeSnap, WriteOptions,
FlushOptions};
use rocksdb_options::{Options, ReadOptions, UnsafeSnap, WriteOptions, FlushOptions};
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use std::ffi::{CStr, CString};
......@@ -84,9 +83,7 @@ impl<'a> From<&'a [u8]> for SeekKey<'a> {
impl<'a> DBIterator<'a> {
pub fn new(db: &'a DB, readopts: ReadOptions) -> DBIterator<'a> {
unsafe {
let iterator =
rocksdb_ffi::rocksdb_create_iterator(db.inner,
readopts.get_inner());
let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner, readopts.get_inner());
DBIterator {
db: db,
......@@ -99,16 +96,10 @@ impl<'a> DBIterator<'a> {
pub fn seek(&mut self, key: SeekKey) -> bool {
unsafe {
match key {
SeekKey::Start => {
rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner)
}
SeekKey::End => {
rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner)
}
SeekKey::Start => rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner),
SeekKey::End => rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner),
SeekKey::Key(key) => {
rocksdb_ffi::rocksdb_iter_seek(self.inner,
key.as_ptr(),
key.len() as size_t)
rocksdb_ffi::rocksdb_iter_seek(self.inner, key.as_ptr(), key.len() as size_t)
}
}
}
......@@ -134,8 +125,7 @@ impl<'a> DBIterator<'a> {
let mut key_len: size_t = 0;
let key_len_ptr: *mut size_t = &mut key_len;
unsafe {
let key_ptr = rocksdb_ffi::rocksdb_iter_key(self.inner,
key_len_ptr);
let key_ptr = rocksdb_ffi::rocksdb_iter_key(self.inner, key_len_ptr);
slice::from_raw_parts(key_ptr, key_len as usize)
}
}
......@@ -145,8 +135,7 @@ impl<'a> DBIterator<'a> {
let mut val_len: size_t = 0;
let val_len_ptr: *mut size_t = &mut val_len;
unsafe {
let val_ptr = rocksdb_ffi::rocksdb_iter_value(self.inner,
val_len_ptr);
let val_ptr = rocksdb_ffi::rocksdb_iter_value(self.inner, val_len_ptr);
slice::from_raw_parts(val_ptr, val_len as usize)
}
}
......@@ -163,15 +152,11 @@ impl<'a> DBIterator<'a> {
unsafe { rocksdb_ffi::rocksdb_iter_valid(self.inner) }
}
pub fn new_cf(db: &'a DB,
cf_handle: &CFHandle,
readopts: ReadOptions)
-> DBIterator<'a> {
pub fn new_cf(db: &'a DB, cf_handle: &CFHandle, readopts: ReadOptions) -> DBIterator<'a> {
unsafe {
let iterator =
rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
readopts.get_inner(),
cf_handle.inner);
let iterator = rocksdb_ffi::rocksdb_create_iterator_cf(db.inner,
readopts.get_inner(),
cf_handle.inner);
DBIterator {
db: db,
readopts: readopts,
......@@ -233,10 +218,7 @@ impl<'a> Snapshot<'a> {
self.db.get_opt(key, &readopts)
}
pub fn get_cf(&self,
cf: &CFHandle,
key: &[u8])
-> Result<Option<DBVector>, String> {
pub fn get_cf(&self, cf: &CFHandle, key: &[u8]) -> Result<Option<DBVector>, String> {
let mut readopts = ReadOptions::new();
unsafe {
readopts.set_snapshot(&self.snap);
......@@ -254,17 +236,9 @@ impl<'a> Drop for Snapshot<'a> {
// This is for the DB and write batches to share the same API
pub trait Writable {
fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
fn put_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String>;
fn put_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String>;
fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
fn merge_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String>;
fn merge_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String>;
fn delete(&self, key: &[u8]) -> Result<(), String>;
fn delete_cf(&self, cf: &CFHandle, key: &[u8]) -> Result<(), String>;
}
......@@ -306,9 +280,7 @@ impl DB {
let cpath = match CString::new(path.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert path to CString when opening \
rocksdb"
.to_owned())
return Err("Failed to convert path to CString when opening rocksdb".to_owned())
}
};
if let Err(e) = fs::create_dir_all(&Path::new(path)) {
......@@ -360,8 +332,7 @@ impl DB {
for handle in &cfhandles {
if handle.is_null() {
return Err("Received null column family handle from DB."
.to_owned());
return Err("Received null column family handle from DB.".to_owned());
}
}
......@@ -397,9 +368,7 @@ impl DB {
Ok(())
}
pub fn list_column_families(opts: &Options,
path: &str)
-> Result<Vec<String>, String> {
pub fn list_column_families(opts: &Options, path: &str) -> Result<Vec<String>, String> {
let cpath = match CString::new(path.as_bytes()) {
Ok(c) => c,
Err(_) => {
......@@ -412,18 +381,14 @@ impl DB {
let mut cfs: Vec<String> = vec![];
unsafe {
let mut lencf: size_t = 0;
let list = ffi_try!(rocksdb_list_column_families(opts.inner,
cpath.as_ptr(),
&mut lencf));
let list =
ffi_try!(rocksdb_list_column_families(opts.inner, cpath.as_ptr(), &mut lencf));
let list_cfs = slice::from_raw_parts(list, lencf);
for &cf_name in list_cfs {
let cf =
match CStr::from_ptr(cf_name).to_owned().into_string() {
Ok(s) => s,
Err(e) => {
return Err(format!("invalid utf8 bytes: {:?}", e))
}
};
let cf = match CStr::from_ptr(cf_name).to_owned().into_string() {
Ok(s) => s,
Err(e) => return Err(format!("invalid utf8 bytes: {:?}", e)),
};
cfs.push(cf);
}
rocksdb_ffi::rocksdb_list_column_families_destroy(list, lencf);
......@@ -436,10 +401,7 @@ impl DB {
&self.path
}
pub fn write_opt(&self,
batch: WriteBatch,
writeopts: &WriteOptions)
-> Result<(), String> {
pub fn write_opt(&self, batch: WriteBatch, writeopts: &WriteOptions) -> Result<(), String> {
unsafe {
ffi_try!(rocksdb_write(self.inner, writeopts.inner, batch.inner));
}
......@@ -456,10 +418,7 @@ impl DB {
self.write_opt(batch, &wo)
}
pub fn get_opt(&self,
key: &[u8],
readopts: &ReadOptions)
-> Result<Option<DBVector>, String> {
pub fn get_opt(&self, key: &[u8], readopts: &ReadOptions) -> Result<Option<DBVector>, String> {
unsafe {
let val_len: size_t = 0;
let val_len_ptr = &val_len as *const size_t;
......@@ -502,30 +461,21 @@ impl DB {
}
}
pub fn get_cf(&self,
cf: &CFHandle,
key: &[u8])
-> Result<Option<DBVector>, String> {
pub fn get_cf(&self, cf: &CFHandle, key: &[u8]) -> Result<Option<DBVector>, String> {
self.get_cf_opt(cf, key, &ReadOptions::new())
}
pub fn create_cf(&mut self,
name: &str,
opts: &Options)
-> Result<&CFHandle, String> {
pub fn create_cf(&mut self, name: &str, opts: &Options) -> Result<&CFHandle, String> {
let cname = match CString::new(name.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert path to CString when opening \
rocksdb"
.to_owned())
return Err("Failed to convert path to CString when opening rocksdb".to_owned())
}
};
let cname_ptr = cname.as_ptr();
unsafe {
let cf_handler = ffi_try!(rocksdb_create_column_family(self.inner,
opts.inner,
cname_ptr));
let cf_handler =
ffi_try!(rocksdb_create_column_family(self.inner, opts.inner, cname_ptr));
let handle = CFHandle { inner: cf_handler };
Ok(match self.cfs.entry(name.to_owned()) {
Entry::Occupied(mut e) => {
......@@ -650,10 +600,7 @@ impl DB {
Ok(())
}
}
fn delete_opt(&self,
key: &[u8],
writeopts: &WriteOptions)
-> Result<(), String> {
fn delete_opt(&self, key: &[u8], writeopts: &WriteOptions) -> Result<(), String> {
unsafe {
ffi_try!(rocksdb_delete(self.inner,
writeopts.inner,
......@@ -703,17 +650,11 @@ impl DB {
self.get_approximate_sizes_cfopt(None, ranges)
}
pub fn get_approximate_sizes_cf(&self,
cf: &CFHandle,
ranges: &[Range])
-> Vec<u64> {
pub fn get_approximate_sizes_cf(&self, cf: &CFHandle, ranges: &[Range]) -> Vec<u64> {
self.get_approximate_sizes_cfopt(Some(cf), ranges)
}
fn get_approximate_sizes_cfopt(&self,
cf: Option<&CFHandle>,
ranges: &[Range])
-> Vec<u64> {
fn get_approximate_sizes_cfopt(&self, cf: Option<&CFHandle>, ranges: &[Range]) -> Vec<u64> {
let start_keys: Vec<*const u8> = ranges.iter()
.map(|x| x.start_key.as_ptr())
.collect();
......@@ -727,17 +668,13 @@ impl DB {
.map(|x| x.end_key.len())
.collect();
let mut sizes: Vec<u64> = vec![0; ranges.len()];
let (n,
start_key_ptr,
start_key_len_ptr,
end_key_ptr,
end_key_len_ptr,
size_ptr) = (ranges.len() as i32,
start_keys.as_ptr(),
start_key_lens.as_ptr(),
end_keys.as_ptr(),
end_key_lens.as_ptr(),
sizes.as_mut_ptr());
let (n, start_key_ptr, start_key_len_ptr, end_key_ptr, end_key_len_ptr, size_ptr) =
(ranges.len() as i32,
start_keys.as_ptr(),
start_key_lens.as_ptr(),
end_keys.as_ptr(),
end_key_lens.as_ptr(),
sizes.as_mut_ptr());
match cf {
None => unsafe {
rocksdb_ffi::rocksdb_approximate_sizes(self.inner,
......@@ -762,19 +699,11 @@ impl DB {
sizes
}
pub fn compact_range(&self,
start_key: Option<&[u8]>,
end_key: Option<&[u8]>) {
pub fn compact_range(&self, start_key: Option<&[u8]>, end_key: Option<&[u8]>) {
unsafe {
let (start, s_len) =
start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) =
end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range(self.inner,
start,
s_len,
end,
e_len);
let (start, s_len) = start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) = end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range(self.inner, start, s_len, end, e_len);
}
}
......@@ -783,23 +712,13 @@ impl DB {
start_key: Option<&[u8]>,
end_key: Option<&[u8]>) {
unsafe {
let (start, s_len) =
start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) =
end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range_cf(self.inner,
cf.inner,
start,
s_len,
end,
e_len);
let (start, s_len) = start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) = end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range_cf(self.inner, cf.inner, start, s_len, end, e_len);
}
}
pub fn delete_file_in_range(&self,
start_key: &[u8],
end_key: &[u8])
-> Result<(), String> {
pub fn delete_file_in_range(&self, start_key: &[u8], end_key: &[u8]) -> Result<(), String> {
unsafe {
ffi_try!(rocksdb_delete_file_in_range(self.inner,
start_key.as_ptr(),
......@@ -817,11 +736,11 @@ impl DB {
-> Result<(), String> {
unsafe {
ffi_try!(rocksdb_delete_file_in_range_cf(self.inner,
cf.inner,
start_key.as_ptr(),
start_key.len() as size_t,
end_key.as_ptr(),
end_key.len() as size_t));
cf.inner,
start_key.as_ptr(),
start_key.len() as size_t,
end_key.as_ptr(),
end_key.len() as size_t));
Ok(())
}
}
......@@ -830,10 +749,7 @@ impl DB {
self.get_property_value_cf_opt(None, name)
}
pub fn get_property_value_cf(&self,
cf: &CFHandle,
name: &str)
-> Option<String> {
pub fn get_property_value_cf(&self, cf: &CFHandle, name: &str) -> Option<String> {
self.get_property_value_cf_opt(Some(cf), name)
}
......@@ -843,29 +759,18 @@ impl DB {
self.get_property_int_cf_opt(None, name)
}
pub fn get_property_int_cf(&self,
cf: &CFHandle,
name: &str)
-> Option<u64> {
pub fn get_property_int_cf(&self, cf: &CFHandle, name: &str) -> Option<u64> {
self.get_property_int_cf_opt(Some(cf), name)
}
fn get_property_value_cf_opt(&self,
cf: Option<&CFHandle>,
name: &str)
-> Option<String> {
fn get_property_value_cf_opt(&self, cf: Option<&CFHandle>, name: &str) -> Option<String> {
unsafe {
let prop_name = CString::new(name).unwrap();
let value = match cf {
None => {
rocksdb_ffi::rocksdb_property_value(self.inner,
prop_name.as_ptr())
}
None => rocksdb_ffi::rocksdb_property_value(self.inner, prop_name.as_ptr()),
Some(cf) => {
rocksdb_ffi::rocksdb_property_value_cf(self.inner,
cf.inner,
prop_name.as_ptr())
rocksdb_ffi::rocksdb_property_value_cf(self.inner, cf.inner, prop_name.as_ptr())
}
};
......@@ -880,10 +785,7 @@ impl DB {
}
}
fn get_property_int_cf_opt(&self,
cf: Option<&CFHandle>,
name: &str)
-> Option<u64> {
fn get_property_int_cf_opt(&self, cf: Option<&CFHandle>, name: &str) -> Option<u64> {
// Rocksdb guarantees that the return property int
// value is u64 if exists.
if let Some(value) = self.get_property_value_cf_opt(cf, name) {
......@@ -901,11 +803,7 @@ impl Writable for DB {
self.put_opt(key, value, &WriteOptions::new())
}
fn put_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String> {
fn put_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String> {
self.put_cf_opt(cf, key, value, &WriteOptions::new())
}
......@@ -913,11 +811,7 @@ impl Writable for DB {
self.merge_opt(key, value, &WriteOptions::new())
}
fn merge_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String> {
fn merge_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String> {
self.merge_cf_opt(cf, key, value, &WriteOptions::new())
}
......@@ -932,9 +826,7 @@ impl Writable for DB {
impl Default for WriteBatch {
fn default() -> WriteBatch {
WriteBatch {
inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
}
WriteBatch { inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() } }
}
}
......@@ -979,11 +871,7 @@ impl Writable for WriteBatch {
}
}
fn put_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String> {
fn put_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String> {
unsafe {
rocksdb_ffi::rocksdb_writebatch_put_cf(self.inner,
cf.inner,
......@@ -1006,11 +894,7 @@ impl Writable for WriteBatch {
}
}
fn merge_cf(&self,
cf: &CFHandle,
key: &[u8],
value: &[u8])
-> Result<(), String> {
fn merge_cf(&self, cf: &CFHandle, key: &[u8], value: &[u8]) -> Result<(), String> {
unsafe {
rocksdb_ffi::rocksdb_writebatch_merge_cf(self.inner,
cf.inner,
......@@ -1024,9 +908,7 @@ impl Writable for WriteBatch {
fn delete(&self, key: &[u8]) -> Result<(), String> {
unsafe {
rocksdb_ffi::rocksdb_writebatch_delete(self.inner,
key.as_ptr(),
key.len() as size_t);
rocksdb_ffi::rocksdb_writebatch_delete(self.inner, key.as_ptr(), key.len() as size_t);
Ok(())
}
}
......@@ -1195,8 +1077,7 @@ mod test {
#[test]
fn list_column_families_test() {
let path = TempDir::new("_rust_rocksdb_list_column_families_test")
.expect("");
let path = TempDir::new("_rust_rocksdb_list_column_families_test").expect("");
let mut cfs = ["default", "cf1", "cf2", "cf3"];
{
let mut cfs_opts = vec![];
......@@ -1207,8 +1088,7 @@ mod test {
let mut opts = Options::new();
opts.create_if_missing(true);
let mut db = DB::open(&opts, path.path().to_str().unwrap())
.unwrap();
let mut db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
for (&cf, &cf_opts) in cfs.iter().zip(&cfs_ref_opts) {
if cf == "default" {
continue;
......@@ -1217,10 +1097,8 @@ mod test {
}
}
let opts_list_cfs = Options::new();
let mut cfs_vec =
DB::list_column_families(&opts_list_cfs,
path.path().to_str().unwrap())
.unwrap();
let mut cfs_vec = DB::list_column_families(&opts_list_cfs, path.path().to_str().unwrap())
.unwrap();
cfs_vec.sort();
cfs.sort();
assert_eq!(cfs_vec, cfs);
......
......@@ -13,17 +13,14 @@
// limitations under the License.
//
use compaction_filter::{CompactionFilter, new_compaction_filter,
CompactionFilterHandle};
use compaction_filter::{CompactionFilter, new_compaction_filter, CompactionFilterHandle};
use comparator::{self, ComparatorCallback, compare_callback};
use libc::{c_int, size_t};
use merge_operator::{self, MergeOperatorCallback, full_merge_callback,
partial_merge_callback};
use merge_operator::{self, MergeOperatorCallback, full_merge_callback, partial_merge_callback};
use merge_operator::MergeFn;
use rocksdb_ffi::{self, DBOptions, DBWriteOptions, DBBlockBasedTableOptions,
DBReadOptions, DBCompressionType, DBRecoveryMode,
DBSnapshot, DBInstance, DBFlushOptions};
use rocksdb_ffi::{self, DBOptions, DBWriteOptions, DBBlockBasedTableOptions, DBReadOptions,
DBCompressionType, DBRecoveryMode, DBSnapshot, DBInstance, DBFlushOptions};
use std::ffi::CString;
use std::mem;
......@@ -57,8 +54,7 @@ impl BlockBasedOptions {
pub fn set_block_size(&mut self, size: usize) {
unsafe {
rocksdb_ffi::rocksdb_block_based_options_set_block_size(self.inner,
size);
rocksdb_ffi::rocksdb_block_based_options_set_block_size(self.inner, size);
}
}
......@@ -71,9 +67,7 @@ impl BlockBasedOptions {
}
}
pub fn set_bloom_filter(&mut self,
bits_per_key: c_int,
block_based: bool) {
pub fn set_bloom_filter(&mut self, bits_per_key: c_int, block_based: bool) {
unsafe {
let bloom = if block_based {
rocksdb_ffi::rocksdb_filterpolicy_create_bloom(bits_per_key)
......@@ -81,8 +75,7 @@ impl BlockBasedOptions {
rocksdb_ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key)
};
rocksdb_ffi::rocksdb_block_based_options_set_filter_policy(self.inner,
bloom);
rocksdb_ffi::rocksdb_block_based_options_set_filter_policy(self.inner, bloom);
}
}
......@@ -153,8 +146,7 @@ impl ReadOptions {
}
pub unsafe fn set_snapshot(&mut self, snapshot: &UnsafeSnap) {
rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner,
snapshot.inner);
rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
}
pub fn set_iterate_upper_bound(&mut self, key: &[u8]) {
......@@ -162,7 +154,7 @@ impl ReadOptions {
unsafe {
rocksdb_ffi::rocksdb_readoptions_set_iterate_upper_bound(self.inner,
self.upper_bound.as_ptr(),
self.upper_bound.len() as size_t);
self.upper_bound.len());
}
}
......@@ -247,16 +239,14 @@ impl Options {
pub fn increase_parallelism(&mut self, parallelism: i32) {
unsafe {
rocksdb_ffi::rocksdb_options_increase_parallelism(self.inner,
parallelism);
rocksdb_ffi::rocksdb_options_increase_parallelism(self.inner, parallelism);
}
}
pub fn optimize_level_style_compaction(&mut self,
memtable_memory_budget: i32) {
pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: i32) {
unsafe {
rocksdb_ffi::rocksdb_options_optimize_level_style_compaction(
self.inner, memtable_memory_budget);
rocksdb_ffi::rocksdb_options_optimize_level_style_compaction(self.inner,
memtable_memory_budget);
}
}
......@@ -282,13 +272,9 @@ impl Options {
unsafe {
let c_name = match CString::new(name) {
Ok(s) => s,
Err(e) => {
return Err(format!("failed to convert to cstring: {:?}", e))
}
Err(e) => return Err(format!("failed to convert to cstring: {:?}", e)),
};
self.filter = Some(try!(new_compaction_filter(c_name,
ignore_snapshots,
filter)));
self.filter = Some(try!(new_compaction_filter(c_name, ignore_snapshots, filter)));
rocksdb_ffi::rocksdb_options_set_compaction_filter(self.inner,
self.filter
.as_ref()
......@@ -300,8 +286,7 @@ impl Options {
pub fn create_if_missing(&mut self, create_if_missing: bool) {
unsafe {
rocksdb_ffi::rocksdb_options_set_create_if_missing(
self.inner, create_if_missing);
rocksdb_ffi::rocksdb_options_set_create_if_missing(self.inner, create_if_missing);
}
}
......@@ -311,12 +296,11 @@ impl Options {
}
}
pub fn compression_per_level(&mut self,
level_types: &[DBCompressionType]) {
pub fn compression_per_level(&mut self, level_types: &[DBCompressionType]) {
unsafe {
rocksdb_ffi::rocksdb_options_set_compression_per_level(self.inner,
level_types.as_ptr(),
level_types.len() as size_t)
level_types.as_ptr(),
level_types.len() as size_t)
}
}
......@@ -327,31 +311,27 @@ impl Options {
});
unsafe {
let mo = rocksdb_ffi::rocksdb_mergeoperator_create(
mem::transmute(cb),
merge_operator::destructor_callback,
full_merge_callback,
partial_merge_callback,
None,
merge_operator::name_callback);
let mo = rocksdb_ffi::rocksdb_mergeoperator_create(mem::transmute(cb),
merge_operator::destructor_callback,
full_merge_callback,
partial_merge_callback,
None,
merge_operator::name_callback);
rocksdb_ffi::rocksdb_options_set_merge_operator(self.inner, mo);
}
}
pub fn add_comparator(&mut self,
name: &str,
compare_fn: fn(&[u8], &[u8]) -> i32) {
pub fn add_comparator(&mut self, name: &str, compare_fn: fn(&[u8], &[u8]) -> i32) {
let cb = Box::new(ComparatorCallback {
name: CString::new(name.as_bytes()).unwrap(),
f: compare_fn,
});
unsafe {
let cmp = rocksdb_ffi::rocksdb_comparator_create(
mem::transmute(cb),
comparator::destructor_callback,
compare_callback,
comparator::name_callback);
let cmp = rocksdb_ffi::rocksdb_comparator_create(mem::transmute(cb),
comparator::destructor_callback,
compare_callback,
comparator::name_callback);
rocksdb_ffi::rocksdb_options_set_comparator(self.inner, cmp);
}
}
......@@ -359,8 +339,7 @@ impl Options {
pub fn set_block_cache_size_mb(&mut self, cache_size: u64) {
unsafe {
rocksdb_ffi::rocksdb_options_optimize_for_point_lookup(self.inner,
cache_size);
rocksdb_ffi::rocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
}
}
......@@ -389,47 +368,40 @@ impl Options {
pub fn set_disable_data_sync(&mut self, disable: bool) {
unsafe {
if disable {
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner,
1);
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner, 1);
} else {
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner,
0);
rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner, 0);
}
}
}
pub fn allow_os_buffer(&mut self, is_allow: bool) {
unsafe {
rocksdb_ffi::rocksdb_options_set_allow_os_buffer(self.inner,
is_allow);
rocksdb_ffi::rocksdb_options_set_allow_os_buffer(self.inner, is_allow);
}
}
pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_table_cache_numshardbits(self.inner,
nbits);
rocksdb_ffi::rocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
}
}
pub fn set_min_write_buffer_number(&mut self, nbuf: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_min_write_buffer_number_to_merge(
self.inner, nbuf);
rocksdb_ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, nbuf);
}
}
pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_max_write_buffer_number(self.inner,
nbuf);
rocksdb_ffi::rocksdb_options_set_max_write_buffer_number(self.inner, nbuf);
}
}
pub fn set_write_buffer_size(&mut self, size: u64) {
unsafe {
rocksdb_ffi::rocksdb_options_set_write_buffer_size(self.inner,
size);
rocksdb_ffi::rocksdb_options_set_write_buffer_size(self.inner, size);
}
}
......@@ -447,65 +419,55 @@ impl Options {
pub fn set_max_manifest_file_size(&mut self, size: u64) {
unsafe {
rocksdb_ffi::rocksdb_options_set_max_manifest_file_size(self.inner,
size);
rocksdb_ffi::rocksdb_options_set_max_manifest_file_size(self.inner, size);
}
}
pub fn set_target_file_size_base(&mut self, size: u64) {
unsafe {
rocksdb_ffi::rocksdb_options_set_target_file_size_base(self.inner,
size);
rocksdb_ffi::rocksdb_options_set_target_file_size_base(self.inner, size);
}
}
pub fn set_min_write_buffer_number_to_merge(&mut self, to_merge: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_min_write_buffer_number_to_merge(
self.inner, to_merge);
rocksdb_ffi::rocksdb_options_set_min_write_buffer_number_to_merge(self.inner, to_merge);
}
}
pub fn set_level_zero_file_num_compaction_trigger(&mut self, n: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_level0_file_num_compaction_trigger(
self.inner, n);
rocksdb_ffi::rocksdb_options_set_level0_file_num_compaction_trigger(self.inner, n);
}
}
pub fn set_level_zero_slowdown_writes_trigger(&mut self, n: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_level0_slowdown_writes_trigger(
self.inner, n);
rocksdb_ffi::rocksdb_options_set_level0_slowdown_writes_trigger(self.inner, n);
}
}
pub fn set_level_zero_stop_writes_trigger(&mut self, n: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_level0_stop_writes_trigger(
self.inner, n);
rocksdb_ffi::rocksdb_options_set_level0_stop_writes_trigger(self.inner, n);
}
}
pub fn set_compaction_style(&mut self,
style: rocksdb_ffi::DBCompactionStyle) {
pub fn set_compaction_style(&mut self, style: rocksdb_ffi::DBCompactionStyle) {
unsafe {
rocksdb_ffi::rocksdb_options_set_compaction_style(self.inner,
style);
rocksdb_ffi::rocksdb_options_set_compaction_style(self.inner, style);
}
}
pub fn set_max_background_compactions(&mut self, n: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_max_background_compactions(
self.inner, n);
rocksdb_ffi::rocksdb_options_set_max_background_compactions(self.inner, n);
}
}
pub fn set_max_background_flushes(&mut self, n: c_int) {
unsafe {
rocksdb_ffi::rocksdb_options_set_max_background_flushes(self.inner,
n);
rocksdb_ffi::rocksdb_options_set_max_background_flushes(self.inner, n);
}
}
......@@ -525,8 +487,7 @@ impl Options {
}
}
pub fn set_block_based_table_factory(&mut self,
factory: &BlockBasedOptions) {
pub fn set_block_based_table_factory(&mut self, factory: &BlockBasedOptions) {
unsafe {
rocksdb_ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
}
......@@ -535,19 +496,16 @@ impl Options {
pub fn set_report_bg_io_stats(&mut self, enable: bool) {
unsafe {
if enable {
rocksdb_ffi::rocksdb_options_set_report_bg_io_stats(self.inner,
1);
rocksdb_ffi::rocksdb_options_set_report_bg_io_stats(self.inner, 1);
} else {
rocksdb_ffi::rocksdb_options_set_report_bg_io_stats(self.inner,
0);
rocksdb_ffi::rocksdb_options_set_report_bg_io_stats(self.inner, 0);
}
}
}
pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
unsafe {
rocksdb_ffi::rocksdb_options_set_wal_recovery_mode(self.inner,
mode);
rocksdb_ffi::rocksdb_options_set_wal_recovery_mode(self.inner, mode);
}
}
......@@ -559,8 +517,7 @@ impl Options {
pub fn set_stats_dump_period_sec(&mut self, period: usize) {
unsafe {
rocksdb_ffi::rocksdb_options_set_stats_dump_period_sec(self.inner,
period);
rocksdb_ffi::rocksdb_options_set_stats_dump_period_sec(self.inner, period);
}
}
......@@ -577,21 +534,21 @@ pub struct FlushOptions {
impl FlushOptions {
pub fn new() -> FlushOptions {
unsafe {
FlushOptions {
inner: rocksdb_ffi::rocksdb_flushoptions_create(),
}
}
unsafe { FlushOptions { inner: rocksdb_ffi::rocksdb_flushoptions_create() } }
}
pub fn set_wait(&mut self, wait: bool) {
unsafe {rocksdb_ffi::rocksdb_flushoptions_set_wait(self.inner, wait);}
unsafe {
rocksdb_ffi::rocksdb_flushoptions_set_wait(self.inner, wait);
}
}
}
impl Drop for FlushOptions {
fn drop(&mut self) {
unsafe {rocksdb_ffi::rocksdb_flushoptions_destroy(self.inner);}
unsafe {
rocksdb_ffi::rocksdb_flushoptions_destroy(self.inner);
}
}
}
......
......@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
use rocksdb::{DB, MergeOperands, Options, Writable};
use tempdir::TempDir;
......@@ -114,11 +115,7 @@ pub fn test_column_family() {
}
// should be able to drop a cf
{
let mut db = DB::open_cf(&Options::new(),
path_str,
&["cf1"],
&[&Options::new()])
.unwrap();
let mut db = DB::open_cf(&Options::new(), path_str, &["cf1"], &[&Options::new()]).unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
......
use tempdir::TempDir;
use rocksdb::{DB, Options, Range, Writable};
use rocksdb::{DB, Options, Range, Writable};
use tempdir::TempDir;
#[test]
fn test_compact_range() {
let path = TempDir::new("_rust_rocksdb_test_compact_range").expect("");
let mut opts = Options::new();
opts.create_if_missing(true);
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
let samples = vec![
let path = TempDir::new("_rust_rocksdb_test_compact_range").expect("");
let mut opts = Options::new();
opts.create_if_missing(true);
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
let samples = vec![
(b"k1".to_vec(), b"value--------1".to_vec()),
(b"k2".to_vec(), b"value--------2".to_vec()),
(b"k3".to_vec(), b"value--------3".to_vec()),
(b"k4".to_vec(), b"value--------4".to_vec()),
(b"k5".to_vec(), b"value--------5".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
}
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
}
// flush memtable to sst file
db.flush(true).unwrap();
let old_size = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];
// flush memtable to sst file
db.flush(true).unwrap();
let old_size = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];
// delete all and compact whole range
for &(ref k, _) in &samples {
db.delete(k).unwrap()
}
db.compact_range(None, None);
let new_size = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];
assert!(old_size > new_size);
// delete all and compact whole range
for &(ref k, _) in &samples {
db.delete(k).unwrap()
}
db.compact_range(None, None);
let new_size = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];
assert!(old_size > new_size);
}
use tempdir::TempDir;
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use rocksdb::{Writable, DB, CompactionFilter, Options};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use tempdir::TempDir;
struct Filter {
drop_called: Arc<AtomicBool>,
......@@ -29,10 +29,13 @@ fn test_compaction_filter() {
let drop_called = Arc::new(AtomicBool::new(false));
let filtered_kvs = Arc::new(RwLock::new(vec![]));
// set ignore_snapshots to false
opts.set_compaction_filter("test", false, Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
})).unwrap();
opts.set_compaction_filter("test",
false,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
.unwrap();
opts.create_if_missing(true);
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
let samples = vec![
......@@ -56,10 +59,13 @@ fn test_compaction_filter() {
drop(db);
// reregister with ignore_snapshots set to true
opts.set_compaction_filter("test", true, Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
})).unwrap();
opts.set_compaction_filter("test",
true,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
.unwrap();
assert!(drop_called.load(Ordering::Relaxed));
drop_called.store(false, Ordering::Relaxed);
{
......
......@@ -29,12 +29,11 @@ pub fn test_iterator() {
assert!(p.is_ok());
let p = db.put(k3, v3);
assert!(p.is_ok());
let expected = vec![(k1.to_vec(), v1.to_vec()),
(k2.to_vec(), v2.to_vec()),
(k3.to_vec(), v3.to_vec())];
let expected =
vec![(k1.to_vec(), v1.to_vec()), (k2.to_vec(), v2.to_vec()), (k3.to_vec(), v3.to_vec())];
let mut iter = db.iter();
iter.seek(SeekKey::Start);
assert_eq!(iter.collect::<Vec<_>>(), expected);
......@@ -75,9 +74,8 @@ pub fn test_iterator() {
assert_eq!(iter.collect::<Vec<_>>(), expected2);
iter.seek(SeekKey::Key(k2));
let expected = vec![(k2.to_vec(), v2.to_vec()),
(k3.to_vec(), v3.to_vec()),
(k4.to_vec(), v4.to_vec())];
let expected =
vec![(k2.to_vec(), v2.to_vec()), (k3.to_vec(), v3.to_vec()), (k4.to_vec(), v4.to_vec())];
assert_eq!(iter.collect::<Vec<_>>(), expected);
iter.seek(SeekKey::Key(k2));
......
use rocksdb::{DB, Writable};
use std::thread;
use std::sync::Arc;
use std::thread;
use tempdir::TempDir;
const N: usize = 100_000;
......
use tempdir::TempDir;
use rocksdb::{DB, Options};
use rocksdb::{DB, Options};
use tempdir::TempDir;
#[test]
fn test_set_num_levels() {
let path = TempDir::new("_rust_rocksdb_test_set_num_levels").expect("");
let mut opts = Options::new();
opts.create_if_missing(true);
opts.set_num_levels(2);
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
drop(db);
let path = TempDir::new("_rust_rocksdb_test_set_num_levels").expect("");
let mut opts = Options::new();
opts.create_if_missing(true);
opts.set_num_levels(2);
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
drop(db);
}