Commit dab2ef77 authored by follitude, committed by siddontang

*: format code (#121)

parent 4fd63913
# Complete list of style options can be found at:
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
BasedOnStyle: Google
...
......@@ -12,13 +12,8 @@ cache:
os:
- linux
# - osx
before_script:
- make -f travis-build/Makefile prepare-rustfmt
script:
- cargo fmt -- --write-mode diff || (echo please make format and run tests before creating a pr!; exit 1)
- cargo build --features static-link
- cargo test --all --features static-link
......@@ -26,4 +21,3 @@ env:
global:
- RUST_TEST_THREADS=1
- LD_LIBRARY_PATH: "/usr/local/lib"
- RUSTFMT_VERSION=v0.6.0
......@@ -8,14 +8,13 @@ machine:
CPPFLAGS: "-I$HOME/.local/include"
CXXFLAGS: "-I$HOME/.local/include"
PKG_CONFIG_PATH: "$PKG_CONFIG_PATH:$HOME/.local/lib/pkgconfig"
RUSTC_DATE: "2017-03-28"
RUSTC_DATE: "2017-08-09"
LOCAL_PREFIX: "$HOME/.local"
# used by cargo
LIBRARY_PATH: "$LIBRARY_PATH:$HOME/.local/lib"
RUST_TEST_THREADS: 1
RUST_BACKTRACE: 1
RUSTFLAGS: "-Dwarnings"
RUSTFMT_VERSION: "v0.6.0"
pre:
- |
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y;
......
extern crate gcc;
use gcc::Build;
use std::{env, fs, str};
use std::path::PathBuf;
use std::process::Command;
use gcc::Config;
macro_rules! t {
($e:expr) => (match $e {
Ok(n) => n,
......@@ -26,8 +25,8 @@ fn main() {
println!("cargo:rustc-link-lib=static=crocksdb");
}
fn build_rocksdb() -> Config {
let mut cfg = Config::new();
fn build_rocksdb() -> Build {
let mut cfg = Build::new();
if !cfg!(feature = "static-link") {
if cfg!(target_os = "windows") {
......@@ -89,10 +88,12 @@ fn build_rocksdb() -> Config {
}
if let Err(e) = fs::rename(src.as_path(), dst.as_path()) {
panic!("failed to move {} to {}: {:?}",
panic!(
"failed to move {} to {}: {:?}",
src.display(),
dst.display(),
e);
e
);
}
}
......@@ -124,23 +125,29 @@ fn build_rocksdb() -> Config {
return cfg;
}
let output =
Command::new(p.as_path()).args(&["find_library", std_lib_name]).output().unwrap();
let output = Command::new(p.as_path())
.args(&["find_library", std_lib_name])
.output()
.unwrap();
if output.status.success() && !output.stdout.is_empty() {
if let Ok(path_str) = str::from_utf8(&output.stdout) {
let path = PathBuf::from(path_str);
if path.is_absolute() {
println!("cargo:rustc-link-lib=static=stdc++");
println!("cargo:rustc-link-search=native={}",
path.parent().unwrap().display());
println!(
"cargo:rustc-link-search=native={}",
path.parent().unwrap().display()
);
cfg.cpp_link_stdlib(None);
return cfg;
}
}
}
println!("failed to detect {}: {:?}, fallback to dynamic",
println!(
"failed to detect {}: {:?}, fallback to dynamic",
std_lib_name,
output);
output
);
cfg
}
......
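The build-script churn above reflects the `gcc` crate renaming its builder type from `Config` to `Build` (the old name remains only as a deprecated alias), plus rustfmt's switch from visually indented arguments to block indentation with trailing commas. A minimal sketch of a build script against the new name; the C source path is hypothetical and purely for illustration:

// build.rs — minimal sketch using gcc::Build (formerly gcc::Config).
// `src/shim.c` and `include/` are hypothetical paths.
extern crate gcc;

use gcc::Build;

fn main() {
    let mut cfg = Build::new();
    cfg.file("src/shim.c")     // compile one C file
        .include("include")    // add an include path
        .compile("libshim.a"); // emits the cargo:rustc-link-lib directive
}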
......@@ -628,7 +628,8 @@ crocksdb_compactionjobinfo_table_properties(
extern C_ROCKSDB_LIBRARY_API uint64_t
crocksdb_compactionjobinfo_elapsed_micros(const crocksdb_compactionjobinfo_t*);
extern C_ROCKSDB_LIBRARY_API uint64_t
crocksdb_compactionjobinfo_num_corrupt_keys(const crocksdb_compactionjobinfo_t*);
crocksdb_compactionjobinfo_num_corrupt_keys(
const crocksdb_compactionjobinfo_t*);
/* External file ingestion info */
......
use crocksdb_ffi::{self, DBCompactionFilter};
use libc::{c_void, c_char, c_int, size_t};
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::slice;
......@@ -34,7 +34,8 @@ extern "C" fn destructor(filter: *mut c_void) {
}
}
extern "C" fn filter(filter: *mut c_void,
extern "C" fn filter(
filter: *mut c_void,
level: c_int,
key: *const u8,
key_len: size_t,
......@@ -42,8 +43,8 @@ extern "C" fn filter(filter: *mut c_void,
value_len: size_t,
_: *mut *mut u8,
_: *mut size_t,
value_changed: *mut bool)
-> bool {
value_changed: *mut bool,
) -> bool {
unsafe {
let filter = &mut *(filter as *mut CompactionFilterProxy);
let key = slice::from_raw_parts(key, key_len);
......@@ -65,18 +66,21 @@ impl Drop for CompactionFilterHandle {
}
}
pub unsafe fn new_compaction_filter(c_name: CString,
pub unsafe fn new_compaction_filter(
c_name: CString,
ignore_snapshots: bool,
f: Box<CompactionFilter>)
-> Result<CompactionFilterHandle, String> {
f: Box<CompactionFilter>,
) -> Result<CompactionFilterHandle, String> {
let proxy = Box::into_raw(Box::new(CompactionFilterProxy {
name: c_name,
filter: f,
}));
let filter = crocksdb_ffi::crocksdb_compactionfilter_create(proxy as *mut c_void,
let filter = crocksdb_ffi::crocksdb_compactionfilter_create(
proxy as *mut c_void,
destructor,
filter,
name);
name,
);
crocksdb_ffi::crocksdb_compactionfilter_set_ignore_snapshots(filter, ignore_snapshots);
Ok(CompactionFilterHandle { inner: filter })
}
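For orientation, the user-facing side of this shim: a filter implements the crate's `CompactionFilter` trait and is installed with `ColumnFamilyOptions::set_compaction_filter` (see the compaction-filter test further down). A sketch, with the method signature inferred from the `filter` trampoline above (level, key, value in; `true` means drop the entry) rather than copied from the public API:

// Hypothetical filter that drops keys with a "tmp-" prefix during compaction;
// the trait shape is an assumption inferred from the FFI callback above.
use rocksdb::CompactionFilter;

struct TmpKeyFilter;

impl CompactionFilter for TmpKeyFilter {
    fn filter(&mut self, _level: usize, key: &[u8], _value: &[u8]) -> bool {
        // Returning true asks RocksDB to discard this entry.
        key.starts_with(b"tmp-")
    }
}

Installation then mirrors the test below: `cf_opts.set_compaction_filter("tmp", false, Box::new(TmpKeyFilter)).unwrap();`.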
......@@ -36,12 +36,13 @@ pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
}
}
pub extern "C" fn compare_callback(raw_cb: *mut c_void,
pub extern "C" fn compare_callback(
raw_cb: *mut c_void,
a_raw: *const c_char,
a_len: size_t,
b_raw: *const c_char,
b_len: size_t)
-> c_int {
b_len: size_t,
) -> c_int {
unsafe {
let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
......
......@@ -13,10 +13,10 @@
use {TableProperties, TablePropertiesCollectionView};
use crocksdb_ffi::{self, DBInstance, DBFlushJobInfo, DBCompactionJobInfo, DBIngestionInfo,
DBEventListener};
use crocksdb_ffi::{self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
DBInstance};
use libc::c_void;
use std::{slice, mem, str};
use std::{mem, slice, str};
use std::path::Path;
......@@ -98,8 +98,11 @@ impl IngestionInfo {
}
pub fn internal_file_path(&self) -> &Path {
let p =
unsafe { fetch_str!(crocksdb_externalfileingestioninfo_internal_file_path(&self.0)) };
let p = unsafe {
fetch_str!(crocksdb_externalfileingestioninfo_internal_file_path(
&self.0
))
};
Path::new(p)
}
......@@ -134,23 +137,29 @@ extern "C" fn destructor(ctx: *mut c_void) {
// Maybe we should reuse the db instance?
// TODO: refactor the DB implementation so that we can convert DBInstance to DB.
extern "C" fn on_flush_completed(ctx: *mut c_void,
extern "C" fn on_flush_completed(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBFlushJobInfo) {
info: *const DBFlushJobInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_flush_completed(info);
}
extern "C" fn on_compaction_completed(ctx: *mut c_void,
extern "C" fn on_compaction_completed(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBCompactionJobInfo) {
info: *const DBCompactionJobInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_compaction_completed(info);
}
extern "C" fn on_external_file_ingested(ctx: *mut c_void,
extern "C" fn on_external_file_ingested(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBIngestionInfo) {
info: *const DBIngestionInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_external_file_ingested(info);
}
......@@ -158,10 +167,12 @@ extern "C" fn on_external_file_ingested(ctx: *mut c_void,
pub fn new_event_listener<L: EventListener>(l: L) -> *mut DBEventListener {
let p: Box<EventListener> = Box::new(l);
unsafe {
crocksdb_ffi::crocksdb_eventlistener_create(Box::into_raw(Box::new(p)) as *mut c_void,
crocksdb_ffi::crocksdb_eventlistener_create(
Box::into_raw(Box::new(p)) as *mut c_void,
destructor,
on_flush_completed,
on_compaction_completed,
on_external_file_ingested)
on_external_file_ingested,
)
}
}
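The three `extern "C"` trampolines above forward into a boxed `EventListener` through a `&self` receiver. A user-side listener, sketched under the assumption that the trait provides default no-op implementations so only the callbacks of interest need overriding (the counter mirrors the event-listener test further down):

// Hypothetical flush counter; the method name and &self receiver follow the
// on_flush_completed trampoline above, default methods are an assumption.
use std::sync::atomic::{AtomicUsize, Ordering};

use rocksdb::{EventListener, FlushJobInfo};

#[derive(Default)]
struct FlushCounter {
    flushes: AtomicUsize,
}

impl EventListener for FlushCounter {
    fn on_flush_completed(&self, _info: &FlushJobInfo) {
        self.flushes.fetch_add(1, Ordering::SeqCst);
    }
}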
......@@ -32,16 +32,16 @@ mod table_properties_collector_factory;
mod event_listener;
pub use compaction_filter::CompactionFilter;
pub use event_listener::{EventListener, CompactionJobInfo, IngestionInfo, FlushJobInfo};
pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, DBInfoLogLevel,
DBStatisticsTickerType, DBStatisticsHistogramType, new_bloom_filter,
CompactionPriority, DBEntryType, self as crocksdb_ffi};
pub use event_listener::{CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo};
pub use librocksdb_sys::{self as crocksdb_ffi, new_bloom_filter, CompactionPriority,
DBCompactionStyle, DBCompressionType, DBEntryType, DBInfoLogLevel,
DBRecoveryMode, DBStatisticsHistogramType, DBStatisticsTickerType};
pub use merge_operator::MergeOperands;
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range,
BackupEngine, SstFileWriter};
pub use rocksdb_options::{BlockBasedOptions, DBOptions, ColumnFamilyOptions, ReadOptions,
WriteOptions, RestoreOptions, IngestExternalFileOptions, EnvOptions,
HistogramData, CompactOptions};
pub use rocksdb::{BackupEngine, CFHandle, DBIterator, DBVector, Kv, Range, SeekKey, SstFileWriter,
Writable, WriteBatch, DB};
pub use rocksdb_options::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions,
EnvOptions, HistogramData, IngestExternalFileOptions, ReadOptions,
RestoreOptions, WriteOptions};
pub use slice_transform::SliceTransform;
pub use table_properties::{TableProperties, TablePropertiesCollection,
TablePropertiesCollectionView, UserCollectedProperties};
......
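The re-export shuffling in lib.rs is purely alphabetical reordering by rustfmt; nothing is added or removed, so downstream `use rocksdb::{...}` lines keep compiling unchanged. A minimal smoke test against the crate-root API, with an illustrative path:

// Smoke test of the re-exported crate-root API; the path is illustrative only
// (tests in this repo use tempdir instead).
extern crate rocksdb;

use rocksdb::{DBOptions, Writable, DB};

fn main() {
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let db = DB::open(opts, "/tmp/_rust_rocksdb_demo").unwrap();
    db.put(b"key", b"value").unwrap();
    assert_eq!(&*db.get(b"key").unwrap().unwrap(), b"value");
}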
......@@ -14,7 +14,7 @@
//
extern crate rocksdb;
use rocksdb::{DB, MergeOperands, DBOptions, Writable, ColumnFamilyOptions};
use rocksdb::{ColumnFamilyOptions, DBOptions, MergeOperands, Writable, DB};
// fn snapshot_test() {
// let path = "_rust_rocksdb_iteratortest";
......@@ -52,12 +52,10 @@ fn main() {
let db = DB::open_default(path).unwrap();
assert!(db.put(b"my key", b"my value").is_ok());
match db.get(b"my key") {
Ok(Some(value)) => {
match value.to_utf8() {
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
},
Ok(None) => panic!("value not present!"),
Err(e) => println!("error retrieving value: {}", e),
}
......@@ -70,11 +68,9 @@ fn main() {
fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
match existing_val {
Some(v) => {
for e in v {
Some(v) => for e in v {
result.push(*e)
}
}
},
None => (),
}
for op in operands {
......@@ -100,12 +96,10 @@ fn custom_merge() {
db.merge(b"k1", b"efg").unwrap();
db.merge(b"k1", b"h").unwrap();
match db.get(b"k1") {
Ok(Some(value)) => {
match value.to_utf8() {
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
},
Ok(None) => panic!("value not present!"),
Err(e) => println!("error retrieving value: {}", e),
}
......@@ -116,22 +110,25 @@ fn custom_merge() {
#[cfg(test)]
mod tests {
use rocksdb::{BlockBasedOptions, DB, DBCompressionType, ColumnFamilyOptions, DBOptions};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBCompressionType, DBOptions, DB};
use rocksdb::DBCompactionStyle;
use rocksdb::DBRecoveryMode;
#[allow(dead_code)]
fn tuned_for_somebody_elses_disk(path: &str,
fn tuned_for_somebody_elses_disk(
path: &str,
mut opts: DBOptions,
blockopts: &mut BlockBasedOptions)
-> DB {
let per_level_compression: [DBCompressionType; 7] = [DBCompressionType::No,
blockopts: &mut BlockBasedOptions,
) -> DB {
let per_level_compression: [DBCompressionType; 7] = [
DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4];
];
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
opts.set_max_open_files(10000);
......
......@@ -41,7 +41,8 @@ pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
}
}
pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
pub extern "C" fn full_merge_callback(
raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
existing_value: *const c_char,
......@@ -50,14 +51,14 @@ pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t)
-> *const c_char {
new_value_length: *mut size_t,
) -> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
let oldval: &[u8] = slice::from_raw_parts(existing_value as *const u8,
existing_value_len as usize);
let oldval: &[u8] =
slice::from_raw_parts(existing_value as *const u8, existing_value_len as usize);
let mut result = (cb.merge_fn)(key, Some(oldval), operands);
result.shrink_to_fit();
// TODO(tan) investigate zero-copy techniques to improve performance
......@@ -70,15 +71,16 @@ pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
}
}
pub extern "C" fn partial_merge_callback(raw_cb: *mut c_void,
pub extern "C" fn partial_merge_callback(
raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t)
-> *const c_char {
new_value_length: *mut size_t,
) -> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
......@@ -104,10 +106,11 @@ pub struct MergeOperands {
}
impl MergeOperands {
fn new(operands_list: *const *const c_char,
fn new(
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int)
-> MergeOperands {
num_operands: c_int,
) -> MergeOperands {
assert!(num_operands >= 0);
MergeOperands {
operands_list: operands_list,
......@@ -133,8 +136,10 @@ impl<'a> Iterator for &'a mut MergeOperands {
let len = *len_ptr as usize;
let ptr = base + (spacing * self.cursor);
self.cursor += 1;
Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8) as *const u8,
len)))
Some(mem::transmute(slice::from_raw_parts(
*(ptr as *const *const u8) as *const u8,
len,
)))
}
}
}
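Both merge callbacks bottom out in a plain Rust function of the shape used by `concat_merge` in the example binary above: key, optional existing value, and the operand iterator in, merged bytes out. An idiomatic concatenating variant for reference:

// Same contract as concat_merge above, written with extend_from_slice instead
// of per-byte pushes.
use rocksdb::MergeOperands;

fn concat(_key: &[u8], existing: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
    let mut out = existing.map_or_else(Vec::new, |v| v.to_vec());
    for op in operands {
        out.extend_from_slice(op);
    }
    out
}

It is registered the same way as in the tests: `cf_opts.add_merge_operator("concat operator", concat);`.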
......@@ -147,17 +152,18 @@ impl<'a> Iterator for &'a mut MergeOperands {
#[cfg(test)]
mod test {
use rocksdb::{DB, DBVector, Writable};
use rocksdb_options::{DBOptions, ColumnFamilyOptions};
use super::*;
use rocksdb::{DBVector, Writable, DB};
use rocksdb_options::{ColumnFamilyOptions, DBOptions};
use tempdir::TempDir;
#[allow(unused_variables)]
#[allow(dead_code)]
fn test_provided_merge(new_key: &[u8],
fn test_provided_merge(
new_key: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands)
-> Vec<u8> {
operands: &mut MergeOperands,
) -> Vec<u8> {
let nops = operands.size_hint().0;
let mut result: Vec<u8> = Vec::with_capacity(nops);
if let Some(v) = existing_val {
......@@ -181,11 +187,12 @@ mod test {
opts.create_if_missing(true);
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("test operator", test_provided_merge);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let p = db.put(b"k1", b"a");
assert!(p.is_ok());
let _ = db.merge(b"k1", b"b");
......@@ -195,12 +202,10 @@ mod test {
let m = db.merge(b"k1", b"h");
assert!(m.is_ok());
match db.get(b"k1") {
Ok(Some(value)) => {
match value.to_utf8() {
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
},
Err(e) => println!("error reading value {:?}", e),
_ => panic!("value not present"),
}
......
......@@ -13,7 +13,7 @@
use crocksdb_ffi::{self, DBSliceTransform};
use libc::{c_void, c_char, size_t};
use libc::{c_char, c_void, size_t};
use std::ffi::CString;
use std::slice;
......@@ -54,11 +54,12 @@ extern "C" fn destructor(transform: *mut c_void) {
}
}
extern "C" fn transform(transform: *mut c_void,
extern "C" fn transform(
transform: *mut c_void,
key: *const u8,
key_len: size_t,
dest_len: *mut size_t)
-> *const u8 {
dest_len: *mut size_t,
) -> *const u8 {
unsafe {
let transform = &mut *(transform as *mut SliceTransformProxy);
let key = slice::from_raw_parts(key, key_len);
......@@ -85,18 +86,21 @@ extern "C" fn in_range(transform: *mut c_void, key: *const u8, key_len: size_t)
}
pub unsafe fn new_slice_transform(c_name: CString,
f: Box<SliceTransform>)
-> Result<*mut DBSliceTransform, String> {
pub unsafe fn new_slice_transform(
c_name: CString,
f: Box<SliceTransform>,
) -> Result<*mut DBSliceTransform, String> {
let proxy = Box::into_raw(Box::new(SliceTransformProxy {
name: c_name,
transform: f,
}));
let transform = crocksdb_ffi::crocksdb_slicetransform_create(proxy as *mut c_void,
let transform = crocksdb_ffi::crocksdb_slicetransform_create(
proxy as *mut c_void,
destructor,
transform,
in_domain,
in_range,
name);
name,
);
Ok(transform)
}
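The user-facing counterpart implements the crate's `SliceTransform` trait (cf. `FixedPrefixTransform` and `FixedSuffixTransform` in the tests below) and registers it through `set_prefix_extractor`. A sketch, with the method signatures assumed from the `transform`/`in_domain`/`in_range` callbacks above:

// Hypothetical fixed-length prefix extractor; method signatures are assumptions
// inferred from the FFI callbacks above.
use rocksdb::SliceTransform;

struct FixedPrefixTransform {
    prefix_len: usize,
}

impl SliceTransform for FixedPrefixTransform {
    fn transform<'a>(&mut self, key: &'a [u8]) -> &'a [u8] {
        &key[..self.prefix_len]
    }

    fn in_domain(&mut self, key: &[u8]) -> bool {
        key.len() >= self.prefix_len
    }

    fn in_range(&mut self, _key: &[u8]) -> bool {
        true
    }
}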
......@@ -12,20 +12,21 @@
// limitations under the License.
use crocksdb_ffi::{self, DBTableProperties, DBTableProperty, DBUserCollectedPropertiesIterator,
DBTablePropertiesCollection, DBTablePropertiesCollectionIterator,
DBUserCollectedProperties};
use crocksdb_ffi::{self, DBTableProperties, DBTablePropertiesCollection,
DBTablePropertiesCollectionIterator, DBTableProperty,
DBUserCollectedProperties, DBUserCollectedPropertiesIterator};
use libc::size_t;
use std::{slice, str, mem};
use std::{mem, slice, str};
use std::marker::PhantomData;
use std::ops::{Index, Deref};
use std::ops::{Deref, Index};
pub struct TablePropertiesCollectionView(DBTablePropertiesCollection);
impl TablePropertiesCollectionView {
pub unsafe fn from_ptr<'a>(collection: *const DBTablePropertiesCollection)
-> &'a TablePropertiesCollectionView {
pub unsafe fn from_ptr<'a>(
collection: *const DBTablePropertiesCollection,
) -> &'a TablePropertiesCollectionView {
let c = &*collection;
mem::transmute(c)
}
......@@ -86,8 +87,8 @@ impl<'a> Iterator for TablePropertiesCollectionIter<'a> {
}
let mut klen: size_t = 0;
let k = crocksdb_ffi::crocksdb_table_properties_collection_iter_key(self.inner,
&mut klen);
let k =
crocksdb_ffi::crocksdb_table_properties_collection_iter_key(self.inner, &mut klen);
let bytes = slice::from_raw_parts(k, klen);
let key = str::from_utf8(bytes).unwrap();
let props = crocksdb_ffi::crocksdb_table_properties_collection_iter_value(self.inner);
......@@ -237,10 +238,12 @@ impl UserCollectedProperties {
let bytes = index.as_ref();
let mut size = 0;
unsafe {
let ptr = crocksdb_ffi::crocksdb_user_collected_properties_get(&self.inner,
let ptr = crocksdb_ffi::crocksdb_user_collected_properties_get(
&self.inner,
bytes.as_ptr(),
bytes.len(),
&mut size);
&mut size,
);
if ptr.is_null() {
return None;
}
......@@ -262,7 +265,8 @@ impl<Q: AsRef<[u8]>> Index<Q> for UserCollectedProperties {
fn index(&self, index: Q) -> &[u8] {
let key = index.as_ref();
self.get(key).unwrap_or_else(|| panic!("no entry found for key {:?}", key))
self.get(key)
.unwrap_or_else(|| panic!("no entry found for key {:?}", key))
}
}
......@@ -308,13 +312,13 @@ impl<'a> Iterator for UserCollectedPropertiesIter<'a> {
return None;
}
let mut klen: size_t = 0;
let k = crocksdb_ffi::crocksdb_user_collected_properties_iter_key(self.inner,
&mut klen);
let k =
crocksdb_ffi::crocksdb_user_collected_properties_iter_key(self.inner, &mut klen);
let key = slice::from_raw_parts(k, klen);
let mut vlen: size_t = 0;
let v = crocksdb_ffi::crocksdb_user_collected_properties_iter_value(self.inner,
&mut vlen);
let v =
crocksdb_ffi::crocksdb_user_collected_properties_iter_value(self.inner, &mut vlen);
let val = slice::from_raw_parts(v, vlen);
crocksdb_ffi::crocksdb_user_collected_properties_iter_next(self.inner);
......
......@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBEntryType, DBUserCollectedProperties, DBTablePropertiesCollector};
use libc::{c_void, c_char, c_int, uint8_t, uint64_t, size_t};
use crocksdb_ffi::{self, DBEntryType, DBTablePropertiesCollector, DBUserCollectedProperties};
use libc::{c_char, c_int, c_void, size_t, uint64_t, uint8_t};
use std::collections::HashMap;
use std::ffi::CString;
use std::mem;
......@@ -26,12 +26,7 @@ use std::slice;
/// TablePropertiesCollector object per table and then call it sequentially
pub trait TablePropertiesCollector {
/// Will be called when a new key/value pair is inserted into the table.
fn add(&mut self,
key: &[u8],
value: &[u8],
entry_type: DBEntryType,
seq: u64,
file_size: u64);
fn add(&mut self, key: &[u8], value: &[u8], entry_type: DBEntryType, seq: u64, file_size: u64);
/// Will be called when a table has already been built and is ready for
/// writing the properties block.
......@@ -65,19 +60,23 @@ extern "C" fn destruct(handle: *mut c_void) {
}
}
pub extern "C" fn add(handle: *mut c_void,
pub extern "C" fn add(
handle: *mut c_void,
key: *const uint8_t,
key_len: size_t,
value: *const uint8_t,
value_len: size_t,
entry_type: c_int,
seq: uint64_t,
file_size: uint64_t) {
file_size: uint64_t,
) {
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorHandle);
let key = slice::from_raw_parts(key, key_len);
let value = slice::from_raw_parts(value, value_len);
handle.rep.add(key, value, mem::transmute(entry_type), seq, file_size);
handle
.rep
.add(key, value, mem::transmute(entry_type), seq, file_size);
}
}
......@@ -85,18 +84,21 @@ pub extern "C" fn finish(handle: *mut c_void, props: *mut DBUserCollectedPropert
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorHandle);
for (key, value) in handle.rep.finish() {
crocksdb_ffi::crocksdb_user_collected_properties_add(props,
crocksdb_ffi::crocksdb_user_collected_properties_add(
props,
key.as_ptr(),
key.len(),
value.as_ptr(),
value.len());
value.len(),
);
}
}
}
pub unsafe fn new_table_properties_collector(cname: &str,
collector: Box<TablePropertiesCollector>)
-> *mut DBTablePropertiesCollector {
pub unsafe fn new_table_properties_collector(
cname: &str,
collector: Box<TablePropertiesCollector>,
) -> *mut DBTablePropertiesCollector {
let handle = TablePropertiesCollectorHandle::new(cname, collector);
crocksdb_ffi::crocksdb_table_properties_collector_create(
Box::into_raw(Box::new(handle)) as *mut c_void,
......
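From the collector's point of view, `add` is invoked once per table entry and `finish` once per table; the loop in the `finish` shim above pushes whatever byte pairs the collector returns into the table's properties block. A counting collector in the spirit of `ExampleCollector` from the tests below; the `HashMap<Vec<u8>, Vec<u8>>` return type of `finish` is an assumption (the shim only requires something iterable as key/value byte pairs):

// Hypothetical collector counting Put entries; the add signature matches the
// trait declaration above, finish's exact return type is assumed.
use std::collections::HashMap;

use rocksdb::{DBEntryType, TablePropertiesCollector};

#[derive(Default)]
struct PutCounter {
    num_puts: u32,
}

impl TablePropertiesCollector for PutCounter {
    fn add(&mut self, _key: &[u8], _value: &[u8], entry_type: DBEntryType, _seq: u64, _file_size: u64) {
        if let DBEntryType::Put = entry_type {
            self.num_puts += 1;
        }
    }

    fn finish(&mut self) -> HashMap<Vec<u8>, Vec<u8>> {
        let mut props = HashMap::new();
        props.insert(b"num_puts".to_vec(), self.num_puts.to_string().into_bytes());
        props
    }
}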
......@@ -12,9 +12,9 @@
// limitations under the License.
use crocksdb_ffi::{self, DBTablePropertiesCollector, DBTablePropertiesCollectorFactory};
use libc::{c_void, c_char, uint32_t};
use libc::{c_char, c_void, uint32_t};
use std::ffi::CString;
use table_properties_collector::{TablePropertiesCollector, new_table_properties_collector};
use table_properties_collector::{new_table_properties_collector, TablePropertiesCollector};
/// Constructs `TablePropertiesCollector`.
/// Internally, a new `TablePropertiesCollector` is created for each new table.
......@@ -29,9 +29,10 @@ struct TablePropertiesCollectorFactoryHandle {
}
impl TablePropertiesCollectorFactoryHandle {
fn new(name: &str,
rep: Box<TablePropertiesCollectorFactory>)
-> TablePropertiesCollectorFactoryHandle {
fn new(
name: &str,
rep: Box<TablePropertiesCollectorFactory>,
) -> TablePropertiesCollectorFactoryHandle {
TablePropertiesCollectorFactoryHandle {
name: CString::new(name).unwrap(),
rep: rep,
......@@ -52,9 +53,10 @@ extern "C" fn destruct(handle: *mut c_void) {
}
}
extern "C" fn create_table_properties_collector(handle: *mut c_void,
cf: uint32_t)
-> *mut DBTablePropertiesCollector {
extern "C" fn create_table_properties_collector(
handle: *mut c_void,
cf: uint32_t,
) -> *mut DBTablePropertiesCollector {
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorFactoryHandle);
let collector = handle.rep.create_table_properties_collector(cf);
......@@ -62,9 +64,10 @@ extern "C" fn create_table_properties_collector(handle: *mut c_void,
}
}
pub unsafe fn new_table_properties_collector_factory
(fname: &str, factory: Box<TablePropertiesCollectorFactory>)
-> *mut DBTablePropertiesCollectorFactory {
pub unsafe fn new_table_properties_collector_factory(
fname: &str,
factory: Box<TablePropertiesCollectorFactory>,
) -> *mut DBTablePropertiesCollectorFactory {
let handle = TablePropertiesCollectorFactoryHandle::new(fname, factory);
crocksdb_ffi::crocksdb_table_properties_collector_factory_create(
Box::into_raw(Box::new(handle)) as *mut c_void,
......
......@@ -13,7 +13,7 @@
// limitations under the License.
//
use rocksdb::{DB, MergeOperands, DBOptions, ColumnFamilyOptions, Writable};
use rocksdb::{ColumnFamilyOptions, DBOptions, MergeOperands, Writable, DB};
use tempdir::TempDir;
#[test]
......@@ -43,15 +43,15 @@ pub fn test_column_family() {
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("test operator", test_provided_merge);
match DB::open_cf(DBOptions::new(), path_str, vec!["default"], vec![cf_opts]) {
Ok(_) => {
panic!("should not have opened DB successfully without \
Ok(_) => panic!(
"should not have opened DB successfully without \
specifying column
families")
}
Err(e) => {
assert!(e.starts_with("Invalid argument: You have to open \
all column families."))
}
families"
),
Err(e) => assert!(e.starts_with(
"Invalid argument: You have to open \
all column families."
)),
}
}
......@@ -77,11 +77,7 @@ pub fn test_column_family() {
};
let cf1 = db.cf_handle("cf1").unwrap();
assert!(db.put_cf(cf1, b"k1", b"v1").is_ok());
assert!(db.get_cf(cf1, b"k1")
.unwrap()
.unwrap()
.to_utf8()
.unwrap() == "v1");
assert!(db.get_cf(cf1, b"k1").unwrap().unwrap().to_utf8().unwrap() == "v1");
let p = db.put_cf(cf1, b"k1", b"a");
assert!(p.is_ok());
/*
......@@ -117,11 +113,12 @@ pub fn test_column_family() {
{}
// should be able to drop a cf
{
let mut db = DB::open_cf(DBOptions::new(),
let mut db = DB::open_cf(
DBOptions::new(),
path_str,
vec!["cf1"],
vec![ColumnFamilyOptions::new()])
.unwrap();
vec![ColumnFamilyOptions::new()],
).unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
......@@ -129,18 +126,17 @@ pub fn test_column_family() {
}
}
fn test_provided_merge(_: &[u8],
fn test_provided_merge(
_: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands)
-> Vec<u8> {
operands: &mut MergeOperands,
) -> Vec<u8> {
let nops = operands.size_hint().0;
let mut result: Vec<u8> = Vec::with_capacity(nops);
match existing_val {
Some(v) => {
for e in v {
Some(v) => for e in v {
result.push(*e);
}
}
},
None => (),
}
for op in operands {
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, DBOptions, Range, Writable};
use rocksdb::{DBOptions, Range, Writable, DB};
use tempdir::TempDir;
......@@ -21,11 +21,13 @@ fn test_compact_range() {
let mut opts = DBOptions::new();
opts.create_if_missing(true);
let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
let samples = vec![(b"k1".to_vec(), b"value--------1".to_vec()),
let samples = vec![
(b"k1".to_vec(), b"value--------1".to_vec()),
(b"k2".to_vec(), b"value--------2".to_vec()),
(b"k3".to_vec(), b"value--------3".to_vec()),
(b"k4".to_vec(), b"value--------4".to_vec()),
(b"k5".to_vec(), b"value--------5".to_vec())];
(b"k5".to_vec(), b"value--------5".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{Writable, DB, CompactionFilter, DBOptions, ColumnFamilyOptions};
use rocksdb::{ColumnFamilyOptions, CompactionFilter, DBOptions, Writable, DB};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use tempdir::TempDir;
......@@ -44,22 +44,28 @@ fn test_compaction_filter() {
let drop_called = Arc::new(AtomicBool::new(false));
let filtered_kvs = Arc::new(RwLock::new(vec![]));
// set ignore_snapshots to false
cf_opts.set_compaction_filter("test",
cf_opts
.set_compaction_filter(
"test",
false,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
}),
)
.unwrap();
let mut opts = DBOptions::new();
opts.create_if_missing(true);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let samples = vec![(b"key1".to_vec(), b"value1".to_vec()),
(b"key2".to_vec(), b"value2".to_vec())];
vec![cf_opts],
).unwrap();
let samples = vec![
(b"key1".to_vec(), b"value1".to_vec()),
(b"key2".to_vec(), b"value2".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
......@@ -79,21 +85,25 @@ fn test_compaction_filter() {
// reregister with ignore_snapshots set to true
let mut cf_opts = ColumnFamilyOptions::new();
let opts = DBOptions::new();
cf_opts.set_compaction_filter("test",
cf_opts
.set_compaction_filter(
"test",
true,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
}),
)
.unwrap();
assert!(drop_called.load(Ordering::Relaxed));
drop_called.store(false, Ordering::Relaxed);
{
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let _snap = db.snapshot();
// Because ignore_snapshots is true, all the keys will be compacted.
db.compact_range(Some(b"key1"), Some(b"key3"));
......
......@@ -86,17 +86,19 @@ fn test_event_listener_basic() {
opts.create_if_missing(true);
let db = DB::open(opts, path_str).unwrap();
for i in 1..8000 {
db.put(format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes())
.unwrap();
db.put(
format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes(),
).unwrap();
}
db.flush(true).unwrap();
assert_ne!(counter.flush.load(Ordering::SeqCst), 0);
for i in 1..8000 {
db.put(format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes())
.unwrap();
db.put(
format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes(),
).unwrap();
}
db.flush(true).unwrap();
let flush_cnt = counter.flush.load(Ordering::SeqCst);
......@@ -125,10 +127,12 @@ fn test_event_listener_ingestion() {
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst(default_options,
gen_sst(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")]);
&[(b"k1", b"v1"), (b"k2", b"v2")],
);
let ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
......
......@@ -16,10 +16,12 @@ use rocksdb::*;
use std::fs;
use tempdir::TempDir;
pub fn gen_sst(opt: ColumnFamilyOptions,
pub fn gen_sst(
opt: ColumnFamilyOptions,
cf: Option<&CFHandle>,
path: &str,
data: &[(&[u8], &[u8])]) {
data: &[(&[u8], &[u8])],
) {
let _ = fs::remove_file(path);
let env_opt = EnvOptions::new();
let mut writer = if cf.is_some() {
......@@ -79,11 +81,9 @@ fn gen_sst_delete(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str) {
fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
match existing_val {
Some(v) => {
for e in v {
Some(v) => for e in v {
result.push(*e)
}
}
},
None => (),
}
for op in operands {
......@@ -106,10 +106,12 @@ fn test_ingest_external_file() {
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst(default_options,
gen_sst(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")]);
&[(b"k1", b"v1"), (b"k2", b"v2")],
);
let mut ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
......@@ -117,20 +119,24 @@ fn test_ingest_external_file() {
assert_eq!(db.get(b"k1").unwrap().unwrap(), b"v1");
assert_eq!(db.get(b"k2").unwrap().unwrap(), b"v2");
gen_sst(ColumnFamilyOptions::new(),
gen_sst(
ColumnFamilyOptions::new(),
None,
test_sstfile_str,
&[(b"k1", b"v3"), (b"k2", b"v4")]);
&[(b"k1", b"v3"), (b"k2", b"v4")],
);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
assert_eq!(db.get_cf(handle, b"k1").unwrap().unwrap(), b"v3");
assert_eq!(db.get_cf(handle, b"k2").unwrap().unwrap(), b"v4");
let snap = db.snapshot();
gen_sst(ColumnFamilyOptions::new(),
gen_sst(
ColumnFamilyOptions::new(),
None,
test_sstfile_str,
&[(b"k2", b"v5"), (b"k3", b"v6")]);
&[(b"k2", b"v5"), (b"k3", b"v6")],
);
ingest_opt.move_files(true);
db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
.unwrap();
......@@ -157,9 +163,11 @@ fn test_ingest_external_file_new() {
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst_put(default_options,
gen_sst_put(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str);
test_sstfile_str,
);
let mut ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
......@@ -171,9 +179,11 @@ fn test_ingest_external_file_new() {
let snap = db.snapshot();
let default_options = db.get_options();
gen_sst_merge(default_options,
gen_sst_merge(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str);
test_sstfile_str,
);
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
......@@ -182,9 +192,11 @@ fn test_ingest_external_file_new() {
assert_eq!(db.get(b"k3").unwrap().unwrap(), b"cd");
let default_options = db.get_options();
gen_sst_delete(default_options,
gen_sst_delete(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str);
test_sstfile_str,
);
ingest_opt.move_files(true);
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
.unwrap();
......@@ -300,10 +312,12 @@ fn test_ingest_simulate_real_world() {
for cf in &ALL_CFS {
let handle = db.cf_handle(cf).unwrap();
let cf_opts = ColumnFamilyOptions::new();
put_delete_and_generate_sst_cf(cf_opts,
put_delete_and_generate_sst_cf(
cf_opts,
&db,
&handle,
gen_path.path().join(cf).to_str().unwrap());
gen_path.path().join(cf).to_str().unwrap(),
);
}
let path2 = TempDir::new("_rust_rocksdb_ingest_real_world_2").expect("");
......@@ -318,29 +332,47 @@ fn test_ingest_simulate_real_world() {
let handle = db2.cf_handle(cf).unwrap();
let mut ingest_opt = IngestExternalFileOptions::new();
ingest_opt.move_files(true);
db2.ingest_external_file_cf(handle,
db2.ingest_external_file_cf(
handle,
&ingest_opt,
&[gen_path.path().join(cf).to_str().unwrap()])
.unwrap();
check_kv(&db,
&[gen_path.path().join(cf).to_str().unwrap()],
).unwrap();
check_kv(
&db,
db.cf_handle(cf),
&[(b"k1", None), (b"k2", Some(b"v2")), (b"k3", None), (b"k4", Some(b"v4"))]);
&[
(b"k1", None),
(b"k2", Some(b"v2")),
(b"k3", None),
(b"k4", Some(b"v4")),
],
);
let cf_opts = ColumnFamilyOptions::new();
gen_sst_from_cf(cf_opts,
gen_sst_from_cf(
cf_opts,
&db2,
&handle,
gen_path.path().join(cf).to_str().unwrap());
gen_path.path().join(cf).to_str().unwrap(),
);
}
for cf in &ALL_CFS {
let handle = db.cf_handle(cf).unwrap();
let ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file_cf(handle,
db.ingest_external_file_cf(
handle,
&ingest_opt,
&[gen_path.path().join(cf).to_str().unwrap()])
.unwrap();
check_kv(&db,
&[gen_path.path().join(cf).to_str().unwrap()],
).unwrap();
check_kv(
&db,
db.cf_handle(cf),
&[(b"k1", None), (b"k2", Some(b"v2")), (b"k3", None), (b"k4", Some(b"v4"))]);
&[
(b"k1", None),
(b"k2", Some(b"v2")),
(b"k3", None),
(b"k4", Some(b"v4")),
],
);
}
}
......@@ -79,8 +79,11 @@ pub fn test_iterator() {
assert!(p.is_ok());
let p = db.put(k3, v3);
assert!(p.is_ok());
let expected =
vec![(k1.to_vec(), v1.to_vec()), (k2.to_vec(), v2.to_vec()), (k3.to_vec(), v3.to_vec())];
let expected = vec![
(k1.to_vec(), v1.to_vec()),
(k2.to_vec(), v2.to_vec()),
(k3.to_vec(), v3.to_vec()),
];
let mut iter = db.iter();
......@@ -113,10 +116,12 @@ pub fn test_iterator() {
old_iterator.seek(SeekKey::Start);
let p = db.put(&*k4, &*v4);
assert!(p.is_ok());
let expected2 = vec![(k1.to_vec(), v1.to_vec()),
let expected2 = vec![
(k1.to_vec(), v1.to_vec()),
(k2.to_vec(), v2.to_vec()),
(k3.to_vec(), v3.to_vec()),
(k4.to_vec(), v4.to_vec())];
(k4.to_vec(), v4.to_vec()),
];
assert_eq!(old_iterator.collect::<Vec<_>>(), expected);
iter = db.iter();
......@@ -124,8 +129,11 @@ pub fn test_iterator() {
assert_eq!(iter.collect::<Vec<_>>(), expected2);
iter.seek(SeekKey::Key(k2));
let expected =
vec![(k2.to_vec(), v2.to_vec()), (k3.to_vec(), v3.to_vec()), (k4.to_vec(), v4.to_vec())];
let expected = vec![
(k2.to_vec(), v2.to_vec()),
(k3.to_vec(), v3.to_vec()),
(k4.to_vec(), v4.to_vec()),
];
assert_eq!(iter.collect::<Vec<_>>(), expected);
iter.seek(SeekKey::Key(k2));
......@@ -241,19 +249,32 @@ fn test_total_order_seek() {
let mut opts = DBOptions::new();
opts.create_if_missing(true);
cf_opts.set_block_based_table_factory(&bbto);
cf_opts.set_prefix_extractor("FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }))
cf_opts
.set_prefix_extractor(
"FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }),
)
.unwrap();
// also create prefix bloom for memtable
cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
let keys = vec![b"k1-1", b"k1-2", b"k1-3", b"k2-1", b"k2-2", b"k2-3", b"k3-1", b"k3-2",
b"k3-3"];
let db = DB::open_cf(opts,
let keys = vec![
b"k1-1",
b"k1-2",
b"k1-3",
b"k2-1",
b"k2-2",
b"k2-3",
b"k3-1",
b"k3-2",
b"k3-3",
];
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let wopts = WriteOptions::new();
// sst1
......@@ -324,15 +345,19 @@ fn test_fixed_suffix_seek() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.set_block_based_table_factory(&bbto);
cf_opts.set_prefix_extractor("FixedSuffixTransform",
Box::new(FixedSuffixTransform { suffix_len: 2 }))
cf_opts
.set_prefix_extractor(
"FixedSuffixTransform",
Box::new(FixedSuffixTransform { suffix_len: 2 }),
)
.unwrap();
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
db.put(b"k-eghe-5", b"a").unwrap();
db.put(b"k-24yfae-6", b"a").unwrap();
db.put(b"k-h1fwd-7", b"a").unwrap();
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, Writable};
use rocksdb::{Writable, DB};
use std::sync::Arc;
use std::thread;
use tempdir::TempDir;
......@@ -40,11 +40,9 @@ pub fn test_multithreaded() {
let db3 = db.clone();
let j3 = thread::spawn(move || for _ in 1..N {
match db3.get(b"key") {
Ok(Some(v)) => {
if &v[..] != b"value1" && &v[..] != b"value2" {
Ok(Some(v)) => if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
}
}
},
_ => {
assert!(false);
}
......
......@@ -31,8 +31,17 @@ impl SliceTransform for FixedPrefixTransform {
#[test]
fn test_prefix_extractor_compatibility() {
let path = TempDir::new("_rust_rocksdb_prefix_extractor_compatibility").expect("");
let keys = vec![b"k1-0", b"k1-1", b"k1-2", b"k1-3", b"k1-4", b"k1-5", b"k1-6", b"k1-7",
b"k1-8"];
let keys = vec![
b"k1-0",
b"k1-1",
b"k1-2",
b"k1-3",
b"k1-4",
b"k1-5",
b"k1-6",
b"k1-7",
b"k1-8",
];
// create db with no prefix extractor, and insert data
{
......@@ -57,16 +66,20 @@ fn test_prefix_extractor_compatibility() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(false);
cf_opts.set_block_based_table_factory(&bbto);
cf_opts.set_prefix_extractor("FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }))
cf_opts
.set_prefix_extractor(
"FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }),
)
.unwrap();
// also create prefix bloom for memtable
cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let wopts = WriteOptions::new();
// sst2 with prefix bloom.
......
......@@ -11,11 +11,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, ColumnFamilyOptions, DBOptions, BlockBasedOptions, WriteOptions, ReadOptions,
SliceTransform, Writable, CompactOptions, SeekKey};
use rocksdb::crocksdb_ffi::{DBStatisticsHistogramType as HistogramType,
DBStatisticsTickerType as TickerType, DBInfoLogLevel as InfoLogLevel,
CompactionPriority, DBCompressionType};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions, ReadOptions,
SeekKey, SliceTransform, Writable, WriteOptions, DB};
use rocksdb::crocksdb_ffi::{CompactionPriority, DBCompressionType, DBInfoLogLevel as InfoLogLevel,
DBStatisticsHistogramType as HistogramType,
DBStatisticsTickerType as TickerType};
use std::path::Path;
use std::thread;
use std::time::Duration;
......@@ -29,11 +29,12 @@ fn test_set_num_levels() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.set_num_levels(2);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
drop(db);
}
......@@ -71,14 +72,22 @@ fn test_enable_statistics() {
opts.enable_statistics();
opts.set_stats_dump_period_sec(60);
assert!(opts.get_statistics().is_some());
assert!(opts.get_statistics_histogram_string(HistogramType::SeekMicros)
.is_some());
assert_eq!(opts.get_statistics_ticker_count(TickerType::BlockCacheMiss),
0);
assert_eq!(opts.get_and_reset_statistics_ticker_count(TickerType::BlockCacheMiss),
0);
assert_eq!(opts.get_statistics_ticker_count(TickerType::BlockCacheMiss),
0);
assert!(
opts.get_statistics_histogram_string(HistogramType::SeekMicros)
.is_some()
);
assert_eq!(
opts.get_statistics_ticker_count(TickerType::BlockCacheMiss),
0
);
assert_eq!(
opts.get_and_reset_statistics_ticker_count(TickerType::BlockCacheMiss),
0
);
assert_eq!(
opts.get_statistics_ticker_count(TickerType::BlockCacheMiss),
0
);
let opts = DBOptions::new();
assert!(opts.get_statistics().is_none());
......@@ -104,16 +113,18 @@ fn test_memtable_insert_hint_prefix_extractor() {
let mut opts = DBOptions::new();
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.set_memtable_insert_hint_prefix_extractor("FixedPrefixTransform",
Box::new(FixedPrefixTransform {
prefix_len: 2,
}))
cf_opts
.set_memtable_insert_hint_prefix_extractor(
"FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }),
)
.unwrap();
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let wopts = WriteOptions::new();
db.put_opt(b"k0-1", b"a", &wopts).unwrap();
......@@ -229,11 +240,12 @@ fn test_set_pin_l0_filter_and_index_blocks_in_cache() {
let mut block_opts = BlockBasedOptions::new();
block_opts.set_pin_l0_filter_and_index_blocks_in_cache(true);
cf_opts.set_block_based_table_factory(&block_opts);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
fn test_pending_compaction_bytes_limit() {
......@@ -243,11 +255,12 @@ fn test_pending_compaction_bytes_limit() {
opts.create_if_missing(true);
cf_opts.set_soft_pending_compaction_bytes_limit(64 * 1024 * 1024 * 1024);
cf_opts.set_hard_pending_compaction_bytes_limit(256 * 1024 * 1024 * 1024);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
......@@ -276,11 +289,12 @@ fn test_set_optimize_filters_for_hits() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.set_optimize_filters_for_hits(true);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
......@@ -295,11 +309,12 @@ fn test_get_block_cache_usage() {
let mut block_opts = BlockBasedOptions::new();
block_opts.set_lru_cache(16 * 1024 * 1024);
cf_opts.set_block_based_table_factory(&block_opts);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
for i in 0..200 {
db.put(format!("k_{}", i).as_bytes(), b"v").unwrap();
......@@ -319,11 +334,12 @@ fn test_set_level_compaction_dynamic_level_bytes() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.set_level_compaction_dynamic_level_bytes(true);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
......@@ -375,11 +391,12 @@ fn test_set_compaction_pri() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.compaction_priority(CompactionPriority::MinOverlappingRatio);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
......@@ -438,11 +455,12 @@ fn test_bottommost_compression() {
let cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
cf_opts.bottommost_compression(DBCompressionType::No);
DB::open_cf(opts,
DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
}
#[test]
......
......@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{Writable, DB, SliceTransform, ColumnFamilyOptions, DBOptions, SeekKey,
BlockBasedOptions};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBOptions, SeekKey, SliceTransform,
Writable, DB};
use tempdir::TempDir;
struct FixedPostfixTransform {
......@@ -43,19 +43,23 @@ fn test_slice_transform() {
cf_opts.set_block_based_table_factory(&block_opts);
cf_opts.set_memtable_prefix_bloom_size_ratio(0.25);
cf_opts.set_prefix_extractor("test", Box::new(FixedPostfixTransform { postfix_len: 2 }))
cf_opts
.set_prefix_extractor("test", Box::new(FixedPostfixTransform { postfix_len: 2 }))
.unwrap();
opts.create_if_missing(true);
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let samples = vec![(b"key_01".to_vec(), b"1".to_vec()),
vec![cf_opts],
).unwrap();
let samples = vec![
(b"key_01".to_vec(), b"1".to_vec()),
(b"key_02".to_vec(), b"2".to_vec()),
(b"key_0303".to_vec(), b"3".to_vec()),
(b"key_0404".to_vec(), b"4".to_vec())];
(b"key_0404".to_vec(), b"4".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
......@@ -64,17 +68,23 @@ fn test_slice_transform() {
let mut it = db.iter();
let invalid_seeks =
vec![b"key_".to_vec(), b"key_0".to_vec(), b"key_030".to_vec(), b"key_03000".to_vec()];
let invalid_seeks = vec![
b"key_".to_vec(),
b"key_0".to_vec(),
b"key_030".to_vec(),
b"key_03000".to_vec(),
];
for key in invalid_seeks {
it.seek(SeekKey::Key(&key));
assert!(!it.valid());
}
let valid_seeks = vec![(b"key_00".to_vec(), b"key_01".to_vec()),
let valid_seeks = vec![
(b"key_00".to_vec(), b"key_01".to_vec()),
(b"key_03".to_vec(), b"key_0303".to_vec()),
(b"key_0301".to_vec(), b"key_0303".to_vec())];
(b"key_0301".to_vec(), b"key_0303".to_vec()),
];
for (key, expect_key) in valid_seeks {
it.seek(SeekKey::Key(&key));
......
......@@ -12,7 +12,7 @@
// limitations under the License.
use rocksdb::*;
use rocksdb::{DBStatisticsTickerType as TickerType, DBStatisticsHistogramType as HistogramType};
use rocksdb::{DBStatisticsHistogramType as HistogramType, DBStatisticsTickerType as TickerType};
use tempdir::TempDir;
#[test]
......@@ -35,8 +35,12 @@ fn test_db_statistics() {
assert!(db.get_statistics_ticker_count(TickerType::BlockCacheHit) > 0);
assert!(db.get_and_reset_statistics_ticker_count(TickerType::BlockCacheHit) > 0);
assert_eq!(db.get_statistics_ticker_count(TickerType::BlockCacheHit), 0);
assert!(db.get_statistics_histogram_string(HistogramType::GetMicros)
.is_some());
assert!(db.get_statistics_histogram(HistogramType::GetMicros)
.is_some());
assert!(
db.get_statistics_histogram_string(HistogramType::GetMicros)
.is_some()
);
assert!(
db.get_statistics_histogram(HistogramType::GetMicros)
.is_some()
);
}
......@@ -12,9 +12,9 @@
// limitations under the License.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
use rocksdb::{DB, Range, ColumnFamilyOptions, DBOptions, Writable, DBEntryType,
TablePropertiesCollection, TablePropertiesCollector,
TablePropertiesCollectorFactory, UserCollectedProperties};
use rocksdb::{ColumnFamilyOptions, DBEntryType, DBOptions, Range, TablePropertiesCollection,
TablePropertiesCollector, TablePropertiesCollectorFactory, UserCollectedProperties,
Writable, DB};
use std::collections::HashMap;
use std::fmt;
use tempdir::TempDir;
......@@ -82,7 +82,11 @@ impl ExampleCollector {
for (k, v) in props {
assert_eq!(v, props.get(k).unwrap());
}
assert!(props.get(&[Props::NumKeys as u8, Props::NumPuts as u8]).is_none());
assert!(
props
.get(&[Props::NumKeys as u8, Props::NumPuts as u8])
.is_none()
);
assert!(props.len() >= 4);
c
......@@ -91,12 +95,14 @@ impl ExampleCollector {
impl fmt::Display for ExampleCollector {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f,
write!(
f,
"keys={}, puts={}, merges={}, deletes={}",
self.num_keys,
self.num_puts,
self.num_merges,
self.num_deletes)
self.num_deletes
)
}
}
......@@ -110,8 +116,7 @@ impl TablePropertiesCollector for ExampleCollector {
match entry_type {
DBEntryType::Put => self.num_puts += 1,
DBEntryType::Merge => self.num_merges += 1,
DBEntryType::Delete |
DBEntryType::SingleDelete => self.num_deletes += 1,
DBEntryType::Delete | DBEntryType::SingleDelete => self.num_deletes += 1,
DBEntryType::Other => {}
}
}
......@@ -135,12 +140,14 @@ impl TablePropertiesCollectorFactory for ExampleFactory {
}
}
fn check_collection(collection: &TablePropertiesCollection,
fn check_collection(
collection: &TablePropertiesCollection,
num_files: usize,
num_keys: u32,
num_puts: u32,
num_merges: u32,
num_deletes: u32) {
num_deletes: u32,
) {
let mut res = ExampleCollector::new();
assert!(!collection.is_empty());
let props: HashMap<_, _> = collection.iter().collect();
......@@ -166,16 +173,19 @@ fn test_table_properties_collector_factory() {
cf_opts.add_table_properties_collector_factory("example-collector", Box::new(f));
let path = TempDir::new("_rust_rocksdb_collectortest").expect("");
let db = DB::open_cf(opts,
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
vec![cf_opts],
).unwrap();
let samples = vec![(b"key1".to_vec(), b"value1".to_vec()),
let samples = vec![
(b"key1".to_vec(), b"value1".to_vec()),
(b"key2".to_vec(), b"value2".to_vec()),
(b"key3".to_vec(), b"value3".to_vec()),
(b"key4".to_vec(), b"value4".to_vec())];
(b"key4".to_vec(), b"value4".to_vec()),
];
// Put 4 keys.
for &(ref k, ref v) in &samples {
......
prepare-rustfmt:
curl -L https://github.com/tennix/rustfmt/releases/download/v0.6/rustfmt-${RUSTFMT_VERSION}-linux-amd64.tar.gz -o rustfmt-${RUSTFMT_VERSION}-linux-amd64.tar.gz && \
mkdir -p ${HOME}/.cargo/bin && tar xzf rustfmt-${RUSTFMT_VERSION}-linux-amd64.tar.gz -C ${HOME}/.cargo/bin --strip-components=1
@[[ "`cargo fmt -- --version`" = "0.2.1-nightly ( )" ]] || cargo install --vers 0.2.1 --force rustfmt-nightly || exit 0
\ No newline at end of file