Commit dab2ef77 authored by follitude, committed by siddontang

*: format code (#121)

parent 4fd63913
# Complete list of style options can be found at:
# http://clang.llvm.org/docs/ClangFormatStyleOptions.html
---
BasedOnStyle: Google
...
......@@ -12,13 +12,8 @@ cache:
os:
- linux
# - osx
before_script:
- make -f travis-build/Makefile prepare-rustfmt
script:
- cargo fmt -- --write-mode diff || (echo please make format and run tests before creating a pr!; exit 1)
- cargo build --features static-link
- cargo test --all --features static-link
......@@ -26,4 +21,3 @@ env:
global:
- RUST_TEST_THREADS=1
- LD_LIBRARY_PATH: "/usr/local/lib"
- RUSTFMT_VERSION=v0.6.0
......@@ -8,14 +8,13 @@ machine:
CPPFLAGS: "-I$HOME/.local/include"
CXXFLAGS: "-I$HOME/.local/include"
PKG_CONFIG_PATH: "$PKG_CONFIG_PATH:$HOME/.local/lib/pkgconfig"
RUSTC_DATE: "2017-03-28"
RUSTC_DATE: "2017-08-09"
LOCAL_PREFIX: "$HOME/.local"
# used by cargo
LIBRARY_PATH: "$LIBRARY_PATH:$HOME/.local/lib"
RUST_TEST_THREADS: 1
RUST_BACKTRACE: 1
RUSTFLAGS: "-Dwarnings"
RUSTFMT_VERSION: "v0.6.0"
pre:
- |
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y;
......
extern crate gcc;
use gcc::Build;
use std::{env, fs, str};
use std::path::PathBuf;
use std::process::Command;
use gcc::Config;
macro_rules! t {
($e:expr) => (match $e {
Ok(n) => n,
......@@ -26,8 +25,8 @@ fn main() {
println!("cargo:rustc-link-lib=static=crocksdb");
}
fn build_rocksdb() -> Config {
let mut cfg = Config::new();
fn build_rocksdb() -> Build {
let mut cfg = Build::new();
if !cfg!(feature = "static-link") {
if cfg!(target_os = "windows") {
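For context: a later release of the gcc crate renamed its `Config` builder to `Build` (the crate itself was later republished as `cc`), and the build-script hunks here track that rename. A minimal build-script sketch against the renamed API; the source and include paths below are hypothetical placeholders, not this repository's real layout.

extern crate gcc;

// Minimal sketch of the renamed gcc::Build API. The paths are
// placeholders for illustration only.
fn main() {
    gcc::Build::new()
        .cpp(true)                  // compile the wrapper as C++
        .include("rocksdb/include") // hypothetical include directory
        .file("crocksdb/c.cc")      // hypothetical wrapper source
        .compile("libcrocksdb.a");  // emits cargo:rustc-link-lib=static=crocksdb
}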
......@@ -89,10 +88,12 @@ fn build_rocksdb() -> Config {
}
if let Err(e) = fs::rename(src.as_path(), dst.as_path()) {
panic!("failed to move {} to {}: {:?}",
src.display(),
dst.display(),
e);
panic!(
"failed to move {} to {}: {:?}",
src.display(),
dst.display(),
e
);
}
}
......@@ -124,23 +125,29 @@ fn build_rocksdb() -> Config {
return cfg;
}
let output =
Command::new(p.as_path()).args(&["find_library", std_lib_name]).output().unwrap();
let output = Command::new(p.as_path())
.args(&["find_library", std_lib_name])
.output()
.unwrap();
if output.status.success() && !output.stdout.is_empty() {
if let Ok(path_str) = str::from_utf8(&output.stdout) {
let path = PathBuf::from(path_str);
if path.is_absolute() {
println!("cargo:rustc-link-lib=static=stdc++");
println!("cargo:rustc-link-search=native={}",
path.parent().unwrap().display());
println!(
"cargo:rustc-link-search=native={}",
path.parent().unwrap().display()
);
cfg.cpp_link_stdlib(None);
return cfg;
}
}
}
println!("failed to detect {}: {:?}, fallback to dynamic",
std_lib_name,
output);
println!(
"failed to detect {}: {:?}, fallback to dynamic",
std_lib_name,
output
);
cfg
}
......
......@@ -628,7 +628,8 @@ crocksdb_compactionjobinfo_table_properties(
extern C_ROCKSDB_LIBRARY_API uint64_t
crocksdb_compactionjobinfo_elapsed_micros(const crocksdb_compactionjobinfo_t*);
extern C_ROCKSDB_LIBRARY_API uint64_t
crocksdb_compactionjobinfo_num_corrupt_keys(const crocksdb_compactionjobinfo_t*);
crocksdb_compactionjobinfo_num_corrupt_keys(
const crocksdb_compactionjobinfo_t*);
/* External file ingestion info */
......
use crocksdb_ffi::{self, DBCompactionFilter};
use libc::{c_void, c_char, c_int, size_t};
use libc::{c_char, c_int, c_void, size_t};
use std::ffi::CString;
use std::slice;
......@@ -34,16 +34,17 @@ extern "C" fn destructor(filter: *mut c_void) {
}
}
extern "C" fn filter(filter: *mut c_void,
level: c_int,
key: *const u8,
key_len: size_t,
value: *const u8,
value_len: size_t,
_: *mut *mut u8,
_: *mut size_t,
value_changed: *mut bool)
-> bool {
extern "C" fn filter(
filter: *mut c_void,
level: c_int,
key: *const u8,
key_len: size_t,
value: *const u8,
value_len: size_t,
_: *mut *mut u8,
_: *mut size_t,
value_changed: *mut bool,
) -> bool {
unsafe {
let filter = &mut *(filter as *mut CompactionFilterProxy);
let key = slice::from_raw_parts(key, key_len);
......@@ -65,18 +66,21 @@ impl Drop for CompactionFilterHandle {
}
}
pub unsafe fn new_compaction_filter(c_name: CString,
ignore_snapshots: bool,
f: Box<CompactionFilter>)
-> Result<CompactionFilterHandle, String> {
pub unsafe fn new_compaction_filter(
c_name: CString,
ignore_snapshots: bool,
f: Box<CompactionFilter>,
) -> Result<CompactionFilterHandle, String> {
let proxy = Box::into_raw(Box::new(CompactionFilterProxy {
name: c_name,
filter: f,
}));
let filter = crocksdb_ffi::crocksdb_compactionfilter_create(proxy as *mut c_void,
destructor,
filter,
name);
let filter = crocksdb_ffi::crocksdb_compactionfilter_create(
proxy as *mut c_void,
destructor,
filter,
name,
);
crocksdb_ffi::crocksdb_compactionfilter_set_ignore_snapshots(filter, ignore_snapshots);
Ok(CompactionFilterHandle { inner: filter })
}
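The shims above proxy into a boxed `CompactionFilter` trait object. As a minimal sketch, assuming the trait exposes a `filter(level, key, value) -> bool` method mirroring the extern "C" callback (where returning `true` drops the entry), a filter discarding a hypothetical "tmp-" key prefix could look like:

struct DiscardTmpKeys;

impl CompactionFilter for DiscardTmpKeys {
    // Assumed signature, mirroring the extern "C" filter callback above;
    // returning true tells RocksDB to drop the entry during compaction.
    fn filter(&mut self, _level: usize, key: &[u8], _value: &[u8]) -> bool {
        key.starts_with(b"tmp-")
    }
}

Registration goes through `ColumnFamilyOptions::set_compaction_filter`, as the compaction-filter test further down in this diff shows.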
......@@ -36,12 +36,13 @@ pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
}
}
pub extern "C" fn compare_callback(raw_cb: *mut c_void,
a_raw: *const c_char,
a_len: size_t,
b_raw: *const c_char,
b_len: size_t)
-> c_int {
pub extern "C" fn compare_callback(
raw_cb: *mut c_void,
a_raw: *const c_char,
a_len: size_t,
b_raw: *const c_char,
b_len: size_t,
) -> c_int {
unsafe {
let cb: &mut ComparatorCallback = &mut *(raw_cb as *mut ComparatorCallback);
let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
......
......@@ -13,10 +13,10 @@
use {TableProperties, TablePropertiesCollectionView};
use crocksdb_ffi::{self, DBInstance, DBFlushJobInfo, DBCompactionJobInfo, DBIngestionInfo,
DBEventListener};
use crocksdb_ffi::{self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
DBInstance};
use libc::c_void;
use std::{slice, mem, str};
use std::{mem, slice, str};
use std::path::Path;
......@@ -98,8 +98,11 @@ impl IngestionInfo {
}
pub fn internal_file_path(&self) -> &Path {
let p =
unsafe { fetch_str!(crocksdb_externalfileingestioninfo_internal_file_path(&self.0)) };
let p = unsafe {
fetch_str!(crocksdb_externalfileingestioninfo_internal_file_path(
&self.0
))
};
Path::new(p)
}
......@@ -134,23 +137,29 @@ extern "C" fn destructor(ctx: *mut c_void) {
// Maybe we should reuse db instance?
// TODO: refactor DB implement so that we can convert DBInstance to DB.
extern "C" fn on_flush_completed(ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBFlushJobInfo) {
extern "C" fn on_flush_completed(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBFlushJobInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_flush_completed(info);
}
extern "C" fn on_compaction_completed(ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBCompactionJobInfo) {
extern "C" fn on_compaction_completed(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBCompactionJobInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_compaction_completed(info);
}
extern "C" fn on_external_file_ingested(ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBIngestionInfo) {
extern "C" fn on_external_file_ingested(
ctx: *mut c_void,
_: *mut DBInstance,
info: *const DBIngestionInfo,
) {
let (ctx, info) = unsafe { (&*(ctx as *mut Box<EventListener>), mem::transmute(&*info)) };
ctx.on_external_file_ingested(info);
}
......@@ -158,10 +167,12 @@ extern "C" fn on_external_file_ingested(ctx: *mut c_void,
pub fn new_event_listener<L: EventListener>(l: L) -> *mut DBEventListener {
let p: Box<EventListener> = Box::new(l);
unsafe {
crocksdb_ffi::crocksdb_eventlistener_create(Box::into_raw(Box::new(p)) as *mut c_void,
destructor,
on_flush_completed,
on_compaction_completed,
on_external_file_ingested)
crocksdb_ffi::crocksdb_eventlistener_create(
Box::into_raw(Box::new(p)) as *mut c_void,
destructor,
on_flush_completed,
on_compaction_completed,
on_external_file_ingested,
)
}
}
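All three shims forward into a single boxed `EventListener`, so a Rust listener only has to implement the hooks it cares about. A minimal sketch that counts completed flushes, assuming the trait provides default no-op implementations (the same pattern the event-listener test below relies on with its `counter.flush` assertions):

use std::sync::atomic::{AtomicUsize, Ordering};

#[derive(Default)]
struct FlushCounter {
    flushes: AtomicUsize,
}

impl EventListener for FlushCounter {
    fn on_flush_completed(&self, _info: &FlushJobInfo) {
        // Callbacks may arrive from RocksDB background threads,
        // hence the atomic counter.
        self.flushes.fetch_add(1, Ordering::SeqCst);
    }
}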
......@@ -32,16 +32,16 @@ mod table_properties_collector_factory;
mod event_listener;
pub use compaction_filter::CompactionFilter;
pub use event_listener::{EventListener, CompactionJobInfo, IngestionInfo, FlushJobInfo};
pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, DBInfoLogLevel,
DBStatisticsTickerType, DBStatisticsHistogramType, new_bloom_filter,
CompactionPriority, DBEntryType, self as crocksdb_ffi};
pub use event_listener::{CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo};
pub use librocksdb_sys::{self as crocksdb_ffi, new_bloom_filter, CompactionPriority,
DBCompactionStyle, DBCompressionType, DBEntryType, DBInfoLogLevel,
DBRecoveryMode, DBStatisticsHistogramType, DBStatisticsTickerType};
pub use merge_operator::MergeOperands;
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range,
BackupEngine, SstFileWriter};
pub use rocksdb_options::{BlockBasedOptions, DBOptions, ColumnFamilyOptions, ReadOptions,
WriteOptions, RestoreOptions, IngestExternalFileOptions, EnvOptions,
HistogramData, CompactOptions};
pub use rocksdb::{BackupEngine, CFHandle, DBIterator, DBVector, Kv, Range, SeekKey, SstFileWriter,
Writable, WriteBatch, DB};
pub use rocksdb_options::{BlockBasedOptions, ColumnFamilyOptions, CompactOptions, DBOptions,
EnvOptions, HistogramData, IngestExternalFileOptions, ReadOptions,
RestoreOptions, WriteOptions};
pub use slice_transform::SliceTransform;
pub use table_properties::{TableProperties, TablePropertiesCollection,
TablePropertiesCollectionView, UserCollectedProperties};
......
......@@ -14,7 +14,7 @@
//
extern crate rocksdb;
use rocksdb::{DB, MergeOperands, DBOptions, Writable, ColumnFamilyOptions};
use rocksdb::{ColumnFamilyOptions, DBOptions, MergeOperands, Writable, DB};
// fn snapshot_test() {
// let path = "_rust_rocksdb_iteratortest";
......@@ -52,12 +52,10 @@ fn main() {
let db = DB::open_default(path).unwrap();
assert!(db.put(b"my key", b"my value").is_ok());
match db.get(b"my key") {
Ok(Some(value)) => {
match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
},
Ok(None) => panic!("value not present!"),
Err(e) => println!("error retrieving value: {}", e),
}
......@@ -70,11 +68,9 @@ fn main() {
fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
match existing_val {
Some(v) => {
for e in v {
result.push(*e)
}
}
Some(v) => for e in v {
result.push(*e)
},
None => (),
}
for op in operands {
......@@ -100,12 +96,10 @@ fn custom_merge() {
db.merge(b"k1", b"efg").unwrap();
db.merge(b"k1", b"h").unwrap();
match db.get(b"k1") {
Ok(Some(value)) => {
match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
},
Ok(None) => panic!("value not present!"),
Err(e) => println!("error retrieving value: {}", e),
}
......@@ -116,22 +110,25 @@ fn custom_merge() {
#[cfg(test)]
mod tests {
use rocksdb::{BlockBasedOptions, DB, DBCompressionType, ColumnFamilyOptions, DBOptions};
use rocksdb::{BlockBasedOptions, ColumnFamilyOptions, DBCompressionType, DBOptions, DB};
use rocksdb::DBCompactionStyle;
use rocksdb::DBRecoveryMode;
#[allow(dead_code)]
fn tuned_for_somebody_elses_disk(path: &str,
mut opts: DBOptions,
blockopts: &mut BlockBasedOptions)
-> DB {
let per_level_compression: [DBCompressionType; 7] = [DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4];
fn tuned_for_somebody_elses_disk(
path: &str,
mut opts: DBOptions,
blockopts: &mut BlockBasedOptions,
) -> DB {
let per_level_compression: [DBCompressionType; 7] = [
DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::No,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
DBCompressionType::Lz4,
];
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(true);
opts.set_max_open_files(10000);
......
......@@ -41,23 +41,24 @@ pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {
}
}
pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
existing_value: *const c_char,
existing_value_len: size_t,
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t)
-> *const c_char {
pub extern "C" fn full_merge_callback(
raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
existing_value: *const c_char,
existing_value_len: size_t,
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t,
) -> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
let key: &[u8] = slice::from_raw_parts(raw_key as *const u8, key_len as usize);
let oldval: &[u8] = slice::from_raw_parts(existing_value as *const u8,
existing_value_len as usize);
let oldval: &[u8] =
slice::from_raw_parts(existing_value as *const u8, existing_value_len as usize);
let mut result = (cb.merge_fn)(key, Some(oldval), operands);
result.shrink_to_fit();
// TODO(tan) investigate zero-copy techniques to improve performance
......@@ -70,15 +71,16 @@ pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
}
}
pub extern "C" fn partial_merge_callback(raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t)
-> *const c_char {
pub extern "C" fn partial_merge_callback(
raw_cb: *mut c_void,
raw_key: *const c_char,
key_len: size_t,
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
success: *mut u8,
new_value_length: *mut size_t,
) -> *const c_char {
unsafe {
let cb: &mut MergeOperatorCallback = &mut *(raw_cb as *mut MergeOperatorCallback);
let operands = &mut MergeOperands::new(operands_list, operands_list_len, num_operands);
......@@ -104,10 +106,11 @@ pub struct MergeOperands {
}
impl MergeOperands {
fn new(operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int)
-> MergeOperands {
fn new(
operands_list: *const *const c_char,
operands_list_len: *const size_t,
num_operands: c_int,
) -> MergeOperands {
assert!(num_operands >= 0);
MergeOperands {
operands_list: operands_list,
......@@ -133,8 +136,10 @@ impl<'a> Iterator for &'a mut MergeOperands {
let len = *len_ptr as usize;
let ptr = base + (spacing * self.cursor);
self.cursor += 1;
Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8) as *const u8,
len)))
Some(mem::transmute(slice::from_raw_parts(
*(ptr as *const *const u8) as *const u8,
len,
)))
}
}
}
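Because `&mut MergeOperands` implements `Iterator` and yields plain byte slices, merge functions stay free of unsafe code. A minimal concatenating merge function with the same signature as `concat_merge` and `test_provided_merge` elsewhere in this diff:

// Append every operand after any existing value.
fn concat(_key: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
    let mut result = existing_val.map_or_else(Vec::new, |v| v.to_vec());
    for op in operands {
        result.extend_from_slice(op);
    }
    result
}

It would be wired up with `cf_opts.add_merge_operator("concat", concat)` and exercised through `db.merge(...)`, exactly as the merge test below does.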
......@@ -147,17 +152,18 @@ impl<'a> Iterator for &'a mut MergeOperands {
#[cfg(test)]
mod test {
use rocksdb::{DB, DBVector, Writable};
use rocksdb_options::{DBOptions, ColumnFamilyOptions};
use super::*;
use rocksdb::{DBVector, Writable, DB};
use rocksdb_options::{ColumnFamilyOptions, DBOptions};
use tempdir::TempDir;
#[allow(unused_variables)]
#[allow(dead_code)]
fn test_provided_merge(new_key: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands)
-> Vec<u8> {
fn test_provided_merge(
new_key: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands,
) -> Vec<u8> {
let nops = operands.size_hint().0;
let mut result: Vec<u8> = Vec::with_capacity(nops);
if let Some(v) = existing_val {
......@@ -181,11 +187,12 @@ mod test {
opts.create_if_missing(true);
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("test operator", test_provided_merge);
let db = DB::open_cf(opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts],
).unwrap();
let p = db.put(b"k1", b"a");
assert!(p.is_ok());
let _ = db.merge(b"k1", b"b");
......@@ -195,12 +202,10 @@ mod test {
let m = db.merge(b"k1", b"h");
assert!(m.is_ok());
match db.get(b"k1") {
Ok(Some(value)) => {
match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
}
}
Ok(Some(value)) => match value.to_utf8() {
Some(v) => println!("retrieved utf8 value: {}", v),
None => println!("did not read valid utf-8 out of the db"),
},
Err(e) => println!("error reading value {:?}", e),
_ => panic!("value not present"),
}
......
......@@ -13,7 +13,7 @@
use crocksdb_ffi::{self, DBSliceTransform};
use libc::{c_void, c_char, size_t};
use libc::{c_char, c_void, size_t};
use std::ffi::CString;
use std::slice;
......@@ -54,11 +54,12 @@ extern "C" fn destructor(transform: *mut c_void) {
}
}
extern "C" fn transform(transform: *mut c_void,
key: *const u8,
key_len: size_t,
dest_len: *mut size_t)
-> *const u8 {
extern "C" fn transform(
transform: *mut c_void,
key: *const u8,
key_len: size_t,
dest_len: *mut size_t,
) -> *const u8 {
unsafe {
let transform = &mut *(transform as *mut SliceTransformProxy);
let key = slice::from_raw_parts(key, key_len);
......@@ -85,18 +86,21 @@ extern "C" fn in_range(transform: *mut c_void, key: *const u8, key_len: size_t)
}
pub unsafe fn new_slice_transform(c_name: CString,
f: Box<SliceTransform>)
-> Result<*mut DBSliceTransform, String> {
pub unsafe fn new_slice_transform(
c_name: CString,
f: Box<SliceTransform>,
) -> Result<*mut DBSliceTransform, String> {
let proxy = Box::into_raw(Box::new(SliceTransformProxy {
name: c_name,
transform: f,
}));
let transform = crocksdb_ffi::crocksdb_slicetransform_create(proxy as *mut c_void,
destructor,
transform,
in_domain,
in_range,
name);
let transform = crocksdb_ffi::crocksdb_slicetransform_create(
proxy as *mut c_void,
destructor,
transform,
in_domain,
in_range,
name,
);
Ok(transform)
}
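A minimal Rust-side sketch shaped like the `FixedPrefixTransform` used in the prefix-extractor test below. The trait method signatures here are assumptions that mirror the `transform`, `in_domain`, and `in_range` C callbacks above:

struct FixedPrefixTransform {
    prefix_len: usize,
}

impl SliceTransform for FixedPrefixTransform {
    // Assumed signatures, mirroring the C callbacks above.
    fn transform<'a>(&mut self, key: &'a [u8]) -> &'a [u8] {
        &key[..self.prefix_len]
    }

    fn in_domain(&mut self, key: &[u8]) -> bool {
        key.len() >= self.prefix_len
    }

    fn in_range(&mut self, _key: &[u8]) -> bool {
        true
    }
}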
......@@ -12,20 +12,21 @@
// limitations under the License.
use crocksdb_ffi::{self, DBTableProperties, DBTableProperty, DBUserCollectedPropertiesIterator,
DBTablePropertiesCollection, DBTablePropertiesCollectionIterator,
DBUserCollectedProperties};
use crocksdb_ffi::{self, DBTableProperties, DBTablePropertiesCollection,
DBTablePropertiesCollectionIterator, DBTableProperty,
DBUserCollectedProperties, DBUserCollectedPropertiesIterator};
use libc::size_t;
use std::{slice, str, mem};
use std::{mem, slice, str};
use std::marker::PhantomData;
use std::ops::{Index, Deref};
use std::ops::{Deref, Index};
pub struct TablePropertiesCollectionView(DBTablePropertiesCollection);
impl TablePropertiesCollectionView {
pub unsafe fn from_ptr<'a>(collection: *const DBTablePropertiesCollection)
-> &'a TablePropertiesCollectionView {
pub unsafe fn from_ptr<'a>(
collection: *const DBTablePropertiesCollection,
) -> &'a TablePropertiesCollectionView {
let c = &*collection;
mem::transmute(c)
}
......@@ -86,8 +87,8 @@ impl<'a> Iterator for TablePropertiesCollectionIter<'a> {
}
let mut klen: size_t = 0;
let k = crocksdb_ffi::crocksdb_table_properties_collection_iter_key(self.inner,
&mut klen);
let k =
crocksdb_ffi::crocksdb_table_properties_collection_iter_key(self.inner, &mut klen);
let bytes = slice::from_raw_parts(k, klen);
let key = str::from_utf8(bytes).unwrap();
let props = crocksdb_ffi::crocksdb_table_properties_collection_iter_value(self.inner);
......@@ -237,10 +238,12 @@ impl UserCollectedProperties {
let bytes = index.as_ref();
let mut size = 0;
unsafe {
let ptr = crocksdb_ffi::crocksdb_user_collected_properties_get(&self.inner,
bytes.as_ptr(),
bytes.len(),
&mut size);
let ptr = crocksdb_ffi::crocksdb_user_collected_properties_get(
&self.inner,
bytes.as_ptr(),
bytes.len(),
&mut size,
);
if ptr.is_null() {
return None;
}
......@@ -262,7 +265,8 @@ impl<Q: AsRef<[u8]>> Index<Q> for UserCollectedProperties {
fn index(&self, index: Q) -> &[u8] {
let key = index.as_ref();
self.get(key).unwrap_or_else(|| panic!("no entry found for key {:?}", key))
self.get(key)
.unwrap_or_else(|| panic!("no entry found for key {:?}", key))
}
}
......@@ -308,13 +312,13 @@ impl<'a> Iterator for UserCollectedPropertiesIter<'a> {
return None;
}
let mut klen: size_t = 0;
let k = crocksdb_ffi::crocksdb_user_collected_properties_iter_key(self.inner,
&mut klen);
let k =
crocksdb_ffi::crocksdb_user_collected_properties_iter_key(self.inner, &mut klen);
let key = slice::from_raw_parts(k, klen);
let mut vlen: size_t = 0;
let v = crocksdb_ffi::crocksdb_user_collected_properties_iter_value(self.inner,
&mut vlen);
let v =
crocksdb_ffi::crocksdb_user_collected_properties_iter_value(self.inner, &mut vlen);
let val = slice::from_raw_parts(v, vlen);
crocksdb_ffi::crocksdb_user_collected_properties_iter_next(self.inner);
......
......@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crocksdb_ffi::{self, DBEntryType, DBUserCollectedProperties, DBTablePropertiesCollector};
use libc::{c_void, c_char, c_int, uint8_t, uint64_t, size_t};
use crocksdb_ffi::{self, DBEntryType, DBTablePropertiesCollector, DBUserCollectedProperties};
use libc::{c_char, c_int, c_void, size_t, uint64_t, uint8_t};
use std::collections::HashMap;
use std::ffi::CString;
use std::mem;
......@@ -26,12 +26,7 @@ use std::slice;
/// TablePropertiesCollector object per table and then call it sequentially
pub trait TablePropertiesCollector {
/// Will be called when a new key/value pair is inserted into the table.
fn add(&mut self,
key: &[u8],
value: &[u8],
entry_type: DBEntryType,
seq: u64,
file_size: u64);
fn add(&mut self, key: &[u8], value: &[u8], entry_type: DBEntryType, seq: u64, file_size: u64);
/// Will be called when a table has already been built and is ready for
/// writing the properties block.
......@@ -65,19 +60,23 @@ extern "C" fn destruct(handle: *mut c_void) {
}
}
pub extern "C" fn add(handle: *mut c_void,
key: *const uint8_t,
key_len: size_t,
value: *const uint8_t,
value_len: size_t,
entry_type: c_int,
seq: uint64_t,
file_size: uint64_t) {
pub extern "C" fn add(
handle: *mut c_void,
key: *const uint8_t,
key_len: size_t,
value: *const uint8_t,
value_len: size_t,
entry_type: c_int,
seq: uint64_t,
file_size: uint64_t,
) {
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorHandle);
let key = slice::from_raw_parts(key, key_len);
let value = slice::from_raw_parts(value, value_len);
handle.rep.add(key, value, mem::transmute(entry_type), seq, file_size);
handle
.rep
.add(key, value, mem::transmute(entry_type), seq, file_size);
}
}
......@@ -85,18 +84,21 @@ pub extern "C" fn finish(handle: *mut c_void, props: *mut DBUserCollectedPropert
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorHandle);
for (key, value) in handle.rep.finish() {
crocksdb_ffi::crocksdb_user_collected_properties_add(props,
key.as_ptr(),
key.len(),
value.as_ptr(),
value.len());
crocksdb_ffi::crocksdb_user_collected_properties_add(
props,
key.as_ptr(),
key.len(),
value.as_ptr(),
value.len(),
);
}
}
}
pub unsafe fn new_table_properties_collector(cname: &str,
collector: Box<TablePropertiesCollector>)
-> *mut DBTablePropertiesCollector {
pub unsafe fn new_table_properties_collector(
cname: &str,
collector: Box<TablePropertiesCollector>,
) -> *mut DBTablePropertiesCollector {
let handle = TablePropertiesCollectorHandle::new(cname, collector);
crocksdb_ffi::crocksdb_table_properties_collector_create(
Box::into_raw(Box::new(handle)) as *mut c_void,
......
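The `add` and `finish` shims above drive a boxed `TablePropertiesCollector`. A minimal sketch of a collector counting table entries; the `finish` return type is an assumption (a map of property key/value byte vectors) based on how the `finish` shim iterates it:

use std::collections::HashMap;

struct RowCounter {
    rows: u64,
}

impl TablePropertiesCollector for RowCounter {
    fn add(&mut self, _key: &[u8], _value: &[u8], _et: DBEntryType, _seq: u64, _file_size: u64) {
        self.rows += 1;
    }

    // Assumed return type, matching the (key, value) iteration in finish above.
    fn finish(&mut self) -> HashMap<Vec<u8>, Vec<u8>> {
        let mut props = HashMap::new();
        props.insert(b"rows".to_vec(), self.rows.to_string().into_bytes());
        props
    }
}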
......@@ -12,9 +12,9 @@
// limitations under the License.
use crocksdb_ffi::{self, DBTablePropertiesCollector, DBTablePropertiesCollectorFactory};
use libc::{c_void, c_char, uint32_t};
use libc::{c_char, c_void, uint32_t};
use std::ffi::CString;
use table_properties_collector::{TablePropertiesCollector, new_table_properties_collector};
use table_properties_collector::{new_table_properties_collector, TablePropertiesCollector};
/// Constructs `TablePropertiesCollector`.
/// Internals create a new `TablePropertiesCollector` for each new table.
......@@ -29,9 +29,10 @@ struct TablePropertiesCollectorFactoryHandle {
}
impl TablePropertiesCollectorFactoryHandle {
fn new(name: &str,
rep: Box<TablePropertiesCollectorFactory>)
-> TablePropertiesCollectorFactoryHandle {
fn new(
name: &str,
rep: Box<TablePropertiesCollectorFactory>,
) -> TablePropertiesCollectorFactoryHandle {
TablePropertiesCollectorFactoryHandle {
name: CString::new(name).unwrap(),
rep: rep,
......@@ -52,9 +53,10 @@ extern "C" fn destruct(handle: *mut c_void) {
}
}
extern "C" fn create_table_properties_collector(handle: *mut c_void,
cf: uint32_t)
-> *mut DBTablePropertiesCollector {
extern "C" fn create_table_properties_collector(
handle: *mut c_void,
cf: uint32_t,
) -> *mut DBTablePropertiesCollector {
unsafe {
let handle = &mut *(handle as *mut TablePropertiesCollectorFactoryHandle);
let collector = handle.rep.create_table_properties_collector(cf);
......@@ -62,14 +64,15 @@ extern "C" fn create_table_properties_collector(handle: *mut c_void,
}
}
pub unsafe fn new_table_properties_collector_factory
(fname: &str, factory: Box<TablePropertiesCollectorFactory>)
-> *mut DBTablePropertiesCollectorFactory {
pub unsafe fn new_table_properties_collector_factory(
fname: &str,
factory: Box<TablePropertiesCollectorFactory>,
) -> *mut DBTablePropertiesCollectorFactory {
let handle = TablePropertiesCollectorFactoryHandle::new(fname, factory);
crocksdb_ffi::crocksdb_table_properties_collector_factory_create(
Box::into_raw(Box::new(handle)) as *mut c_void,
name,
destruct,
create_table_properties_collector,
Box::into_raw(Box::new(handle)) as *mut c_void,
name,
destruct,
create_table_properties_collector,
)
}
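A matching factory sketch; RocksDB asks it for one collector per table, and the `u32` argument mirrors the `cf: uint32_t` parameter of the C shim above (the signature is an assumption):

struct RowCounterFactory;

impl TablePropertiesCollectorFactory for RowCounterFactory {
    fn create_table_properties_collector(&mut self, _cf: u32) -> Box<TablePropertiesCollector> {
        Box::new(RowCounter { rows: 0 })
    }
}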
......@@ -13,7 +13,7 @@
// limitations under the License.
//
use rocksdb::{DB, MergeOperands, DBOptions, ColumnFamilyOptions, Writable};
use rocksdb::{ColumnFamilyOptions, DBOptions, MergeOperands, Writable, DB};
use tempdir::TempDir;
#[test]
......@@ -43,15 +43,15 @@ pub fn test_column_family() {
let mut cf_opts = ColumnFamilyOptions::new();
cf_opts.add_merge_operator("test operator", test_provided_merge);
match DB::open_cf(DBOptions::new(), path_str, vec!["default"], vec![cf_opts]) {
Ok(_) => {
panic!("should not have opened DB successfully without \
Ok(_) => panic!(
"should not have opened DB successfully without \
specifying column
families")
}
Err(e) => {
assert!(e.starts_with("Invalid argument: You have to open \
all column families."))
}
families"
),
Err(e) => assert!(e.starts_with(
"Invalid argument: You have to open \
all column families."
)),
}
}
......@@ -77,11 +77,7 @@ pub fn test_column_family() {
};
let cf1 = db.cf_handle("cf1").unwrap();
assert!(db.put_cf(cf1, b"k1", b"v1").is_ok());
assert!(db.get_cf(cf1, b"k1")
.unwrap()
.unwrap()
.to_utf8()
.unwrap() == "v1");
assert!(db.get_cf(cf1, b"k1").unwrap().unwrap().to_utf8().unwrap() == "v1");
let p = db.put_cf(cf1, b"k1", b"a");
assert!(p.is_ok());
/*
......@@ -117,11 +113,12 @@ pub fn test_column_family() {
{}
// should be able to drop a cf
{
let mut db = DB::open_cf(DBOptions::new(),
path_str,
vec!["cf1"],
vec![ColumnFamilyOptions::new()])
.unwrap();
let mut db = DB::open_cf(
DBOptions::new(),
path_str,
vec!["cf1"],
vec![ColumnFamilyOptions::new()],
).unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
......@@ -129,18 +126,17 @@ pub fn test_column_family() {
}
}
fn test_provided_merge(_: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands)
-> Vec<u8> {
fn test_provided_merge(
_: &[u8],
existing_val: Option<&[u8]>,
operands: &mut MergeOperands,
) -> Vec<u8> {
let nops = operands.size_hint().0;
let mut result: Vec<u8> = Vec::with_capacity(nops);
match existing_val {
Some(v) => {
for e in v {
result.push(*e);
}
}
Some(v) => for e in v {
result.push(*e);
},
None => (),
}
for op in operands {
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, DBOptions, Range, Writable};
use rocksdb::{DBOptions, Range, Writable, DB};
use tempdir::TempDir;
......@@ -21,11 +21,13 @@ fn test_compact_range() {
let mut opts = DBOptions::new();
opts.create_if_missing(true);
let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
let samples = vec![(b"k1".to_vec(), b"value--------1".to_vec()),
(b"k2".to_vec(), b"value--------2".to_vec()),
(b"k3".to_vec(), b"value--------3".to_vec()),
(b"k4".to_vec(), b"value--------4".to_vec()),
(b"k5".to_vec(), b"value--------5".to_vec())];
let samples = vec![
(b"k1".to_vec(), b"value--------1".to_vec()),
(b"k2".to_vec(), b"value--------2".to_vec()),
(b"k3".to_vec(), b"value--------3".to_vec()),
(b"k4".to_vec(), b"value--------4".to_vec()),
(b"k5".to_vec(), b"value--------5".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{Writable, DB, CompactionFilter, DBOptions, ColumnFamilyOptions};
use rocksdb::{ColumnFamilyOptions, CompactionFilter, DBOptions, Writable, DB};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use tempdir::TempDir;
......@@ -44,22 +44,28 @@ fn test_compaction_filter() {
let drop_called = Arc::new(AtomicBool::new(false));
let filtered_kvs = Arc::new(RwLock::new(vec![]));
// set ignore_snapshots to false
cf_opts.set_compaction_filter("test",
false,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
cf_opts
.set_compaction_filter(
"test",
false,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}),
)
.unwrap();
let mut opts = DBOptions::new();
opts.create_if_missing(true);
let db = DB::open_cf(opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let samples = vec![(b"key1".to_vec(), b"value1".to_vec()),
(b"key2".to_vec(), b"value2".to_vec())];
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts],
).unwrap();
let samples = vec![
(b"key1".to_vec(), b"value1".to_vec()),
(b"key2".to_vec(), b"value2".to_vec()),
];
for &(ref k, ref v) in &samples {
db.put(k, v).unwrap();
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
......@@ -79,21 +85,25 @@ fn test_compaction_filter() {
// reregister with ignore_snapshots set to true
let mut cf_opts = ColumnFamilyOptions::new();
let opts = DBOptions::new();
cf_opts.set_compaction_filter("test",
true,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}))
cf_opts
.set_compaction_filter(
"test",
true,
Box::new(Filter {
drop_called: drop_called.clone(),
filtered_kvs: filtered_kvs.clone(),
}),
)
.unwrap();
assert!(drop_called.load(Ordering::Relaxed));
drop_called.store(false, Ordering::Relaxed);
{
let db = DB::open_cf(opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts],
).unwrap();
let _snap = db.snapshot();
// Because ignore_snapshots is true, so all the keys will be compacted.
db.compact_range(Some(b"key1"), Some(b"key3"));
......
......@@ -86,17 +86,19 @@ fn test_event_listener_basic() {
opts.create_if_missing(true);
let db = DB::open(opts, path_str).unwrap();
for i in 1..8000 {
db.put(format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes())
.unwrap();
db.put(
format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes(),
).unwrap();
}
db.flush(true).unwrap();
assert_ne!(counter.flush.load(Ordering::SeqCst), 0);
for i in 1..8000 {
db.put(format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes())
.unwrap();
db.put(
format!("{:04}", i).as_bytes(),
format!("{:04}", i).as_bytes(),
).unwrap();
}
db.flush(true).unwrap();
let flush_cnt = counter.flush.load(Ordering::SeqCst);
......@@ -125,10 +127,12 @@ fn test_event_listener_ingestion() {
let test_sstfile_str = test_sstfile.to_str().unwrap();
let default_options = db.get_options();
gen_sst(default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")]);
gen_sst(
default_options,
Some(db.cf_handle("default").unwrap()),
test_sstfile_str,
&[(b"k1", b"v1"), (b"k2", b"v2")],
);
let ingest_opt = IngestExternalFileOptions::new();
db.ingest_external_file(&ingest_opt, &[test_sstfile_str])
......
......@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, Writable};
use rocksdb::{Writable, DB};
use std::sync::Arc;
use std::thread;
use tempdir::TempDir;
......@@ -40,11 +40,9 @@ pub fn test_multithreaded() {
let db3 = db.clone();
let j3 = thread::spawn(move || for _ in 1..N {
match db3.get(b"key") {
Ok(Some(v)) => {
if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
}
}
Ok(Some(v)) => if &v[..] != b"value1" && &v[..] != b"value2" {
assert!(false);
},
_ => {
assert!(false);
}
......
......@@ -31,8 +31,17 @@ impl SliceTransform for FixedPrefixTransform {
#[test]
fn test_prefix_extractor_compatibility() {
let path = TempDir::new("_rust_rocksdb_prefix_extractor_compatibility").expect("");
let keys = vec![b"k1-0", b"k1-1", b"k1-2", b"k1-3", b"k1-4", b"k1-5", b"k1-6", b"k1-7",
b"k1-8"];
let keys = vec![
b"k1-0",
b"k1-1",
b"k1-2",
b"k1-3",
b"k1-4",
b"k1-5",
b"k1-6",
b"k1-7",
b"k1-8",
];
// create db with no prefix extractor, and insert data
{
......@@ -57,16 +66,20 @@ fn test_prefix_extractor_compatibility() {
let mut cf_opts = ColumnFamilyOptions::new();
opts.create_if_missing(false);
cf_opts.set_block_based_table_factory(&bbto);
cf_opts.set_prefix_extractor("FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }))
cf_opts
.set_prefix_extractor(
"FixedPrefixTransform",
Box::new(FixedPrefixTransform { prefix_len: 2 }),
)
.unwrap();
// also create prefix bloom for memtable
cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
let db = DB::open_cf(opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts])
.unwrap();
let db = DB::open_cf(
opts,
path.path().to_str().unwrap(),
vec!["default"],
vec![cf_opts],
).unwrap();
let wopts = WriteOptions::new();
// sst2 with prefix bloom.
......
......@@ -12,7 +12,7 @@
// limitations under the License.
use rocksdb::*;
use rocksdb::{DBStatisticsTickerType as TickerType, DBStatisticsHistogramType as HistogramType};
use rocksdb::{DBStatisticsHistogramType as HistogramType, DBStatisticsTickerType as TickerType};
use tempdir::TempDir;
#[test]
......@@ -35,8 +35,12 @@ fn test_db_statistics() {
assert!(db.get_statistics_ticker_count(TickerType::BlockCacheHit) > 0);
assert!(db.get_and_reset_statistics_ticker_count(TickerType::BlockCacheHit) > 0);
assert_eq!(db.get_statistics_ticker_count(TickerType::BlockCacheHit), 0);
assert!(db.get_statistics_histogram_string(HistogramType::GetMicros)
.is_some());
assert!(db.get_statistics_histogram(HistogramType::GetMicros)
.is_some());
assert!(
db.get_statistics_histogram_string(HistogramType::GetMicros)
.is_some()
);
assert!(
db.get_statistics_histogram(HistogramType::GetMicros)
.is_some()
);
}