Commit bde37b67 authored by siddontang

merge master and fix conflict

parents b87bb3f2 a22a722c
@@ -7,3 +7,7 @@ Cargo.lock
 _rust_rocksdb*
 *rlib
 tags
+.idea/
+out/
+*.iml
\ No newline at end of file
@@ -25,3 +25,5 @@ path = "test/test.rs"

 [dependencies]
 libc = "0.1.8"
+tempdir = "0.3.4"
+clippy = "*"
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-extern crate libc;
-use self::libc::{c_char, c_int, c_void, size_t};
+use libc::{c_char, c_int, c_void, size_t};
 use std::ffi::CString;
 use std::mem;
 use std::slice;
...
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-extern crate libc;
-use self::libc::{c_char, c_int, c_void, size_t};
+use libc::{self, c_char, c_int, c_void, size_t, uint64_t};
 use std::ffi::CStr;
 use std::str::from_utf8;
@@ -56,6 +55,9 @@ pub struct DBWriteBatch(pub *const c_void);
 #[derive(Copy, Clone)]
 #[repr(C)]
 pub struct DBComparator(pub *const c_void);
+#[derive(Copy, Clone)]
+#[repr(C)]
+pub struct DBFlushOptions(pub *const c_void);

 pub fn new_bloom_filter(bits: c_int) -> DBFilterPolicy {
     unsafe { rocksdb_filterpolicy_create_bloom(bits) }
@@ -67,19 +69,19 @@ pub fn new_cache(capacity: size_t) -> DBCache {
 #[repr(C)]
 pub enum DBCompressionType {
-    DBNoCompression = 0,
-    DBSnappyCompression = 1,
-    DBZlibCompression = 2,
-    DBBz2Compression = 3,
-    DBLz4Compression = 4,
-    DBLz4hcCompression = 5,
+    DBNo = 0,
+    DBSnappy = 1,
+    DBZlib = 2,
+    DBBz2 = 3,
+    DBLz4 = 4,
+    DBLz4hc = 5,
 }

 #[repr(C)]
 pub enum DBCompactionStyle {
-    DBLevelCompaction = 0,
-    DBUniversalCompaction = 1,
-    DBFifoCompaction = 2,
+    DBLevel = 0,
+    DBUniversal = 1,
+    DBFifo = 2,
 }

 #[repr(C)]
@@ -92,7 +94,7 @@ pub fn error_message(ptr: *const i8) -> String {
     let c_str = unsafe { CStr::from_ptr(ptr as *const _) };
     let s = from_utf8(c_str.to_bytes()).unwrap().to_owned();
     unsafe {
-        libc::free(ptr as *mut libc::c_void);
+        libc::free(ptr as *mut c_void);
     }
     s
 }
@@ -410,61 +412,108 @@ extern "C" {
                     err: *mut *const i8);
     pub fn rocksdb_column_family_handle_destroy(column_family_handle: DBCFHandle);

+    // Flush options
+    pub fn rocksdb_flushoptions_create() -> DBFlushOptions;
+    pub fn rocksdb_flushoptions_destroy(opt: DBFlushOptions);
+    pub fn rocksdb_flushoptions_set_wait(opt: DBFlushOptions,
+                                         whether_wait: bool);
+
+    pub fn rocksdb_flush(db: DBInstance,
+                         options: DBFlushOptions,
+                         err: *mut *const i8);
+
+    pub fn rocksdb_approximate_sizes(db: DBInstance,
+                                     num_ranges: c_int,
+                                     range_start_key: *const *const u8,
+                                     range_start_key_len: *const size_t,
+                                     range_limit_key: *const *const u8,
+                                     range_limit_key_len: *const size_t,
+                                     sizes: *mut uint64_t);
+    pub fn rocksdb_approximate_sizes_cf(db: DBInstance,
+                                        cf: DBCFHandle,
+                                        num_ranges: c_int,
+                                        range_start_key: *const *const u8,
+                                        range_start_key_len: *const size_t,
+                                        range_limit_key: *const *const u8,
+                                        range_limit_key_len: *const size_t,
+                                        sizes: *mut uint64_t);
 }

-#[test]
-fn internal() {
-    unsafe {
-        use std::ffi::CString;
-        let opts = rocksdb_options_create();
-        assert!(!opts.0.is_null());
-
-        rocksdb_options_increase_parallelism(opts, 0);
-        rocksdb_options_optimize_level_style_compaction(opts, 0);
-        rocksdb_options_set_create_if_missing(opts, true);
-
-        let rustpath = "_rust_rocksdb_internaltest";
-        let cpath = CString::new(rustpath).unwrap();
-        let cpath_ptr = cpath.as_ptr();
-
-        let mut err: *const i8 = 0 as *const i8;
-        let err_ptr: *mut *const i8 = &mut err;
-        let db = rocksdb_open(opts, cpath_ptr, err_ptr);
-        if !err.is_null() {
-            println!("failed to open rocksdb: {}", error_message(err));
-        }
-        assert!(err.is_null());
-
-        let writeopts = rocksdb_writeoptions_create();
-        assert!(!writeopts.0.is_null());
-
-        let key = b"name\x00";
-        let val = b"spacejam\x00";
-        rocksdb_put(db,
-                    writeopts.clone(),
-                    key.as_ptr(),
-                    4,
-                    val.as_ptr(),
-                    8,
-                    err_ptr);
-        rocksdb_writeoptions_destroy(writeopts);
-        assert!(err.is_null());
-
-        let readopts = rocksdb_readoptions_create();
-        assert!(!readopts.0.is_null());
-
-        let val_len: size_t = 0;
-        let val_len_ptr = &val_len as *const size_t;
-        rocksdb_get(db,
-                    readopts.clone(),
-                    key.as_ptr(),
-                    4,
-                    val_len_ptr,
-                    err_ptr);
-        rocksdb_readoptions_destroy(readopts);
-        assert!(err.is_null());
-        rocksdb_close(db);
-        rocksdb_destroy_db(opts, cpath_ptr, err_ptr);
-        assert!(err.is_null());
-    }
-}
+#[cfg(test)]
+mod test {
+    use super::*;
+    use std::ffi::CString;
+    use tempdir::TempDir;
+
+    #[test]
+    fn internal() {
+        unsafe {
+            let opts = rocksdb_options_create();
+            assert!(!opts.0.is_null());
+
+            rocksdb_options_increase_parallelism(opts, 0);
+            rocksdb_options_optimize_level_style_compaction(opts, 0);
+            rocksdb_options_set_create_if_missing(opts, true);
+
+            let rustpath = TempDir::new("_rust_rocksdb_internaltest")
+                               .expect("");
+            let cpath = CString::new(rustpath.path().to_str().unwrap())
+                            .unwrap();
+            let cpath_ptr = cpath.as_ptr();
+
+            let mut err = 0 as *const i8;
+            let db = rocksdb_open(opts, cpath_ptr, &mut err);
+            assert!(err.is_null(), error_message(err));
+
+            let writeopts = rocksdb_writeoptions_create();
+            assert!(!writeopts.0.is_null());
+
+            let key = b"name\x00";
+            let val = b"spacejam\x00";
+            rocksdb_put(db,
+                        writeopts.clone(),
+                        key.as_ptr(),
+                        4,
+                        val.as_ptr(),
+                        8,
+                        &mut err);
+            rocksdb_writeoptions_destroy(writeopts);
+            assert!(err.is_null(), error_message(err));
+
+            let readopts = rocksdb_readoptions_create();
+            assert!(!readopts.0.is_null());
+
+            let mut val_len = 0;
+            rocksdb_get(db,
+                        readopts.clone(),
+                        key.as_ptr(),
+                        4,
+                        &mut val_len,
+                        &mut err);
+            rocksdb_readoptions_destroy(readopts);
+            assert!(err.is_null(), error_message(err));
+
+            // flush first to get approximate size later.
+            let flush_opt = rocksdb_flushoptions_create();
+            rocksdb_flushoptions_set_wait(flush_opt, true);
+            rocksdb_flush(db, flush_opt, &mut err);
+            rocksdb_flushoptions_destroy(flush_opt);
+            assert!(err.is_null(), error_message(err));
+
+            let mut sizes = vec![0; 1];
+            rocksdb_approximate_sizes(db,
+                                      1,
+                                      vec![b"\x00\x00".as_ptr()].as_ptr(),
+                                      vec![1].as_ptr(),
+                                      vec![b"\xff\x00".as_ptr()].as_ptr(),
+                                      vec![1].as_ptr(),
+                                      sizes.as_mut_ptr());
+            assert_eq!(sizes.len(), 1);
+            assert!(sizes[0] > 0);
+
+            rocksdb_close(db);
+            rocksdb_destroy_db(opts, cpath_ptr, &mut err);
+            assert!(err.is_null());
+        }
+    }
+}
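The new flush and approximate-size bindings are raw extern declarations, so callers are expected to wrap them. A minimal sketch of such a wrapper, not part of this commit; it assumes the rocksdb_ffi re-export introduced in lib.rs below and a DBInstance obtained from rocksdb_open, and the helper name approximate_size is hypothetical:

    extern crate libc;
    extern crate rocksdb;

    use libc::size_t;
    use rocksdb::rocksdb_ffi::{self, DBInstance};

    // Ask RocksDB for the approximate on-disk size of one key range.
    // Calling rocksdb_flush with wait=true beforehand makes the estimate
    // meaningful, exactly as the updated test above does.
    fn approximate_size(db: DBInstance, start: &[u8], limit: &[u8]) -> u64 {
        let start_keys = [start.as_ptr()];
        let start_lens = [start.len() as size_t];
        let limit_keys = [limit.as_ptr()];
        let limit_lens = [limit.len() as size_t];
        let mut sizes = [0u64; 1];
        unsafe {
            rocksdb_ffi::rocksdb_approximate_sizes(db,
                                                   1,
                                                   start_keys.as_ptr(),
                                                   start_lens.as_ptr(),
                                                   limit_keys.as_ptr(),
                                                   limit_lens.as_ptr(),
                                                   sizes.as_mut_ptr());
        }
        sizes[0]
    }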
@@ -12,14 +12,23 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-pub use ffi as rocksdb_ffi;
-pub use ffi::{DBCompactionStyle, DBComparator, new_bloom_filter};
-pub use rocksdb::{DB, DBIterator, DBVector, Direction, IteratorMode, Writable,
-                  WriteBatch};
-pub use rocksdb_options::{BlockBasedOptions, Options, WriteOptions};
-pub use merge_operator::MergeOperands;
+#![feature(plugin)]
+#![plugin(clippy)]
+
+extern crate libc;
+
+#[cfg(test)]
+extern crate tempdir;

 pub mod rocksdb;
 pub mod ffi;
 pub mod rocksdb_options;
 pub mod merge_operator;
 pub mod comparator;
+
+pub use ffi::{DBCompactionStyle, DBComparator, new_bloom_filter,
+              self as rocksdb_ffi};
+pub use rocksdb::{DB, DBIterator, DBVector, Direction, IteratorMode, Writable,
+                  WriteBatch};
+pub use rocksdb_options::{BlockBasedOptions, Options, WriteOptions};
+pub use merge_operator::MergeOperands;
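With libc and the clippy plugin declared once at the crate root and ffi re-exported as rocksdb_ffi, downstream code keeps importing everything from the crate root. A minimal consumer sketch based only on the re-exports listed above; the database path and fn main are illustrative:

    extern crate rocksdb;

    use rocksdb::{DB, Options, Writable};

    fn main() {
        let mut opts = Options::new();
        opts.create_if_missing(true);
        let db = DB::open(&opts, "/tmp/_rust_rocksdb_reexport_demo").unwrap();
        // put/delete come from the re-exported Writable trait, get from DB.
        assert!(db.put(b"k1", b"v1").is_ok());
        assert!(db.delete(b"k1").is_ok());
        assert!(db.get(b"k1").unwrap().is_none());
    }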
@@ -144,8 +144,9 @@ fn main() {
 #[cfg(test)]
 mod tests {
     use rocksdb::{BlockBasedOptions, DB, Options};
-    use rocksdb::DBCompactionStyle::DBUniversalCompaction;
+    use rocksdb::DBCompactionStyle::DBUniversal;

+    #[allow(dead_code)]
     fn tuned_for_somebody_elses_disk(path: &str,
                                      opts: &mut Options,
                                      blockopts: &mut BlockBasedOptions)
@@ -163,7 +164,7 @@ mod tests {
         opts.set_min_write_buffer_number_to_merge(4);
         opts.set_level_zero_stop_writes_trigger(2000);
         opts.set_level_zero_slowdown_writes_trigger(0);
-        opts.set_compaction_style(DBUniversalCompaction);
+        opts.set_compaction_style(DBUniversal);
         opts.set_max_background_compactions(4);
         opts.set_max_background_flushes(4);
         opts.set_filter_deletes(false);
...
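The enum variants lose their redundant Compression/Compaction suffixes in this commit, so callers only change their use paths and variant names. A minimal sketch of configuring the renamed compaction style (path and tuning values are illustrative only):

    extern crate rocksdb;

    use rocksdb::{DB, Options, Writable};
    use rocksdb::DBCompactionStyle::DBUniversal;

    fn main() {
        let mut opts = Options::new();
        opts.create_if_missing(true);
        // Previously spelled DBUniversalCompaction.
        opts.set_compaction_style(DBUniversal);
        opts.set_max_background_compactions(4);
        opts.set_max_background_flushes(4);

        let db = DB::open(&opts, "/tmp/_rust_rocksdb_universal_demo").unwrap();
        assert!(db.put(b"k", b"v").is_ok());
    }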
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-extern crate libc;
-use self::libc::{c_char, c_int, c_void, size_t};
+use libc::{self, c_char, c_int, c_void, size_t};
 use std::ffi::CString;
 use std::mem;
 use std::ptr;
@@ -128,9 +127,10 @@ impl MergeOperands {
 impl<'a> Iterator for &'a mut MergeOperands {
     type Item = &'a [u8];
     fn next(&mut self) -> Option<&'a [u8]> {
-        match self.cursor == self.num_operands {
-            true => None,
-            false => unsafe {
+        if self.cursor == self.num_operands {
+            None
+        } else {
+            unsafe {
                 let base = self.operands_list as usize;
                 let base_len = self.operands_list_len as usize;
                 let spacing = mem::size_of::<*const *const u8>();
@@ -142,7 +142,7 @@ impl<'a> Iterator for &'a mut MergeOperands {
                 self.cursor += 1;
                 Some(mem::transmute(slice::from_raw_parts(*(ptr as *const *const u8)
                                                               as *const u8, len)))
-            },
+            }
         }
     }
@@ -152,43 +152,42 @@ impl<'a> Iterator for &'a mut MergeOperands {
     }
 }

-#[allow(unused_variables)]
-#[allow(dead_code)]
-fn test_provided_merge(new_key: &[u8],
-                       existing_val: Option<&[u8]>,
-                       operands: &mut MergeOperands)
-                       -> Vec<u8> {
-    let nops = operands.size_hint().0;
-    let mut result: Vec<u8> = Vec::with_capacity(nops);
-    match existing_val {
-        Some(v) => {
-            for e in v {
-                result.push(*e);
-            }
-        }
-        None => (),
-    }
-    for op in operands {
-        for e in op {
-            result.push(*e);
-        }
-    }
-    result
-}
+#[cfg(test)]
+mod test {
+    use super::*;
+    use rocksdb_options::Options;
+    use rocksdb::{DB, DBVector, Writable};
+    use tempdir::TempDir;
+
+    #[allow(unused_variables)]
+    #[allow(dead_code)]
+    fn test_provided_merge(new_key: &[u8],
+                           existing_val: Option<&[u8]>,
+                           operands: &mut MergeOperands)
+                           -> Vec<u8> {
+        let nops = operands.size_hint().0;
+        let mut result: Vec<u8> = Vec::with_capacity(nops);
+        if let Some(v) = existing_val {
+            for e in v {
+                result.push(*e);
+            }
+        }
+        for op in operands {
+            for e in op {
+                result.push(*e);
+            }
+        }
+        result
+    }

-#[allow(dead_code)]
-#[test]
-fn mergetest() {
-    use rocksdb_options::Options;
-    use rocksdb::{DB, DBVector, Writable};
-
-    let path = "_rust_rocksdb_mergetest";
-    let mut opts = Options::new();
-    opts.create_if_missing(true);
-    opts.add_merge_operator("test operator", test_provided_merge);
-    {
-        let db = DB::open(&opts, path).unwrap();
+    #[allow(dead_code)]
+    #[test]
+    fn mergetest() {
+        let path = TempDir::new("_rust_rocksdb_mergetest").expect("");
+        let mut opts = Options::new();
+        opts.create_if_missing(true);
+        opts.add_merge_operator("test operator", test_provided_merge);
+        let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
         let p = db.put(b"k1", b"a");
         assert!(p.is_ok());
         let _ = db.merge(b"k1", b"b");
@@ -204,7 +203,7 @@ fn mergetest() {
                 None => println!("did not read valid utf-8 out of the db"),
             }
         }
-        Err(e) => println!("error reading value"),
+        Err(e) => println!("error reading value {:?}", e),
         _ => panic!("value not present"),
     }
@@ -214,5 +213,4 @@ fn mergetest() {
         assert!(db.delete(b"k1").is_ok());
         assert!(db.get(b"k1").unwrap().is_none());
     }
-    assert!(DB::destroy(&opts, path).is_ok());
 }
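The merge-operator contract exercised in the test above is just a plain fn over the key, the optional existing value, and the pending operands. A minimal sketch of registering one through Options::add_merge_operator; the path, operator name, and fn main are illustrative only:

    extern crate rocksdb;

    use rocksdb::{DB, MergeOperands, Options, Writable};

    // Concatenate the existing value with every queued merge operand.
    fn concat_merge(_new_key: &[u8],
                    existing_val: Option<&[u8]>,
                    operands: &mut MergeOperands)
                    -> Vec<u8> {
        let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
        if let Some(v) = existing_val {
            for e in v {
                result.push(*e);
            }
        }
        for op in operands {
            for e in op {
                result.push(*e);
            }
        }
        result
    }

    fn main() {
        let mut opts = Options::new();
        opts.create_if_missing(true);
        opts.add_merge_operator("concat operator", concat_merge);
        let db = DB::open(&opts, "/tmp/_rust_rocksdb_merge_demo").unwrap();
        assert!(db.put(b"k1", b"a").is_ok());
        let _ = db.merge(b"k1", b"b");
        let _ = db.merge(b"k1", b"c");
    }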
This diff is collapsed.
@@ -12,8 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 //
-extern crate libc;
-use self::libc::{c_int, size_t};
+use libc::{c_int, size_t};
 use std::ffi::CString;
 use std::mem;
@@ -136,12 +135,12 @@ impl Options {
         }
     }

-    pub fn add_merge_operator<'a>(&mut self,
+    pub fn add_merge_operator(&mut self,
                               name: &str,
                               merge_fn: fn(&[u8],
                                            Option<&[u8]>,
                                            &mut MergeOperands)
                                            -> Vec<u8>) {
         let cb = Box::new(MergeOperatorCallback {
             name: CString::new(name.as_bytes()).unwrap(),
             merge_fn: merge_fn,
@@ -159,9 +158,9 @@ impl Options {
         }
     }

-    pub fn add_comparator<'a>(&mut self,
+    pub fn add_comparator(&mut self,
                           name: &str,
                           compare_fn: fn(&[u8], &[u8]) -> i32) {
         let cb = Box::new(ComparatorCallback {
             name: CString::new(name.as_bytes()).unwrap(),
             f: compare_fn,
@@ -193,13 +192,10 @@ impl Options {
     pub fn set_use_fsync(&mut self, useit: bool) {
         unsafe {
-            match useit {
-                true => {
-                    rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 1)
-                }
-                false => {
-                    rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 0)
-                }
+            if useit {
+                rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 1)
+            } else {
+                rocksdb_ffi::rocksdb_options_set_use_fsync(self.inner, 0)
             }
         }
     }
@@ -212,13 +208,12 @@ impl Options {
     pub fn set_disable_data_sync(&mut self, disable: bool) {
         unsafe {
-            match disable {
-                true =>
-                    rocksdb_ffi::rocksdb_options_set_disable_data_sync(
-                        self.inner, 1),
-                false =>
-                    rocksdb_ffi::rocksdb_options_set_disable_data_sync(
-                        self.inner, 0),
+            if disable {
+                rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner,
+                                                                   1);
+            } else {
+                rocksdb_ffi::rocksdb_options_set_disable_data_sync(self.inner,
+                                                                   0);
             }
         }
     }
...
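set_use_fsync and set_disable_data_sync now branch with plain if/else instead of matching on a bool; nothing changes for callers, which still pass a bool that is forwarded to the C API as 1 or 0. A minimal usage sketch (path and fn main illustrative only):

    extern crate rocksdb;

    use rocksdb::{DB, Options, Writable};

    fn main() {
        let mut opts = Options::new();
        opts.create_if_missing(true);
        // Both setters take a plain bool and forward 1 or 0 to RocksDB.
        opts.set_use_fsync(false);
        opts.set_disable_data_sync(true);

        let db = DB::open(&opts, "/tmp/_rust_rocksdb_options_demo").unwrap();
        assert!(db.put(b"k", b"v").is_ok());
    }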
 extern crate rocksdb;
+extern crate tempdir;

 mod test_iterator;
 mod test_multithreaded;
...
@@ -13,17 +13,19 @@
 // limitations under the License.
 //
 use rocksdb::{DB, MergeOperands, Options, Writable};
+use tempdir::TempDir;

 #[test]
 pub fn test_column_family() {
-    let path = "_rust_rocksdb_cftest";
+    let path = TempDir::new("_rust_rocksdb_cftest").expect("");
+    let path_str = path.path().to_str().unwrap();

     // should be able to create column families
     {
         let mut opts = Options::new();
         opts.create_if_missing(true);
         opts.add_merge_operator("test operator", test_provided_merge);
-        let mut db = DB::open(&opts, path).unwrap();
+        let mut db = DB::open(&opts, path_str).unwrap();
         let opts = Options::new();
         match db.create_cf("cf1", &opts) {
             Ok(_) => println!("cf1 created successfully"),
@@ -37,7 +39,7 @@ pub fn test_column_family() {
     {
         let mut opts = Options::new();
         opts.add_merge_operator("test operator", test_provided_merge);
-        match DB::open(&opts, path) {
+        match DB::open(&opts, path_str) {
             Ok(_) => {
                 panic!("should not have opened DB successfully without \
                         specifying column
@@ -54,7 +56,7 @@ pub fn test_column_family() {
     {
         let mut opts = Options::new();
         opts.add_merge_operator("test operator", test_provided_merge);
-        match DB::open_cf(&opts, path, &["cf1"]) {
+        match DB::open_cf(&opts, path_str, &["cf1"]) {
             Ok(_) => println!("successfully opened db with column family"),
             Err(e) => panic!("failed to open db with column family: {}", e),
         }
@@ -63,7 +65,7 @@ pub fn test_column_family() {
     {
         let mut opts = Options::new();
         opts.add_merge_operator("test operator", test_provided_merge);
-        let db = match DB::open_cf(&opts, path, &["cf1"]) {
+        let db = match DB::open_cf(&opts, path_str, &["cf1"]) {
             Ok(db) => {
                 println!("successfully opened db with column family");
                 db
@@ -76,6 +78,9 @@ pub fn test_column_family() {
                    "v1");
         let p = db.put_cf(cf1, b"k1", b"a");
         assert!(p.is_ok());
+        /*
+        // TODO support family merge operator
+        // have not finished yet, following codes won't work.
         db.merge_cf(cf1, b"k1", b"b").unwrap();
         db.merge_cf(cf1, b"k1", b"c").unwrap();
         db.merge_cf(cf1, b"k1", b"d").unwrap();
@@ -98,6 +103,7 @@ pub fn test_column_family() {
         // TODO assert!(r.unwrap().to_utf8().unwrap() == "abcdefgh");
         assert!(db.delete(b"k1").is_ok());
         assert!(db.get(b"k1").unwrap().is_none());
+        */
     }
     // TODO should be able to use writebatch ops with a cf
     {
@@ -107,14 +113,12 @@ pub fn test_column_family() {
     }
     // should b able to drop a cf
     {
-        let mut db = DB::open_cf(&Options::new(), path, &["cf1"]).unwrap();
+        let mut db = DB::open_cf(&Options::new(), path_str, &["cf1"]).unwrap();
         match db.drop_cf("cf1") {
             Ok(_) => println!("cf1 successfully dropped."),
             Err(e) => panic!("failed to drop column family: {}", e),
         }
     }
-    assert!(DB::destroy(&Options::new(), path).is_ok());
 }

 fn test_provided_merge(_: &[u8],
...
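The column-family test above covers several failure modes; the basic lifecycle it relies on is much shorter. A hedged sketch of that flow using the same calls (create_cf, open_cf, drop_cf) and a tempdir-backed path; the prefix and fn main are illustrative only:

    extern crate rocksdb;
    extern crate tempdir;

    use rocksdb::{DB, Options};
    use tempdir::TempDir;

    fn main() {
        let dir = TempDir::new("_rust_rocksdb_cf_demo").expect("create temp dir");
        let path = dir.path().to_str().unwrap();

        // Create the column family once with a default-options DB...
        {
            let mut opts = Options::new();
            opts.create_if_missing(true);
            let mut db = DB::open(&opts, path).unwrap();
            assert!(db.create_cf("cf1", &Options::new()).is_ok());
        }

        // ...then reopen the DB listing the family explicitly, and drop it again.
        let mut db = DB::open_cf(&Options::new(), path, &["cf1"]).unwrap();
        assert!(db.drop_cf("cf1").is_ok());
    }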
-use rocksdb::{DB, Direction, IteratorMode, Options, Writable};
+use rocksdb::{DB, Direction, IteratorMode, Writable};
+use tempdir::TempDir;

 fn cba(input: &Box<[u8]>) -> Box<[u8]> {
     input.iter().cloned().collect::<Vec<_>>().into_boxed_slice()
@@ -6,113 +7,94 @@ fn cba(input: &Box<[u8]>) -> Box<[u8]> {
 #[test]
 pub fn test_iterator() {
-    let path = "_rust_rocksdb_iteratortest";
-    {
+    let path = TempDir::new("_rust_rocksdb_iteratortest").expect("");
+
     let k1: Box<[u8]> = b"k1".to_vec().into_boxed_slice();
     let k2: Box<[u8]> = b"k2".to_vec().into_boxed_slice();
     let k3: Box<[u8]> = b"k3".to_vec().into_boxed_slice();
     let k4: Box<[u8]> = b"k4".to_vec().into_boxed_slice();
     let v1: Box<[u8]> = b"v1111".to_vec().into_boxed_slice();
     let v2: Box<[u8]> = b"v2222".to_vec().into_boxed_slice();
     let v3: Box<[u8]> = b"v3333".to_vec().into_boxed_slice();
     let v4: Box<[u8]> = b"v4444".to_vec().into_boxed_slice();
-    let db = DB::open_default(path).unwrap();
+    let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
     let p = db.put(&*k1, &*v1);
     assert!(p.is_ok());
     let p = db.put(&*k2, &*v2);
     assert!(p.is_ok());
     let p = db.put(&*k3, &*v3);
     assert!(p.is_ok());
     let expected = vec![(cba(&k1), cba(&v1)),
                         (cba(&k2), cba(&v2)),
                         (cba(&k3), cba(&v3))];
-    {
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
+
     // Test that it's idempotent
-    {
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
+
     // Test it in reverse a few times
-    {
     let iterator1 = db.iterator(IteratorMode::End);
     let mut tmp_vec = iterator1.collect::<Vec<_>>();
     tmp_vec.reverse();
     assert_eq!(tmp_vec, expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::End);
     let mut tmp_vec = iterator1.collect::<Vec<_>>();
     tmp_vec.reverse();
     assert_eq!(tmp_vec, expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::End);
     let mut tmp_vec = iterator1.collect::<Vec<_>>();
     tmp_vec.reverse();
     assert_eq!(tmp_vec, expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::End);
     let mut tmp_vec = iterator1.collect::<Vec<_>>();
     tmp_vec.reverse();
     assert_eq!(tmp_vec, expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::End);
     let mut tmp_vec = iterator1.collect::<Vec<_>>();
     tmp_vec.reverse();
     assert_eq!(tmp_vec, expected);
-    }
+
     // Try it forward again
-    {
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }

     let old_iterator = db.iterator(IteratorMode::Start);
     let p = db.put(&*k4, &*v4);
     assert!(p.is_ok());
     let expected2 = vec![(cba(&k1), cba(&v1)),
                          (cba(&k2), cba(&v2)),
                          (cba(&k3), cba(&v3)),
                          (cba(&k4), cba(&v4))];
-    {
     assert_eq!(old_iterator.collect::<Vec<_>>(), expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::Start);
     assert_eq!(iterator1.collect::<Vec<_>>(), expected2);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::From(b"k2",
                                                    Direction::Forward));
     let expected = vec![(cba(&k2), cba(&v2)),
                         (cba(&k3), cba(&v3)),
                         (cba(&k4), cba(&v4))];
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
-    {
+
     let iterator1 = db.iterator(IteratorMode::From(b"k2",
                                                    Direction::Reverse));
     let expected = vec![(cba(&k2), cba(&v2)), (cba(&k1), cba(&v1))];
     assert_eq!(iterator1.collect::<Vec<_>>(), expected);
-    }
-    }
-    let opts = Options::new();
-    assert!(DB::destroy(&opts, path).is_ok());
 }
-use rocksdb::{DB, Options, Writable};
+use rocksdb::{DB, Writable};
 use std::thread;
 use std::sync::Arc;
+use tempdir::TempDir;

 const N: usize = 100_000;

 #[test]
 pub fn test_multithreaded() {
-    let path = "_rust_rocksdb_multithreadtest";
-    {
-        let db = DB::open_default(path).unwrap();
-        let db = Arc::new(db);
+    let path = TempDir::new("_rust_rocksdb_multithreadtest").expect("");
+
+    let db = DB::open_default(path.path().to_str().unwrap()).unwrap();
+    let db = Arc::new(db);

     db.put(b"key", b"value1").unwrap();

     let db1 = db.clone();
     let j1 = thread::spawn(move || {
         for _ in 1..N {
             db1.put(b"key", b"value1").unwrap();
         }
     });

     let db2 = db.clone();
     let j2 = thread::spawn(move || {
         for _ in 1..N {
             db2.put(b"key", b"value2").unwrap();
         }
     });

     let db3 = db.clone();
     let j3 = thread::spawn(move || {
         for _ in 1..N {
             match db3.get(b"key") {
                 Ok(Some(v)) => {
                     if &v[..] != b"value1" && &v[..] != b"value2" {
                         assert!(false);
                     }
                 }
                 _ => {
                     assert!(false);
                 }
             }
         }
     });

     j1.join().unwrap();
     j2.join().unwrap();
     j3.join().unwrap();
-    }
-    assert!(DB::destroy(&Options::new(), path).is_ok());
 }