Commit a09bd206 authored by goroutine, committed by GitHub

Merge pull request #44 from zhangjinpeng1987/master

use empty slice as largest/smallest key for compact_range
parents 4ba34cdd b9f0eee4
......@@ -30,6 +30,6 @@ pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode,
new_bloom_filter, self as rocksdb_ffi};
pub use merge_operator::MergeOperands;
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch,
CFHandle};
CFHandle, Range};
pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions,
WriteOptions};
......@@ -26,6 +26,7 @@ use std::ops::Deref;
use std::path::Path;
use std::slice;
use std::str::from_utf8;
use std::ptr;
const DEFAULT_COLUMN_FAMILY: &'static str = "default";
......@@ -848,27 +849,31 @@ impl DB {
sizes
}
pub fn compact_range(&self, start_key: &[u8], end_key: &[u8]) {
pub fn compact_range(&self, start_key: Option<&[u8]>, end_key: Option<&[u8]>) {
unsafe {
let (start, s_len) = start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) = end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range(self.inner,
start_key.as_ptr(),
start_key.len() as size_t,
end_key.as_ptr(),
end_key.len());
start,
s_len,
end,
e_len);
}
}
pub fn compact_range_cf(&self,
cf: &CFHandle,
start_key: &[u8],
end_key: &[u8]) {
start_key: Option<&[u8]>,
end_key: Option<&[u8]>) {
unsafe {
let (start, s_len) = start_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
let (end, e_len) = end_key.map_or((ptr::null(), 0), |k| (k.as_ptr(), k.len()));
rocksdb_ffi::rocksdb_compact_range_cf(self.inner,
cf.inner,
start_key.as_ptr(),
start_key.len() as size_t,
end_key.as_ptr(),
end_key.len());
start,
s_len,
end,
e_len);
}
}
......
......@@ -5,3 +5,4 @@ mod test_iterator;
mod test_multithreaded;
mod test_column_family;
mod test_compaction_filter;
mod test_compact_range;
use tempdir::TempDir;
use rocksdb::{DB, Options, Range, Writable};
#[test]
fn test_compact_range() {
    // Open a fresh DB in a throwaway directory.
    let dir = TempDir::new("_rust_rocksdb_test_compact_range").expect("");
    let mut opts = Options::new();
    opts.create_if_missing(true);
    let db = DB::open(&opts, dir.path().to_str().unwrap()).unwrap();

    let samples = vec![
        (b"k1".to_vec(), b"value--------1".to_vec()),
        (b"k2".to_vec(), b"value--------2".to_vec()),
        (b"k3".to_vec(), b"value--------3".to_vec()),
        (b"k4".to_vec(), b"value--------4".to_vec()),
        (b"k5".to_vec(), b"value--------5".to_vec()),
    ];

    // Write every sample and read each one straight back.
    for &(ref key, ref val) in &samples {
        db.put(key, val).unwrap();
        assert_eq!(val.as_slice(), &*db.get(key).unwrap().unwrap());
    }

    // Persist the memtable to an SST file so the data contributes to the
    // on-disk size estimate.
    db.flush(true).unwrap();
    let size_before = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];

    // Delete everything, then compact the whole key space (both bounds open).
    for &(ref key, _) in &samples {
        db.delete(key).unwrap()
    }
    db.compact_range(None, None);

    // Compaction should have reclaimed space from the deleted keys.
    let size_after = db.get_approximate_sizes(&[Range::new(b"k0", b"k6")])[0];
    assert!(size_before > size_after);
}
......@@ -47,7 +47,7 @@ fn test_compaction_filter() {
let _snap = db.snapshot();
// Because ignore_snapshots is false, so force compact will not effect
// the keys written before.
db.compact_range(b"key1", b"key3");
db.compact_range(Some(b"key1"), Some(b"key3"));
for &(ref k, ref v) in &samples {
assert_eq!(v.as_slice(), &*db.get(k).unwrap().unwrap());
}
......@@ -66,7 +66,7 @@ fn test_compaction_filter() {
let db = DB::open(&opts, path.path().to_str().unwrap()).unwrap();
let _snap = db.snapshot();
// Because ignore_snapshots is true, so all the keys will be compacted.
db.compact_range(b"key1", b"key3");
db.compact_range(Some(b"key1"), Some(b"key3"));
for &(ref k, _) in &samples {
assert!(db.get(k).unwrap().is_none());
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment