Commit 9cb6718c authored by zhangjinpeng1987's avatar zhangjinpeng1987 Committed by GitHub

Add rate limiter to limit disk IO when doing compaction and flush (#3)

parent 9f606d34
......@@ -41,6 +41,7 @@ pub enum IngestExternalFileOptions {}
pub enum DBBackupEngine {}
pub enum DBRestoreOptions {}
pub enum DBSliceTransform {}
pub enum DBRateLimiter {}
pub fn new_bloom_filter(bits: c_int) -> *mut DBFilterPolicy {
unsafe { crocksdb_filterpolicy_create_bloom(bits) }
......@@ -266,6 +267,12 @@ extern "C" {
prefix_extractor: *mut DBSliceTransform);
pub fn crocksdb_options_set_memtable_prefix_bloom_size_ratio(options: *mut DBOptions,
ratio: c_double);
pub fn crocksdb_options_set_ratelimiter(options: *mut DBOptions, limiter: *mut DBRateLimiter);
pub fn crocksdb_ratelimiter_create(rate_bytes_per_sec: i64,
refill_period_us: i64,
fairness: i32)
-> *mut DBRateLimiter;
pub fn crocksdb_ratelimiter_destroy(limiter: *mut DBRateLimiter);
pub fn crocksdb_filterpolicy_create_bloom_full(bits_per_key: c_int) -> *mut DBFilterPolicy;
pub fn crocksdb_filterpolicy_create_bloom(bits_per_key: c_int) -> *mut DBFilterPolicy;
pub fn crocksdb_open(options: *mut DBOptions,
......
......@@ -18,7 +18,8 @@ use comparator::{self, ComparatorCallback, compare_callback};
use crocksdb_ffi::{self, DBOptions, DBWriteOptions, DBBlockBasedTableOptions, DBReadOptions,
DBRestoreOptions, DBCompressionType, DBRecoveryMode, DBSnapshot, DBInstance,
DBFlushOptions, DBStatisticsTickerType, DBStatisticsHistogramType};
DBFlushOptions, DBStatisticsTickerType, DBStatisticsHistogramType,
DBRateLimiter};
use libc::{self, c_int, size_t, c_void};
use merge_operator::{self, MergeOperatorCallback, full_merge_callback, partial_merge_callback};
use merge_operator::MergeFn;
......@@ -104,6 +105,30 @@ impl BlockBasedOptions {
}
}
/// Owned handle to a RocksDB rate limiter, used to throttle disk IO
/// performed by compaction and flush.
pub struct RateLimiter {
    // Raw pointer obtained from crocksdb_ratelimiter_create; released
    // exactly once in the Drop impl via crocksdb_ratelimiter_destroy.
    inner: *mut DBRateLimiter,
}
impl RateLimiter {
    /// Constructs a rate limiter through the RocksDB C API.
    ///
    /// * `rate_bytes_per_sec` - maximum write rate granted per second.
    /// * `refill_period_us` - token refill interval in microseconds.
    /// * `fairness` - low-priority vs. high-priority request fairness knob.
    pub fn new(rate_bytes_per_sec: i64, refill_period_us: i64, fairness: i32) -> RateLimiter {
        // SAFETY: the FFI constructor returns a freshly allocated limiter
        // pointer, which this wrapper owns and frees in Drop.
        let handle = unsafe {
            crocksdb_ffi::crocksdb_ratelimiter_create(rate_bytes_per_sec, refill_period_us, fairness)
        };
        RateLimiter { inner: handle }
    }
}
impl Drop for RateLimiter {
    fn drop(&mut self) {
        // SAFETY: `inner` came from crocksdb_ratelimiter_create (see
        // RateLimiter::new) and is destroyed exactly once, here.
        unsafe { crocksdb_ffi::crocksdb_ratelimiter_destroy(self.inner) }
    }
}
// Default refill interval for the rate limiter; 100ms should work for most cases.
const DEFAULT_REFILL_PERIOD_US: i64 = 100 * 1000; // 100ms should work for most cases
// Default fairness knob; should be good by leaving it at default 10.
const DEFAULT_FAIRNESS: i32 = 10; // should be good by leaving it at default 10
/// The UnsafeSnap must be destroyed by the db; it may be leaked
/// if not used properly, hence the name "unsafe".
///
......@@ -679,6 +704,15 @@ impl Options {
size as size_t);
}
}
/// Throttles the disk IO of compaction and flush to at most
/// `rate_bytes_per_sec` bytes per second, using the default refill
/// period and fairness settings.
pub fn set_ratelimiter(&mut self, rate_bytes_per_sec: i64) {
    let limiter =
        RateLimiter::new(rate_bytes_per_sec, DEFAULT_REFILL_PERIOD_US, DEFAULT_FAIRNESS);
    // NOTE(review): `limiter` is dropped (crocksdb_ratelimiter_destroy) when
    // this function returns; this presumes the C layer copies a shared handle
    // into the options object — confirm against crocksdb's
    // crocksdb_options_set_ratelimiter implementation.
    unsafe { crocksdb_ffi::crocksdb_options_set_ratelimiter(self.inner, limiter.inner) }
}
}
pub struct FlushOptions {
......
......@@ -107,3 +107,14 @@ fn test_memtable_insert_hint_prefix_extractor() {
assert_eq!(db.get(b"k0-2").unwrap().unwrap(), b"b");
assert_eq!(db.get(b"k0-3").unwrap().unwrap(), b"c");
}
#[test]
fn test_set_ratelimiter() {
    // Fix: the original used `expect("")`, which hides the failure cause;
    // give the panic a diagnosable message instead.
    let path = TempDir::new("_rust_rocksdb_test_set_rate_limiter")
        .expect("failed to create temp dir for rate limiter test");
    let mut opts = Options::new();
    opts.create_if_missing(true);
    // Compaction and flush rate limited below 100MB/sec.
    opts.set_ratelimiter(100 * 1024 * 1024);
    // Opening the DB with the limiter installed must succeed.
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    // Close the DB explicitly before TempDir cleanup runs.
    drop(db);
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment