Commit 7557d4c3 authored by Neil Shen's avatar Neil Shen Committed by GitHub

add backup related ffis (#56)

parent dbe14af7
...@@ -35,6 +35,8 @@ pub enum DBWriteBatch {} ...@@ -35,6 +35,8 @@ pub enum DBWriteBatch {}
pub enum DBComparator {} pub enum DBComparator {}
pub enum DBFlushOptions {} pub enum DBFlushOptions {}
pub enum DBCompactionFilter {} pub enum DBCompactionFilter {}
// Opaque handle to a rocksdb backup engine; only ever used behind a raw pointer.
pub enum DBBackupEngine {}
// Opaque handle to rocksdb restore options; only ever used behind a raw pointer.
pub enum DBRestoreOptions {}
pub fn new_bloom_filter(bits: c_int) -> *mut DBFilterPolicy { pub fn new_bloom_filter(bits: c_int) -> *mut DBFilterPolicy {
unsafe { rocksdb_filterpolicy_create_bloom(bits) } unsafe { rocksdb_filterpolicy_create_bloom(bits) }
...@@ -479,6 +481,27 @@ extern "C" { ...@@ -479,6 +481,27 @@ extern "C" {
pub fn rocksdb_compactionfilter_set_ignore_snapshots(filter: *mut DBCompactionFilter, pub fn rocksdb_compactionfilter_set_ignore_snapshots(filter: *mut DBCompactionFilter,
ignore_snapshot: bool); ignore_snapshot: bool);
pub fn rocksdb_compactionfilter_destroy(filter: *mut DBCompactionFilter); pub fn rocksdb_compactionfilter_destroy(filter: *mut DBCompactionFilter);
// Restore options: create/destroy pair plus the keep-log-files toggle.
pub fn rocksdb_restore_options_create() -> *mut DBRestoreOptions;
pub fn rocksdb_restore_options_destroy(ropts: *mut DBRestoreOptions);
// `v` is a C-style boolean flag (0 or 1).
pub fn rocksdb_restore_options_set_keep_log_files(ropts: *mut DBRestoreOptions, v: c_int);

// Backup engine.
// TODO: add more ffis about backup engine.
// NOTE(review): per the usual rocksdb C API convention, `err` is presumably
// set to an allocated error string on failure — confirm against c.h.
pub fn rocksdb_backup_engine_open(options: *const DBOptions,
                                  path: *const c_char,
                                  err: *mut *mut c_char)
                                  -> *mut DBBackupEngine;
pub fn rocksdb_backup_engine_create_new_backup(be: *mut DBBackupEngine,
                                               db: *mut DBInstance,
                                               err: *mut *mut c_char);
pub fn rocksdb_backup_engine_close(be: *mut DBBackupEngine);
pub fn rocksdb_backup_engine_restore_db_from_latest_backup(be: *mut DBBackupEngine,
                                                           db_path: *const c_char,
                                                           wal_path: *const c_char,
                                                           ropts: *const DBRestoreOptions,
                                                           err: *mut *mut c_char);
} }
#[cfg(test)] #[cfg(test)]
......
...@@ -30,5 +30,6 @@ pub use compaction_filter::CompactionFilter; ...@@ -30,5 +30,6 @@ pub use compaction_filter::CompactionFilter;
pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, new_bloom_filter, pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, new_bloom_filter,
self as rocksdb_ffi}; self as rocksdb_ffi};
pub use merge_operator::MergeOperands; pub use merge_operator::MergeOperands;
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range}; pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range,
pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions, WriteOptions}; BackupEngine};
pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions, WriteOptions, RestoreOptions};
...@@ -16,8 +16,8 @@ ...@@ -16,8 +16,8 @@
use libc::{self, c_int, c_void, size_t}; use libc::{self, c_int, c_void, size_t};
use rocksdb_ffi::{self, DBWriteBatch, DBCFHandle, DBInstance}; use rocksdb_ffi::{self, DBWriteBatch, DBCFHandle, DBInstance, DBBackupEngine};
use rocksdb_options::{Options, ReadOptions, UnsafeSnap, WriteOptions, FlushOptions}; use rocksdb_options::{Options, ReadOptions, UnsafeSnap, WriteOptions, FlushOptions, RestoreOptions};
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::collections::btree_map::Entry; use std::collections::btree_map::Entry;
use std::ffi::{CStr, CString}; use std::ffi::{CStr, CString};
...@@ -806,6 +806,45 @@ impl DB { ...@@ -806,6 +806,45 @@ impl DB {
pub fn get_statistics(&self) -> Option<String> { pub fn get_statistics(&self) -> Option<String> {
self.opts.get_statistics() self.opts.get_statistics()
} }
pub fn backup_at(&self, path: &str) -> Result<BackupEngine, String> {
let backup_engine = BackupEngine::open(Options::new(), path).unwrap();
unsafe {
ffi_try!(rocksdb_backup_engine_create_new_backup(backup_engine.inner, self.inner))
}
Ok(backup_engine)
}
pub fn restore_from(backup_engine: &BackupEngine,
restore_db_path: &str,
restore_wal_path: &str,
ropts: &RestoreOptions)
-> Result<DB, String> {
let c_db_path = match CString::new(restore_db_path.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert restore_db_path to CString when restoring rocksdb"
.to_owned())
}
};
let c_wal_path = match CString::new(restore_wal_path.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert restore_wal_path to CString when restoring rocksdb"
.to_owned())
}
};
unsafe {
ffi_try!(rocksdb_backup_engine_restore_db_from_latest_backup(backup_engine.inner,
c_db_path.as_ptr(),
c_wal_path.as_ptr(),
ropts.inner))
};
DB::open_default(restore_db_path)
}
} }
impl Writable for DB { impl Writable for DB {
...@@ -967,6 +1006,40 @@ impl DBVector { ...@@ -967,6 +1006,40 @@ impl DBVector {
} }
} }
/// Owns a raw rocksdb backup engine handle; the handle is closed in Drop.
pub struct BackupEngine {
    inner: *mut DBBackupEngine,
}
impl BackupEngine {
pub fn open(opts: Options, path: &str) -> Result<BackupEngine, String> {
let cpath = match CString::new(path.as_bytes()) {
Ok(c) => c,
Err(_) => {
return Err("Failed to convert path to CString when opening rocksdb backup engine"
.to_owned())
}
};
if let Err(e) = fs::create_dir_all(path) {
return Err(format!("Failed to create rocksdb backup directory: {:?}", e));
}
let backup_engine =
unsafe { ffi_try!(rocksdb_backup_engine_open(opts.inner, cpath.as_ptr())) };
Ok(BackupEngine { inner: backup_engine })
}
}
impl Drop for BackupEngine {
    fn drop(&mut self) {
        // Release the underlying C backup engine handle.
        unsafe {
            rocksdb_ffi::rocksdb_backup_engine_close(self.inner);
        }
    }
}
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use rocksdb_options::*; use rocksdb_options::*;
...@@ -1113,6 +1186,38 @@ mod test { ...@@ -1113,6 +1186,38 @@ mod test {
cfs.sort(); cfs.sort();
assert_eq!(cfs_vec, cfs); assert_eq!(cfs_vec, cfs);
} }
/// End-to-end check of the backup/restore FFI path: write a key, back the
/// DB up, restore it twice (with and without keeping WAL files), and make
/// sure the key survives each restore.
#[test]
fn backup_db_test() {
    let key = b"foo";
    let value = b"bar";

    let db_dir = TempDir::new("_rust_rocksdb_backuptest").unwrap();
    let db = DB::open_default(db_dir.path().to_str().unwrap()).unwrap();
    db.put(key, value).unwrap();

    // Make a backup.
    let backup_dir = TempDir::new("_rust_rocksdb_backuptest_backup").unwrap();
    let backup_engine = db.backup_at(backup_dir.path().to_str().unwrap()).unwrap();

    // Restore it, exercising both keep_log_files settings.
    let ropt1 = RestoreOptions::new();
    let mut ropt2 = RestoreOptions::new();
    ropt2.set_keep_log_files(true);
    let ropts = [ropt1, ropt2];
    for ropt in &ropts {
        let restore_dir = TempDir::new("_rust_rocksdb_backuptest_restore").unwrap();
        let restored_db = DB::restore_from(&backup_engine,
                                           restore_dir.path().to_str().unwrap(),
                                           restore_dir.path().to_str().unwrap(),
                                           ropt)
            .unwrap();
        // assert_eq! reports both sides on failure, unlike assert!(a == b).
        assert_eq!(restored_db.get(key).unwrap().unwrap().to_utf8().unwrap(),
                   str::from_utf8(value).unwrap());
    }
}
} }
#[test] #[test]
......
...@@ -20,7 +20,8 @@ use merge_operator::{self, MergeOperatorCallback, full_merge_callback, partial_m ...@@ -20,7 +20,8 @@ use merge_operator::{self, MergeOperatorCallback, full_merge_callback, partial_m
use merge_operator::MergeFn; use merge_operator::MergeFn;
use rocksdb_ffi::{self, DBOptions, DBWriteOptions, DBBlockBasedTableOptions, DBReadOptions, use rocksdb_ffi::{self, DBOptions, DBWriteOptions, DBBlockBasedTableOptions, DBReadOptions,
DBCompressionType, DBRecoveryMode, DBSnapshot, DBInstance, DBFlushOptions}; DBRestoreOptions, DBCompressionType, DBRecoveryMode, DBSnapshot, DBInstance,
DBFlushOptions};
use std::ffi::{CStr, CString}; use std::ffi::{CStr, CString};
use std::mem; use std::mem;
...@@ -568,6 +569,31 @@ impl Drop for FlushOptions { ...@@ -568,6 +569,31 @@ impl Drop for FlushOptions {
} }
} }
/// Owns a raw rocksdb restore-options handle; destroyed in Drop.
pub struct RestoreOptions {
    pub inner: *mut DBRestoreOptions,
}
impl RestoreOptions {
    /// Allocates a fresh set of restore options with rocksdb's defaults.
    pub fn new() -> RestoreOptions {
        let inner = unsafe { rocksdb_ffi::rocksdb_restore_options_create() };
        RestoreOptions { inner: inner }
    }

    /// Controls whether existing log files are kept when restoring.
    pub fn set_keep_log_files(&mut self, flag: bool) {
        // The C API takes an int flag, so map the bool to 0/1 explicitly.
        let v = if flag { 1 } else { 0 };
        unsafe { rocksdb_ffi::rocksdb_restore_options_set_keep_log_files(self.inner, v) }
    }
}
impl Drop for RestoreOptions {
    fn drop(&mut self) {
        // Free the underlying C restore-options object.
        unsafe {
            rocksdb_ffi::rocksdb_restore_options_destroy(self.inner);
        }
    }
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::Options; use super::Options;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment