Commit eb27fcf7 authored by siddontang's avatar siddontang

*: merge master and fix conflict

parents a7c90f5d ffdce61a
......@@ -2,6 +2,7 @@
name = "librocksdb_sys"
version = "0.1.0"
authors = ["Jay Lee <busyjaylee@gmail.com>"]
build = "build.rs"
[dependencies]
libc = "0.1.8"
......
use std::{env, fs, str};
use std::path::PathBuf;
use std::process::Command;
// Unwrap a `Result`, panicking with the stringified expression and the
// underlying error if it failed. Build scripts have no caller to report
// errors to, so aborting with context is the right behavior here.
macro_rules! t {
    ($e:expr) => (match $e {
        Ok(n) => n,
        Err(e) => panic!("\n{} failed with {}\n", stringify!($e), e),
    })
}
/// Build script: optionally builds and statically links RocksDB together
/// with its compression dependencies (zlib, snappy, bzip2, lz4).
///
/// Behavior is driven by environment variables:
/// * `ROCKSDB_SYS_STATIC=1` — enable static linking; otherwise this script
///   is a no-op and the dynamic system library is used.
/// * `ROCKSDB_SYS_PORTABLE` — forwarded to the rocksdb build as `PORTABLE`.
/// * `ROCKSDB_OTHER_STATIC` / `ROCKSDB_OTHER_STATIC_PATH` — `:`-separated
///   lists of additional static libraries / native search paths.
fn main() {
    let want_static = env::var("ROCKSDB_SYS_STATIC").map(|s| s == "1").unwrap_or(false);
    if !want_static {
        return;
    }

    let target = env::var("TARGET").unwrap();
    if !target.contains("linux") && !target.contains("darwin") {
        // only linux and apple support static link right now
        return;
    }

    // All compilation happens in $OUT_DIR/build; finished archives are moved
    // up into $OUT_DIR itself.
    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let build = dst.join("build");
    t!(fs::create_dir_all(&build));

    // build.sh (shipped next to Cargo.toml) knows how to download and
    // compile each individual library.
    let fest_dir = env::var("CARGO_MANIFEST_DIR").unwrap();
    let p = PathBuf::from(fest_dir).join("build.sh");

    for lib in &["z", "snappy", "bz2", "lz4", "rocksdb"] {
        let lib_name = format!("lib{}.a", lib);
        let src = build.join(&lib_name);
        let dst = dst.join(&lib_name);
        // Already moved into OUT_DIR by a previous run: nothing to do.
        if dst.exists() {
            continue;
        }
        if !src.exists() {
            let mut cmd = Command::new(p.as_path());
            cmd.current_dir(&build).args(&[format!("compile_{}", lib)]);
            if *lib == "rocksdb" {
                // `if let Ok` instead of the `.ok()` + `Some` detour.
                if let Ok(s) = env::var("ROCKSDB_SYS_PORTABLE") {
                    cmd.env("PORTABLE", s);
                }
            }
            run(&mut cmd);
        }
        if let Err(e) = fs::rename(src.as_path(), dst.as_path()) {
            panic!("failed to move {} to {}: {:?}", src.display(), dst.display(), e);
        }
    }

    println!("cargo:rustc-link-lib=static=rocksdb");
    println!("cargo:rustc-link-lib=static=z");
    println!("cargo:rustc-link-lib=static=bz2");
    println!("cargo:rustc-link-lib=static=lz4");
    println!("cargo:rustc-link-lib=static=snappy");
    println!("cargo:rustc-link-search=native={}", dst.display());

    // The C++ standard library must be linked as well; honor an explicit
    // user-provided list first.
    let mut cpp_linked = false;
    if let Ok(libs) = env::var("ROCKSDB_OTHER_STATIC") {
        for lib in libs.split(':') {
            if lib == "stdc++" {
                cpp_linked = true;
            }
            println!("cargo:rustc-link-lib=static={}", lib);
        }
        if let Ok(pathes) = env::var("ROCKSDB_OTHER_STATIC_PATH") {
            for p in pathes.split(':') {
                println!("cargo:rustc-link-search=native={}", p);
            }
        }
    }
    if !cpp_linked {
        // Ask build.sh where libstdc++.a lives so it can be linked statically.
        let output = Command::new(p.as_path()).arg("find_stdcxx").output().unwrap();
        if output.status.success() && !output.stdout.is_empty() {
            if let Ok(path_str) = str::from_utf8(&output.stdout) {
                // The script prints the path followed by a trailing newline;
                // strip it before building the PathBuf.
                let path = PathBuf::from(path_str.trim());
                if path.is_absolute() {
                    println!("cargo:rustc-link-lib=static=stdc++");
                    println!("cargo:rustc-link-search=native={}", path.parent().unwrap().display());
                    return;
                }
            }
        }
        // Best effort failed: fall back to dynamic linking of libstdc++.
        println!("failed to detect libstdc++.a: {:?}, fallback to dynamic", output);
        println!("cargo:rustc-link-lib=stdc++");
    }
}
/// Print and execute `cmd`, panicking if it cannot be spawned or if it
/// exits with a non-zero status. Build scripts abort on any tool failure.
fn run(cmd: &mut Command) {
    println!("running: {:?}", cmd);
    let result = cmd.status();
    match result {
        Ok(status) if status.success() => {}
        Ok(status) => panic!("{:?} failed: {}", cmd, status),
        Err(e) => panic!("{:?} failed: {}", cmd, e),
    }
}
#!/usr/bin/env bash
set -e

# Number of parallel make jobs: the logical CPU count, read from /proc on
# Linux or sysctl on macOS/BSD, defaulting to 1 when neither is available.
con=1
if [[ -f /proc/cpuinfo ]]; then
    con=$(grep -c processor /proc/cpuinfo)
else
    con=$(sysctl -n hw.ncpu 2>/dev/null || echo 1)
fi
# Print all arguments to stderr and fail (return 1) so that callers under
# `set -e` abort, while callers in an `if` condition see the failure.
function error() {
    # Quote "$@" so arguments keep their word boundaries instead of being
    # re-split by the shell (shellcheck SC2068).
    echo "$@" >&2
    return 1
}
# md5_check <file> <expected-md5>
# Compute the md5 of <file> with md5sum (Linux) or openssl (macOS),
# whichever is available, and fail if it does not match <expected-md5>.
function md5_check() {
    if which md5sum &>/dev/null; then
        hash=`md5sum $1 | cut -d ' ' -f 1`
    elif which openssl &>/dev/null; then
        # openssl prints "MD5(<file>)= <hash>"; the hash is the second field.
        hash=`openssl md5 -hex $1 | cut -d ' ' -f 2`
    else
        error can\'t find hash tool.
    fi
    [[ "$hash" == "$2" ]] || error $1: hash not correct, expect $2, got $hash
}
# download <url> <output-file> <md5>
# Fetch <url> into <output-file> using wget (falling back to curl), then
# verify its checksum. Skipped entirely when a previously downloaded copy
# already matches the expected md5.
function download() {
    # Reuse an existing, verified download.
    if [[ -f $2 ]] && md5_check $2 $3; then
        return
    fi
    if which wget &>/dev/null; then
        wget $1 -O $2
    elif which curl &>/dev/null; then
        curl -L $1 -o $2
    else
        error can\'t find wget and curl.
    fi
    md5_check $2 $3
}
# Build a static, position-independent zlib archive (libz.a) into the
# current directory; no-op when the archive is already present.
function compile_z() {
    if [[ -f libz.a ]]; then
        return
    fi
    rm -rf zlib-1.2.8
    download http://zlib.net/zlib-1.2.8.tar.gz zlib-1.2.8.tar.gz 44d667c142d7cda120332623eab69f40
    tar xf zlib-1.2.8.tar.gz
    # Build in a subshell so the working directory is restored automatically.
    (
        cd zlib-1.2.8
        CFLAGS='-fPIC' ./configure --static
        make -j $con
        cp libz.a ../
    )
}
# Build a static, position-independent bzip2 archive (libbz2.a) into the
# current directory; no-op when the archive is already present.
function compile_bz2() {
    if [[ -f libbz2.a ]]; then
        return
    fi
    rm -rf bzip2-1.0.6
    download http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz bzip2-1.0.6.tar.gz 00b516f4704d4a7cb50a1d97e6e8e15b
    tar xvzf bzip2-1.0.6.tar.gz
    # Build in a subshell so the working directory is restored automatically.
    (
        cd bzip2-1.0.6
        make CFLAGS='-fPIC -O2 -g -D_FILE_OFFSET_BITS=64' -j $con
        cp libbz2.a ../
    )
}
# Build a static, position-independent snappy archive (libsnappy.a) into
# the current directory; no-op when the archive is already present.
function compile_snappy() {
    if [[ -f libsnappy.a ]]; then
        return
    fi
    rm -rf snappy-1.1.1
    download http://pkgs.fedoraproject.org/repo/pkgs/snappy/snappy-1.1.1.tar.gz/8887e3b7253b22a31f5486bca3cbc1c2/snappy-1.1.1.tar.gz snappy-1.1.1.tar.gz 8887e3b7253b22a31f5486bca3cbc1c2
    tar xvzf snappy-1.1.1.tar.gz
    # Build in a subshell so the working directory is restored automatically.
    (
        cd snappy-1.1.1
        ./configure --with-pic --enable-static
        make -j $con
        mv .libs/libsnappy.a ../
    )
}
# Build a static, position-independent lz4 archive (liblz4.a) into the
# current directory; no-op when the archive is already present.
function compile_lz4() {
    if [[ -f liblz4.a ]]; then
        return
    fi
    # BUGFIX: clean the directory of the version we actually download and
    # build (r131). The previous code removed the stale r127 directory,
    # which left any partially extracted r131 tree in place.
    rm -rf lz4-r131
    download https://github.com/Cyan4973/lz4/archive/r131.tar.gz lz4-r131.tar.gz 42b09fab42331da9d3fb33bd5c560de9
    tar xvzf lz4-r131.tar.gz
    cd lz4-r131/lib
    make CFLAGS='-fPIC' all -j $con
    mv liblz4.a ../../
    cd ../..
}
# Download and build a static librocksdb.a against the locally built
# compression libraries; no-op when the archive is already present.
function compile_rocksdb() {
    if [[ -f librocksdb.a ]]; then
        return
    fi
    version=4.9.fb
    echo building rocksdb-$version
    rm -rf rocksdb-$version
    download https://github.com/facebook/rocksdb/archive/$version.tar.gz rocksdb-$version.tar.gz 75f00635d4dcf0200db54a9244ac5f1d
    tar xf rocksdb-$version.tar.gz
    wd=`pwd`
    cd rocksdb-$version
    # Point rocksdb at the headers of the libraries built by the sibling
    # compile_* steps, and enable the corresponding compression formats.
    export EXTRA_CFLAGS="-fPIC -I${wd}/zlib-1.2.8 -I${wd}/bzip2-1.0.6 -I${wd}/snappy-1.1.1 -I${wd}/lz4-r131/lib"
    export EXTRA_CXXFLAGS="-DZLIB -DBZIP2 -DSNAPPY -DLZ4 $EXTRA_CFLAGS"
    make static_lib -j $con
    mv librocksdb.a ../
    # Restore the caller's working directory, consistent with the other
    # compile_* functions.
    cd ..
}
# Locate libstdc++.a by asking the first working C++ compiler (g++ is
# preferred, then clang++) and print the compiler's answer on stdout.
# Fails when no usable compiler is found.
function find_stdcxx() {
    local cxx
    if g++ --version &>/dev/null; then
        cxx=g++
    elif clang++ --version &>/dev/null; then
        cxx=clang++
    else
        error failed to find valid cxx compiler.
    fi
    $cxx --print-file-name libstdc++.a
}
# Dispatch: the single argument names the function to run.
if (( $# != 1 )); then
    error $0 [compile_bz2\|compile_z\|compile_lz4\|compile_rocksdb\|compile_snappy\|find_stdcxx]
fi

$1
......@@ -17,7 +17,7 @@ extern crate libc;
#[cfg(test)]
extern crate tempdir;
use libc::{c_char, c_int, c_void, size_t, uint64_t};
use libc::{c_char, c_uchar, c_int, c_void, size_t, uint64_t};
use std::ffi::CStr;
use std::str::from_utf8;
......@@ -72,6 +72,7 @@ pub fn new_cache(capacity: size_t) -> DBCache {
unsafe { rocksdb_cache_create_lru(capacity) }
}
#[derive(Copy, Clone)]
#[repr(C)]
pub enum DBCompressionType {
DBNo = 0,
......@@ -123,6 +124,8 @@ extern "C" {
pub fn rocksdb_block_based_options_set_block_restart_interval(
block_options: DBBlockBasedTableOptions,
block_restart_interval: c_int);
pub fn rocksdb_block_based_options_set_cache_index_and_filter_blocks(
block_options: DBBlockBasedTableOptions, v: c_uchar);
pub fn rocksdb_block_based_options_set_filter_policy(
block_options: DBBlockBasedTableOptions,
filter_policy: DBFilterPolicy);
......@@ -186,6 +189,9 @@ extern "C" {
cs: DBCompactionStyle);
pub fn rocksdb_options_set_compression(options: DBOptions,
compression_style_no: DBCompressionType);
pub fn rocksdb_options_set_compression_per_level(options: DBOptions,
level_values: *const DBCompressionType,
num_levels: size_t);
pub fn rocksdb_options_set_max_background_compactions(
options: DBOptions, max_bg_compactions: c_int);
pub fn rocksdb_options_set_max_background_flushes(options: DBOptions,
......@@ -193,6 +199,9 @@ extern "C" {
pub fn rocksdb_options_set_filter_deletes(options: DBOptions, v: bool);
pub fn rocksdb_options_set_disable_auto_compactions(options: DBOptions,
v: c_int);
pub fn rocksdb_options_set_report_bg_io_stats(options: DBOptions, v: c_int);
pub fn rocksdb_filterpolicy_create_bloom_full(bits_per_key: c_int)
-> DBFilterPolicy;
pub fn rocksdb_filterpolicy_create_bloom(bits_per_key: c_int)
-> DBFilterPolicy;
pub fn rocksdb_open(options: DBOptions,
......@@ -460,12 +469,20 @@ extern "C" {
range_limit_key: *const u8,
range_limit_key_len: size_t,
err: *mut *const i8);
pub fn rocksdb_property_value(db: DBInstance,
propname: *const c_char)
-> *mut c_char;
pub fn rocksdb_property_value_cf(db: DBInstance,
cf: DBCFHandle,
propname: *const c_char)
-> *mut c_char;
}
#[cfg(test)]
mod test {
use super::*;
use std::ffi::CString;
use std::ffi::{CStr, CString};
use libc::{self, c_void};
use tempdir::TempDir;
#[test]
......@@ -542,6 +559,21 @@ mod test {
&mut err);
assert!(err.is_null(), error_message(err));
let propname = CString::new("rocksdb.total-sst-files-size")
.unwrap();
let value = rocksdb_property_value(db, propname.as_ptr());
assert!(!value.is_null());
let sst_size =
CStr::from_ptr(value).to_str().unwrap().parse::<u64>().unwrap();
assert!(sst_size > 0);
libc::free(value as *mut c_void);
let propname = CString::new("fake_key").unwrap();
let value = rocksdb_property_value(db, propname.as_ptr());
assert!(value.is_null());
libc::free(value as *mut c_void);
rocksdb_close(db);
rocksdb_destroy_db(opts, cpath_ptr, &mut err);
assert!(err.is_null());
......
......@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
#![feature(plugin)]
extern crate libc;
......@@ -27,6 +26,7 @@ pub mod comparator;
pub use librocksdb_sys::{DBCompactionStyle, DBComparator, DBCompressionType,
new_bloom_filter, self as rocksdb_ffi};
pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch};
pub use rocksdb::{DB, DBIterator, DBVector, Kv, ReadOptions, SeekKey,
Writable, WriteBatch};
pub use rocksdb_options::{BlockBasedOptions, Options, WriteOptions};
pub use merge_operator::MergeOperands;
......@@ -143,7 +143,7 @@ fn main() {
#[cfg(test)]
mod tests {
use rocksdb::{BlockBasedOptions, DB, Options};
use rocksdb::{BlockBasedOptions, DB, DBCompressionType, Options};
use rocksdb::DBCompactionStyle::DBUniversal;
#[allow(dead_code)]
......@@ -151,6 +151,15 @@ mod tests {
opts: &mut Options,
blockopts: &mut BlockBasedOptions)
-> DB {
let per_level_compression: [DBCompressionType; 7] =
[DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBNo,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4,
DBCompressionType::DBLz4];
opts.create_if_missing(true);
opts.set_max_open_files(10000);
opts.set_use_fsync(false);
......@@ -168,7 +177,11 @@ mod tests {
opts.set_max_background_compactions(4);
opts.set_max_background_flushes(4);
opts.set_filter_deletes(false);
opts.set_report_bg_io_stats(true);
opts.compression_per_level(&per_level_compression);
blockopts.set_block_size(524288);
blockopts.set_cache_index_and_filter_blocks(true);
blockopts.set_bloom_filter(10, false);
opts.set_block_based_table_factory(blockopts);
opts.set_disable_auto_compactions(true);
......
This diff is collapsed.
......@@ -90,6 +90,28 @@ impl BlockBasedOptions {
rocksdb_ffi::rocksdb_block_based_options_set_block_cache(self.inner, cache);
}
}
/// Install a bloom filter policy on these block-based table options.
/// `block_based == true` selects the per-block bloom filter; `false`
/// selects the full-filter variant.
pub fn set_bloom_filter(&mut self,
                        bits_per_key: c_int,
                        block_based: bool) {
    unsafe {
        let policy = if block_based {
            rocksdb_ffi::rocksdb_filterpolicy_create_bloom(bits_per_key)
        } else {
            rocksdb_ffi::rocksdb_filterpolicy_create_bloom_full(bits_per_key)
        };
        rocksdb_ffi::rocksdb_block_based_options_set_filter_policy(self.inner, policy);
    }
}
/// Forward the cache-index-and-filter-blocks flag to RocksDB's
/// block-based table options.
pub fn set_cache_index_and_filter_blocks(&mut self, v: bool) {
    // The C API expects a uchar flag, so map the bool to 0/1.
    let flag = v as u8;
    unsafe {
        rocksdb_ffi::rocksdb_block_based_options_set_cache_index_and_filter_blocks(self.inner,
                                                                                   flag);
    }
}
}
// TODO figure out how to create these in a Rusty way
......@@ -162,6 +184,15 @@ impl Options {
}
}
/// Set per-level compression: `level_types[i]` is the compression type
/// used for LSM level `i`.
pub fn compression_per_level(&mut self,
                             level_types: &[DBCompressionType]) {
    let num_levels = level_types.len() as size_t;
    unsafe {
        rocksdb_ffi::rocksdb_options_set_compression_per_level(self.inner,
                                                               level_types.as_ptr(),
                                                               num_levels)
    }
}
pub fn add_merge_operator(&mut self, name: &str, merge_fn: MergeFn) {
let cb = Box::new(MergeOperatorCallback {
name: CString::new(name.as_bytes()).unwrap(),
......@@ -359,6 +390,18 @@ impl Options {
rocksdb_ffi::rocksdb_options_set_block_based_table_factory(self.inner, factory.inner);
}
}
/// Enable or disable reporting of background I/O statistics.
pub fn set_report_bg_io_stats(&mut self, enable: bool) {
    // The C API expects an int flag (1 = on, 0 = off).
    let flag = if enable { 1 } else { 0 };
    unsafe {
        rocksdb_ffi::rocksdb_options_set_report_bg_io_stats(self.inner, flag);
    }
}
}
impl Default for WriteOptions {
......
......@@ -33,6 +33,7 @@ pub fn test_column_family() {
panic!("could not create column family: {}", e);
}
}
assert_eq!(db.cf_names(), vec!["cf1", "default"]);
}
// should fail to open db without specifying same column families
......@@ -56,7 +57,7 @@ pub fn test_column_family() {
{
let mut opts = Options::new();
opts.add_merge_operator("test operator", test_provided_merge);
match DB::open_cf(&opts, path_str, &["cf1"]) {
match DB::open_cf(&opts, path_str, &["cf1"], &[&opts]) {
Ok(_) => println!("successfully opened db with column family"),
Err(e) => panic!("failed to open db with column family: {}", e),
}
......@@ -65,7 +66,7 @@ pub fn test_column_family() {
{
let mut opts = Options::new();
opts.add_merge_operator("test operator", test_provided_merge);
let db = match DB::open_cf(&opts, path_str, &["cf1"]) {
let db = match DB::open_cf(&opts, path_str, &["cf1"], &[&opts]) {
Ok(db) => {
println!("successfully opened db with column family");
db
......@@ -113,7 +114,11 @@ pub fn test_column_family() {
}
// should be able to drop a cf
{
let mut db = DB::open_cf(&Options::new(), path_str, &["cf1"]).unwrap();
let mut db = DB::open_cf(&Options::new(),
path_str,
&["cf1"],
&[&Options::new()])
.unwrap();
match db.drop_cf("cf1") {
Ok(_) => println!("cf1 successfully dropped."),
Err(e) => panic!("failed to drop column family: {}", e),
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment