fangzongwu / rust-rocksdb / Commits / edf9421f

Commit edf9421f, authored Nov 08, 2015 by Tyler Neely
rustfmt

parent ad201263
Showing 12 changed files with 710 additions and 526 deletions (+710, -526)

The commit applies rustfmt: use imports are reordered, block comments become
line comments, and long calls are re-wrapped to the 80-column limit. Hunks
whose removed and added sides contain the same tokens and differ only in
whitespace are shown once below.
.gitignore                     +4    -0
rustfmt.toml                   +3    -0
src/comparator.rs              +24   -22
src/ffi.rs                     +160  -144
src/lib.rs                     +18   -18
src/main.rs                    +78   -72
src/merge_operator.rs          +31   -31
src/rocksdb.rs                 +269  -129
src/rocksdb_options.rs         +72   -66
test/test_column_family.rs     +28   -27
test/test_iterator.rs          +19   -13
test/test_multithreaded.rs     +4    -4
.gitignore

@@ -3,3 +3,7 @@
 target
 Cargo.lock
 *.orig
+*.bk
+_rust_rocksdb*
+*rlib
+tags
rustfmt.toml (new file, 0 → 100644)

reorder_imports = true
max_width = 80
ideal_width = 80
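For context (not part of the commit), a minimal, self-contained Rust sketch of
the style these three settings enforce: reorder_imports = true sorts the names
inside a brace-grouped use, and max_width / ideal_width cap lines at 80
columns, which is why so many calls in the hunks below are re-wrapped. The
names in this snippet are illustrative only.

// Hypothetical example, not from the repository: under the configuration
// above, imports inside a brace group come out sorted and any expression
// wider than 80 columns is wrapped at argument boundaries.
use std::collections::{BTreeMap, HashMap};

fn main() {
    let mut sorted: BTreeMap<&str, u32> = BTreeMap::new();
    sorted.insert("reorder_imports", 1);
    sorted.insert("max_width", 80);
    // A chained call like this one is split across lines once it passes the
    // 80-column limit, mirroring the re-wrapped rocksdb_ffi calls below.
    let copy: HashMap<&str, u32> = sorted.iter()
                                         .map(|(k, v)| (*k, *v))
                                         .collect();
    println!("{:?} {:?}", sorted, copy);
}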
src/comparator.rs

-/*
-   Copyright 2014 Tyler Neely
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-   http://www.apache.org/licenses/LICENSE-2.0
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-*/
+// Copyright 2014 Tyler Neely
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
 extern crate libc;
 use self::libc::{c_char, c_int, c_void, size_t};
 use std::ffi::CString;

@@ -30,7 +30,7 @@ pub struct ComparatorCallback {
 pub extern "C" fn destructor_callback(raw_cb: *mut c_void) {
     // turn this back into a local variable so rust will reclaim it
     let _: Box<ComparatorCallback> = unsafe { mem::transmute(raw_cb) };
 }

 pub extern "C" fn name_callback(raw_cb: *mut c_void) -> *const c_char {

@@ -51,8 +51,10 @@ pub extern "C" fn compare_callback(raw_cb: *mut c_void,
     unsafe {
         let cb: &mut ComparatorCallback =
             &mut *(raw_cb as *mut ComparatorCallback);
-        let a: &[u8] = slice::from_raw_parts(a_raw as *const u8, a_len as usize);
-        let b: &[u8] = slice::from_raw_parts(b_raw as *const u8, b_len as usize);
+        let a: &[u8] = slice::from_raw_parts(a_raw as *const u8,
+                                             a_len as usize);
+        let b: &[u8] = slice::from_raw_parts(b_raw as *const u8,
+                                             b_len as usize);
         (cb.f)(a, b)
     }
 }

@@ -67,9 +69,9 @@ fn test_reverse_compare(a: &[u8], b: &[u8]) -> c_int {
     }
 }

-//#[allow(dead_code)]
-//#[test]
-//fn compare_works() {
+// #[allow(dead_code)]
+// #[test]
+// fn compare_works() {
 //    let path = "_rust_rocksdb_comparetest";
 //    let mut opts = Options::new();
 //    opts.create_if_missing(true);

@@ -79,4 +81,4 @@ fn test_reverse_compare(a: &[u8], b: &[u8]) -> c_int {
 //        // TODO add interesting test
 //    }
 //    assert!(DB::destroy(&opts, path).is_ok());
-//}
+// }
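As background, a hedged, std-only sketch (not code from this commit or crate)
of the callback pattern used in src/comparator.rs: Rust state is boxed, erased
to *mut c_void for the C side, byte slices are rebuilt from raw pointers inside
the extern "C" callbacks, and the box is reclaimed in a destructor callback.
The sketch uses Box::into_raw / Box::from_raw where the diff uses
mem::transmute; all names here are illustrative.

use std::os::raw::{c_int, c_void};
use std::slice;

struct ComparatorState {
    f: fn(&[u8], &[u8]) -> c_int,
}

extern "C" fn compare_cb(raw: *mut c_void,
                         a_ptr: *const u8,
                         a_len: usize,
                         b_ptr: *const u8,
                         b_len: usize)
                         -> c_int {
    unsafe {
        // Recover the type-erased state and the two key slices.
        let state = &*(raw as *const ComparatorState);
        let a = slice::from_raw_parts(a_ptr, a_len);
        let b = slice::from_raw_parts(b_ptr, b_len);
        (state.f)(a, b)
    }
}

extern "C" fn destructor_cb(raw: *mut c_void) {
    // Turn the pointer back into a Box so Rust reclaims the state.
    unsafe { drop(Box::from_raw(raw as *mut ComparatorState)) };
}

fn reverse_compare(a: &[u8], b: &[u8]) -> c_int {
    b.cmp(a) as c_int
}

fn main() {
    let state = Box::new(ComparatorState { f: reverse_compare });
    let raw = Box::into_raw(state) as *mut c_void;
    // Stand-in for the C library invoking the registered callbacks.
    let ordering = compare_cb(raw, b"abc".as_ptr(), 3, b"abd".as_ptr(), 3);
    println!("compare result: {}", ordering);
    destructor_cb(raw);
}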
src/ffi.rs

(license header: the /* ... */ block comment is converted to // line comments,
as in src/comparator.rs)

 extern crate libc;
 use self::libc::{c_char, c_int, c_void, size_t};
-use std::ffi::{CString, CStr};
+use std::ffi::{CStr, CString};
 use std::str::from_utf8;

 #[derive(Copy, Clone)]

@@ -59,15 +59,11 @@ pub struct DBWriteBatch(pub *const c_void);
 pub struct DBComparator(pub *const c_void);

 pub fn new_bloom_filter(bits: c_int) -> DBFilterPolicy {
-    unsafe {
-        rocksdb_filterpolicy_create_bloom(bits)
-    }
+    unsafe { rocksdb_filterpolicy_create_bloom(bits) }
 }

 pub fn new_cache(capacity: size_t) -> DBCache {
-    unsafe {
-        rocksdb_cache_create_lru(capacity)
-    }
+    unsafe { rocksdb_cache_create_lru(capacity) }
 }

 #[repr(C)]

@@ -96,13 +92,14 @@ pub enum DBUniversalCompactionStyle {
 pub fn error_message(ptr: *const i8) -> String {
     let c_str = unsafe { CStr::from_ptr(ptr) };
     let s = from_utf8(c_str.to_bytes()).unwrap().to_owned();
     unsafe {
         libc::free(ptr as *mut libc::c_void);
     }
     s
 }

-//TODO audit the use of boolean arguments, b/c I think they need to be u8 instead...
+// TODO audit the use of boolean arguments, b/c I think they need to be u8
+// instead...
 #[link(name = "rocksdb")]
 extern {
     pub fn rocksdb_options_create() -> DBOptions;

@@ -135,150 +132,154 @@ extern {
 (the declarations in this hunk are unchanged; rustfmt only re-wraps their
  argument lists)
     pub fn rocksdb_options_set_block_based_table_factory(options: DBOptions, block_options: DBBlockBasedTableOptions);
     pub fn rocksdb_options_increase_parallelism(options: DBOptions, threads: c_int);
     pub fn rocksdb_options_optimize_level_style_compaction(options: DBOptions, memtable_memory_budget: c_int);
     pub fn rocksdb_options_set_create_if_missing(options: DBOptions, v: bool);
     pub fn rocksdb_options_set_max_open_files(options: DBOptions, files: c_int);
     pub fn rocksdb_options_set_use_fsync(options: DBOptions, v: c_int);
     pub fn rocksdb_options_set_bytes_per_sync(options: DBOptions, bytes: u64);
     pub fn rocksdb_options_set_disable_data_sync(options: DBOptions, v: c_int);
     pub fn rocksdb_options_optimize_for_point_lookup(options: DBOptions, block_cache_size_mb: u64);
     pub fn rocksdb_options_set_table_cache_numshardbits(options: DBOptions, bits: c_int);
     pub fn rocksdb_options_set_max_write_buffer_number(options: DBOptions, bufno: c_int);
     pub fn rocksdb_options_set_min_write_buffer_number_to_merge(options: DBOptions, bufno: c_int);
     pub fn rocksdb_options_set_level0_file_num_compaction_trigger(options: DBOptions, no: c_int);
     pub fn rocksdb_options_set_level0_slowdown_writes_trigger(options: DBOptions, no: c_int);
     pub fn rocksdb_options_set_level0_stop_writes_trigger(options: DBOptions, no: c_int);
     pub fn rocksdb_options_set_write_buffer_size(options: DBOptions, bytes: u64);
     pub fn rocksdb_options_set_target_file_size_base(options: DBOptions, bytes: u64);
     pub fn rocksdb_options_set_target_file_size_multiplier(options: DBOptions, mul: c_int);
     pub fn rocksdb_options_set_max_log_file_size(options: DBOptions, bytes: u64);
     pub fn rocksdb_options_set_max_manifest_file_size(options: DBOptions, bytes: u64);
     pub fn rocksdb_options_set_hash_skip_list_rep(options: DBOptions, bytes: u64, a1: i32, a2: i32);
     pub fn rocksdb_options_set_compaction_style(options: DBOptions, cs: DBCompactionStyle);
     pub fn rocksdb_options_set_compression(options: DBOptions, compression_style_no: c_int);
     pub fn rocksdb_options_set_max_background_compactions(options: DBOptions, max_bg_compactions: c_int);
     pub fn rocksdb_options_set_max_background_flushes(options: DBOptions, max_bg_flushes: c_int);
     pub fn rocksdb_options_set_filter_deletes(options: DBOptions, v: bool);
     pub fn rocksdb_options_set_disable_auto_compactions(options: DBOptions, v: c_int);
     pub fn rocksdb_filterpolicy_create_bloom(bits_per_key: c_int) -> DBFilterPolicy;
     pub fn rocksdb_open(options: DBOptions, path: *const i8, err: *mut *const i8) -> DBInstance;
     pub fn rocksdb_writeoptions_create() -> DBWriteOptions;
     pub fn rocksdb_writeoptions_destroy(writeopts: DBWriteOptions);
     pub fn rocksdb_put(db: DBInstance, writeopts: DBWriteOptions, k: *const u8, kLen: size_t, v: *const u8, vLen: size_t, err: *mut *const i8);
     pub fn rocksdb_put_cf(db: DBInstance, writeopts: DBWriteOptions, cf: DBCFHandle, k: *const u8, kLen: size_t, v: *const u8, vLen: size_t, err: *mut *const i8);
     pub fn rocksdb_readoptions_create() -> DBReadOptions;
     pub fn rocksdb_readoptions_destroy(readopts: DBReadOptions);
     pub fn rocksdb_readoptions_set_verify_checksums(readopts: DBReadOptions, v: bool);
     pub fn rocksdb_readoptions_set_fill_cache(readopts: DBReadOptions, v: bool);
     pub fn rocksdb_readoptions_set_snapshot(readopts: DBReadOptions, snapshot: DBSnapshot); // TODO how do I make this a const ref?
     pub fn rocksdb_readoptions_set_iterate_upper_bound(readopts: DBReadOptions, k: *const u8, kLen: size_t);
     pub fn rocksdb_readoptions_set_read_tier(readopts: DBReadOptions, tier: c_int);
     pub fn rocksdb_readoptions_set_tailing(readopts: DBReadOptions, v: bool);
     pub fn rocksdb_get(db: DBInstance, readopts: DBReadOptions, k: *const u8, kLen: size_t, valLen: *const size_t, err: *mut *const i8) -> *mut c_void;
     pub fn rocksdb_get_cf(db: DBInstance, readopts: DBReadOptions, cf_handle: DBCFHandle, k: *const u8, kLen: size_t, valLen: *const size_t, err: *mut *const i8) -> *mut c_void;
     pub fn rocksdb_create_iterator(db: DBInstance, readopts: DBReadOptions) -> DBIterator;
     pub fn rocksdb_create_iterator_cf(db: DBInstance, readopts: DBReadOptions, cf_handle: DBCFHandle) -> DBIterator;
     pub fn rocksdb_create_snapshot(db: DBInstance) -> DBSnapshot;
     pub fn rocksdb_release_snapshot(db: DBInstance, snapshot: DBSnapshot);
     pub fn rocksdb_delete(db: DBInstance, writeopts: DBWriteOptions, k: *const u8, kLen: size_t, err: *mut *const i8) -> *mut c_void;
     pub fn rocksdb_delete_cf(db: DBInstance, writeopts: DBWriteOptions, cf: DBCFHandle, k: *const u8, kLen: size_t, err: *mut *const i8) -> *mut c_void;
     pub fn rocksdb_close(db: DBInstance);
     pub fn rocksdb_destroy_db(options: DBOptions, path: *const i8, err: *mut *const i8);
     pub fn rocksdb_repair_db(options: DBOptions, path: *const i8, err: *mut *const i8);
     // Merge
     pub fn rocksdb_merge(db: DBInstance, writeopts: DBWriteOptions, k: *const u8, kLen: size_t, v: *const u8, vLen: size_t, err: *mut *const i8);
     pub fn rocksdb_merge_cf(db: DBInstance, writeopts: DBWriteOptions, cf: DBCFHandle, k: *const u8, kLen: size_t, v: *const u8, vLen: size_t, err: *mut *const i8);
     pub fn rocksdb_mergeoperator_create(state: *mut c_void, ...

@@ -313,50 +314,53 @@ extern {
     pub fn rocksdb_iter_valid(iter: DBIterator) -> bool;
     pub fn rocksdb_iter_seek_to_first(iter: DBIterator);
     pub fn rocksdb_iter_seek_to_last(iter: DBIterator);
     pub fn rocksdb_iter_seek(iter: DBIterator, key: *const u8, klen: size_t);
     pub fn rocksdb_iter_next(iter: DBIterator);
     pub fn rocksdb_iter_prev(iter: DBIterator);
     pub fn rocksdb_iter_key(iter: DBIterator, klen: *mut size_t) -> *mut u8;
     pub fn rocksdb_iter_value(iter: DBIterator, vlen: *mut size_t) -> *mut u8;
     pub fn rocksdb_iter_get_error(iter: DBIterator, err: *mut *const u8);
     // Write batch
     pub fn rocksdb_write(db: DBInstance, writeopts: DBWriteOptions, batch: DBWriteBatch, err: *mut *const i8);
     pub fn rocksdb_writebatch_create() -> DBWriteBatch;
     pub fn rocksdb_writebatch_create_from(rep: *const u8, size: size_t) -> DBWriteBatch;
     pub fn rocksdb_writebatch_destroy(batch: DBWriteBatch);
     pub fn rocksdb_writebatch_clear(batch: DBWriteBatch);
     pub fn rocksdb_writebatch_count(batch: DBWriteBatch) -> c_int;
     pub fn rocksdb_writebatch_put(batch: DBWriteBatch, key: *const u8, klen: size_t, val: *const u8, vlen: size_t);
     pub fn rocksdb_writebatch_put_cf(batch: DBWriteBatch, cf: DBCFHandle, key: *const u8, klen: size_t, val: *const u8, vlen: size_t);
     pub fn rocksdb_writebatch_merge(batch: DBWriteBatch, key: *const u8, klen: size_t, val: *const u8, vlen: size_t);
     pub fn rocksdb_writebatch_merge_cf(batch: DBWriteBatch, cf: DBCFHandle, key: *const u8, klen: size_t, val: *const u8, vlen: size_t);
     pub fn rocksdb_writebatch_delete(batch: DBWriteBatch, key: *const u8, klen: size_t);
     pub fn rocksdb_writebatch_delete_cf(batch: DBWriteBatch, cf: DBCFHandle, key: *const u8, klen: size_t);
     pub fn rocksdb_writebatch_iterate(batch: DBWriteBatch, state: *mut c_void, ...

@@ -366,7 +370,8 @@ extern {
                                       deleted_fn: extern fn(state: *mut c_void, k: *const u8, klen: size_t));
     pub fn rocksdb_writebatch_data(batch: DBWriteBatch, size: *mut size_t) -> *const u8;
     // Comparator
     pub fn rocksdb_options_set_comparator(options: DBOptions, ...

@@ -394,8 +399,8 @@ extern {
     pub fn rocksdb_create_column_family(db: DBInstance, column_family_options: DBOptions, column_family_name: *const i8, err: *mut *const i8) -> DBCFHandle;
     pub fn rocksdb_drop_column_family(db: DBInstance, column_family_handle: DBCFHandle, err: *mut *const i8);

@@ -430,7 +435,13 @@ fn internal() {
     let key = b"name\x00";
     let val = b"spacejam\x00";
-    rocksdb_put(db, writeopts.clone(), key.as_ptr(), 4, val.as_ptr(), 8, err_ptr);
+    rocksdb_put(db,
+                writeopts.clone(),
+                key.as_ptr(),
+                4,
+                val.as_ptr(),
+                8,
+                err_ptr);
     rocksdb_writeoptions_destroy(writeopts);
     assert!(err.is_null());

@@ -439,7 +450,12 @@ fn internal() {
     let val_len: size_t = 0;
     let val_len_ptr = &val_len as *const size_t;
-    rocksdb_get(db, readopts.clone(), key.as_ptr(), 4, val_len_ptr, err_ptr);
+    rocksdb_get(db,
+                readopts.clone(),
+                key.as_ptr(),
+                4,
+                val_len_ptr,
+                err_ptr);
     rocksdb_readoptions_destroy(readopts);
     assert!(err.is_null());
     rocksdb_close(db);
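For reference, a hedged sketch (not code from the commit): error_message above
copies a C error string into an owned Rust String and then frees the C
allocation. The stand-alone version below substitutes CString::into_raw and
CString::from_raw for the malloc'd pointer and libc::free used with real
RocksDB errors, so it runs without the C library; the names are illustrative.

use std::ffi::{CStr, CString};
use std::os::raw::c_char;
use std::str::from_utf8;

fn error_message(ptr: *const c_char) -> String {
    // Borrow the C string and copy it into an owned String...
    let c_str = unsafe { CStr::from_ptr(ptr) };
    let s = from_utf8(c_str.to_bytes()).unwrap().to_owned();
    // ...then release the allocation. The crate calls libc::free here; this
    // sketch hands the pointer back to CString instead.
    unsafe { drop(CString::from_raw(ptr as *mut c_char)) };
    s
}

fn main() {
    let raw = CString::new("IO error: lock unavailable").unwrap().into_raw();
    println!("{}", error_message(raw));
}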
src/lib.rs

(license header: the /* ... */ block comment is converted to // line comments,
as in src/comparator.rs)

 pub use ffi as rocksdb_ffi;
-pub use ffi::{new_bloom_filter, DBCompactionStyle, DBComparator};
-pub use rocksdb::{DB, DBVector, WriteBatch, Writable, Direction};
-pub use rocksdb_options::{Options, BlockBasedOptions};
+pub use ffi::{DBCompactionStyle, DBComparator, new_bloom_filter};
+pub use rocksdb::{DB, DBVector, Direction, Writable, WriteBatch};
+pub use rocksdb_options::{BlockBasedOptions, Options};
 pub use merge_operator::MergeOperands;
 pub mod rocksdb;
 pub mod ffi;
src/main.rs

(license header: the /* ... */ block comment is converted to // line comments,
as in src/comparator.rs)

 extern crate rocksdb;
-use rocksdb::{Options, DB, MergeOperands, Writable, };
+use rocksdb::{DB, MergeOperands, Options, Writable};

-//fn snapshot_test() {
+// fn snapshot_test() {
 //    let path = "_rust_rocksdb_iteratortest";
 //    {
 //        let mut db = DB::open_default(path).unwrap();

@@ -30,18 +30,21 @@ use rocksdb::{Options, DB, MergeOperands, Writable, };
 //        let mut view1 = snap.iterator();
 //        println!("See the output of the first iter");
 //        for (k,v) in view1.from_start() {
-//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(), std::str::from_utf8(v).unwrap());
+//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(),
+//                   std::str::from_utf8(v).unwrap());
 //        };
 //        for (k,v) in view1.from_start() {
-//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(), std::str::from_utf8(v).unwrap());
+//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(),
+//                   std::str::from_utf8(v).unwrap());
 //        };
 //        for (k,v) in view1.from_end() {
-//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(), std::str::from_utf8(v).unwrap());
+//          println!("Hello {}: {}", std::str::from_utf8(k).unwrap(),
+//                   std::str::from_utf8(v).unwrap());
 //        };
 //    }
 //    let opts = Options::new();
 //    assert!(DB::destroy(&opts, path).is_ok());
-//}
+// }

 #[cfg(not(feature = "valgrind"))]
 fn main() {

@@ -51,12 +54,10 @@ fn main() {
     match db.get(b"my key") {
         Ok(Some(value)) => {
             match value.to_utf8() {
                 Some(v) => println!("retrieved utf8 value: {}", v),
                 None => println!("did not read valid utf-8 out of the db"),
             }
         },
         Err(e) => println!("error retrieving value: {}", e),
         _ => panic!("value not present!"),
     }

@@ -66,8 +67,10 @@ fn main() {
     custom_merge();
 }

 fn concat_merge(_: &[u8],
                 existing_val: Option<&[u8]>,
                 operands: &mut MergeOperands)
                 -> Vec<u8> {
     let mut result: Vec<u8> = Vec::with_capacity(operands.size_hint().0);
     match existing_val {
         Some(v) => for e in v {

@@ -99,10 +102,8 @@ fn custom_merge() {
     match db.get(b"k1") {
         Ok(Some(value)) => {
             match value.to_utf8() {
                 Some(v) => println!("retrieved utf8 value: {}", v),
                 None => println!("did not read valid utf-8 out of the db"),
             }
         }
         Err(e) => println!("error retrieving value: {}", e),

@@ -126,13 +127,14 @@ fn main() {
         db.merge(b"k1", b"d");
         db.merge(b"k1", b"efg");
         db.merge(b"k1", b"h");
         db.get(b"k1")
           .map(|value| {
               match value.to_utf8() {
                   Some(v) => (),
                   None => panic!("value corrupted"),
               }
           })
-          .or_else(|e| {
-              panic!("error retrieving value: {}", e)
-          });
+          .or_else(|e| panic!("error retrieving value: {}", e));
         db.delete(b"k1");
     }
 }

@@ -142,10 +144,14 @@ fn main() {
 mod tests {
     use std::thread::sleep_ms;
-    use rocksdb::{BlockBasedOptions, Options, DB, MergeOperands, new_bloom_filter, Writable};
+    use rocksdb::{BlockBasedOptions, DB, MergeOperands, Options, Writable,
+                  new_bloom_filter};
     use rocksdb::DBCompactionStyle::DBUniversalCompaction;

     fn tuned_for_somebody_elses_disk(path: &str,
                                      opts: &mut Options,
                                      blockopts: &mut BlockBasedOptions)
                                      -> DB {
         opts.create_if_missing(true);
         opts.set_max_open_files(10000);
         opts.set_use_fsync(false);

@@ -168,44 +174,44 @@ mod tests {
         opts.set_disable_auto_compactions(true);
         let filter = new_bloom_filter(10);
-        //opts.set_filter(filter);
+        // opts.set_filter(filter);
         DB::open(&opts, path).unwrap()
     }

     (the following /* TODO(tyler) unstable ... */ block comment around the two
      benchmarks is converted, line by line, into // comments:)

     // TODO(tyler) unstable
     // #[bench]
     // fn a_writes(b: &mut Bencher) {
     //     // dirty hack due to parallel tests causing contention.
     //     sleep_ms(1000);
     //     let path = "_rust_rocksdb_optimizetest";
     //     let mut opts = Options::new();
     //     let mut blockopts = BlockBasedOptions::new();
     //     let mut db = tuned_for_somebody_elses_disk(path, &mut opts, &mut blockopts);
     //     let mut i = 0 as u64;
     //     b.iter(|| {
     //         db.put(i.to_string().as_bytes(), b"v1111");
     //         i += 1;
     //     });
     // }
     //
     // #[bench]
     // fn b_reads(b: &mut Bencher) {
     //     let path = "_rust_rocksdb_optimizetest";
     //     let mut opts = Options::new();
     //     let mut blockopts = BlockBasedOptions::new();
     //     {
     //         let db = tuned_for_somebody_elses_disk(path, &mut opts, &mut blockopts);
     //         let mut i = 0 as u64;
     //         b.iter(|| {
     //             db.get(i.to_string().as_bytes()).on_error( |e| {
     //                 println!("error: {}", e);
     //                 e
     //             });
     //             i += 1;
     //         });
     //     }
     //     DB::destroy(&opts, path).is_ok();
     // }
     //
 }
src/merge_operator.rs

(license header: the /* ... */ block comment is converted to // line comments,
as in src/comparator.rs)

 extern crate libc;
 use self::libc::{c_char, c_int, c_void, size_t};
 use std::ffi::CString;

@@ -30,7 +30,7 @@ pub struct MergeOperatorCallback {
 pub extern "C" fn destructor_callback(raw_cb: *mut c_void) {
     // turn this back into a local variable so rust will reclaim it
     let _: Box<MergeOperatorCallback> = unsafe { mem::transmute(raw_cb) };
 }

@@ -57,17 +57,16 @@ pub extern "C" fn full_merge_callback(raw_cb: *mut c_void,
     unsafe {
         let cb: &mut MergeOperatorCallback =
             &mut *(raw_cb as *mut MergeOperatorCallback);
         let operands = &mut MergeOperands::new(operands_list,
                                                operands_list_len,
                                                num_operands);
         let key: &[u8] = slice::from_raw_parts(raw_key as *const u8,
                                                key_len as usize);
         let oldval: &[u8] = slice::from_raw_parts(existing_value as *const u8,
                                                   existing_value_len as usize);
         let mut result = (cb.merge_fn)(key, Some(oldval), operands);
         result.shrink_to_fit();
-        //TODO(tan) investigate zero-copy techniques to improve performance
+        // TODO(tan) investigate zero-copy techniques to improve performance
         let buf = libc::malloc(result.len() as size_t);
         assert!(!buf.is_null());
         *new_value_length = result.len() as size_t;

@@ -92,10 +91,11 @@ pub extern "C" fn partial_merge_callback(raw_cb: *mut c_void,
         let operands = &mut MergeOperands::new(operands_list,
                                                operands_list_len,
                                                num_operands);
         let key: &[u8] = slice::from_raw_parts(raw_key as *const u8,
                                                key_len as usize);
         let mut result = (cb.merge_fn)(key, None, operands);
         result.shrink_to_fit();
-        //TODO(tan) investigate zero-copy techniques to improve performance
+        // TODO(tan) investigate zero-copy techniques to improve performance
         let buf = libc::malloc(result.len() as size_t);
         assert!(!buf.is_null());
         *new_value_length = 1 as size_t;

@@ -168,7 +168,7 @@ fn test_provided_merge(new_key: &[u8],
             for e in v {
                 result.push(*e);
             }
-        },
+        }
         None => (),
     }
     for op in operands {

@@ -199,13 +199,13 @@ fn mergetest() {
     match db.get(b"k1") {
         Ok(Some(value)) => {
             match value.to_utf8() {
                 Some(v) => println!("retrieved utf8 value: {}", v),
                 None => println!("did not read valid utf-8 out of the db"),
             }
         }
         Err(e) => {
             println!("error reading value")
         },
         _ => panic!("value not present"),
     }
src/rocksdb.rs

(license header: the /* ... */ block comment is converted to // line comments,
as in src/comparator.rs)

 extern crate libc;
 use std::collections::BTreeMap;
-use std::ffi::{CString, CStr};
+use std::ffi::{CStr, CString};
 use std::fs;
 use std::io;
 use std::ops::Deref;

@@ -87,12 +87,21 @@ impl <'a> Iterator for SubDBIterator<'a> {
             let key_len_ptr: *mut size_t = &mut key_len;
             let mut val_len: size_t = 0;
             let val_len_ptr: *mut size_t = &mut val_len;
             let key_ptr = unsafe { rocksdb_ffi::rocksdb_iter_key(native_iter, key_len_ptr) };
             let key = unsafe { slice::from_raw_parts(key_ptr, key_len as usize) };
             let val_ptr = unsafe { rocksdb_ffi::rocksdb_iter_value(native_iter, val_len_ptr) };
             let val = unsafe { slice::from_raw_parts(val_ptr, val_len as usize) };
             Some((key.to_vec().into_boxed_slice(), val.to_vec().into_boxed_slice()))
         } else {
             None
         }

@@ -100,26 +109,41 @@ impl <'a> Iterator for SubDBIterator<'a> {
 }

 impl DBIterator {
-    //TODO alias db & opts to different lifetimes, and DBIterator to the db's lifetime
+    // TODO alias db & opts to different lifetimes, and DBIterator to the db's
+    // lifetime
     fn new(db: &DB, readopts: &ReadOptions) -> DBIterator {
         unsafe {
             let iterator = rocksdb_ffi::rocksdb_create_iterator(db.inner, readopts.inner);
             rocksdb_ffi::rocksdb_iter_seek_to_first(iterator);
-            DBIterator { inner: iterator, direction: Direction::forward, just_seeked: true }
+            DBIterator {
+                inner: iterator,
+                direction: Direction::forward,
+                just_seeked: true,
+            }
         }
     }

     fn new_cf(db: &DB, cf_name: &str, readopts: &ReadOptions) -> Result<DBIterator, String> {
         let cf = db.cfs.get(cf_name);
         if cf.is_none() {
             return Err(format!("Invalid column family: {}", cf_name).to_string());
         }
         unsafe {
             let iterator = rocksdb_ffi::rocksdb_create_iterator_cf(db.inner, readopts.inner, *cf.unwrap());
             rocksdb_ffi::rocksdb_iter_seek_to_first(iterator);
-            Ok(DBIterator { inner: iterator, direction: Direction::forward, just_seeked: true })
+            Ok(DBIterator {
+                inner: iterator,
+                direction: Direction::forward,
+                just_seeked: true,
+            })
         }
     }

@@ -127,24 +151,35 @@ impl DBIterator {
         self.just_seeked = true;
         unsafe { rocksdb_ffi::rocksdb_iter_seek_to_first(self.inner); };
-        SubDBIterator { iter: self, direction: Direction::forward }
+        SubDBIterator {
+            iter: self,
+            direction: Direction::forward,
+        }
     }

     pub fn from_end(&mut self) -> SubDBIterator {
         self.just_seeked = true;
         unsafe { rocksdb_ffi::rocksdb_iter_seek_to_last(self.inner); };
-        SubDBIterator { iter: self, direction: Direction::reverse }
+        SubDBIterator {
+            iter: self,
+            direction: Direction::reverse,
+        }
     }

     pub fn from(&mut self, key: &[u8], dir: Direction) -> SubDBIterator {
         self.just_seeked = true;
         unsafe {
             rocksdb_ffi::rocksdb_iter_seek(self.inner, key.as_ptr(), key.len() as size_t);
         }
-        SubDBIterator { iter: self, direction: dir, }
+        SubDBIterator { iter: self, direction: dir }
     }
 }

@@ -158,8 +193,13 @@ impl Drop for DBIterator {
 impl <'a> Snapshot<'a> {
     pub fn new(db: &DB) -> Snapshot {
         let snapshot = unsafe { rocksdb_ffi::rocksdb_create_snapshot(db.inner) };
-        Snapshot { db: db, inner: snapshot }
+        Snapshot {
+            db: db,
+            inner: snapshot,
+        }
     }

     pub fn iterator(&self) -> DBIterator {

@@ -180,9 +220,17 @@ impl <'a> Drop for Snapshot<'a> {
 // This is for the DB and write batches to share the same API
 pub trait Writable {
     fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
     fn put_cf(&self, cf: DBCFHandle, key: &[u8], value: &[u8]) -> Result<(), String>;
     fn merge(&self, key: &[u8], value: &[u8]) -> Result<(), String>;
     fn merge_cf(&self, cf: DBCFHandle, key: &[u8], value: &[u8]) -> Result<(), String>;
     fn delete(&self, key: &[u8]) -> Result<(), String>;
     fn delete_cf(&self, cf: DBCFHandle, key: &[u8]) -> Result<(), String>;
 }

@@ -198,17 +246,22 @@ impl DB {
         DB::open_cf(opts, path, &[])
     }

     pub fn open_cf(opts: &Options, path: &str, cfs: &[&str]) -> Result<DB, String> {
         let cpath = match CString::new(path.as_bytes()) {
             Ok(c) => c,
-            Err(_) => return Err("Failed to convert path to CString when opening rocksdb".to_string()),
+            Err(_) =>
+                return Err("Failed to convert path to CString when \
+                            opening rocksdb"
+                               .to_string()),
         };
         let cpath_ptr = cpath.as_ptr();
         let ospath = Path::new(path);
         match fs::create_dir_all(&ospath) {
             Err(e) => return Err("Failed to create rocksdb directory.".to_string()),
             Ok(_) => (),
         }

@@ -230,24 +283,28 @@ impl DB {
         // We need to store our CStrings in an intermediate vector
         // so that their pointers remain valid.
         let c_cfs: Vec<CString> = cfs_v.iter()
                                        .map(|cf| CString::new(cf.as_bytes()).unwrap())
                                        .collect();
         let cfnames: Vec<*const i8> = c_cfs.iter().map(|cf| cf.as_ptr()).collect();
         // These handles will be populated by DB.
         let mut cfhandles: Vec<rocksdb_ffi::DBCFHandle> =
             cfs_v.iter().map(|_| rocksdb_ffi::DBCFHandle(0 as *mut c_void)).collect();
         // TODO(tyler) allow options to be passed in.
         let cfopts: Vec<rocksdb_ffi::DBOptions> =
             cfs_v.iter().map(|_| unsafe { rocksdb_ffi::rocksdb_options_create() }).collect();
         // Prepare to ship to C.
         let copts: *const rocksdb_ffi::DBOptions = cfopts.as_ptr();

@@ -262,7 +319,8 @@ impl DB {
         for handle in cfhandles.iter() {
             if handle.0.is_null() {
                 return Err("Received null column family handle from DB.".to_string());
             }
         }

@@ -278,7 +336,10 @@ impl DB {
             return Err("Could not initialize database.".to_string());
         }
-        Ok(DB { inner: db, cfs: cfMap })
+        Ok(DB {
+            inner: db,
+            cfs: cfMap,
+        })
     }

     pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {

@@ -318,31 +379,40 @@ impl DB {
         let mut err: *const i8 = 0 as *const i8;
         let err_ptr: *mut *const i8 = &mut err;
         unsafe {
             rocksdb_ffi::rocksdb_write(self.inner, writeopts.clone(), batch.inner, err_ptr);
             rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
         }
         if !err.is_null() {
             return Err(error_message(err));
         }
         return Ok(());
     }

     pub fn get(&self, key: &[u8]) -> Result<Option<DBVector>, String> {
         unsafe {
             let readopts = rocksdb_ffi::rocksdb_readoptions_create();
             if readopts.0.is_null() {
-                return Err("Unable to create rocksdb read \
-                            options.  This is a fairly trivial call, and its failure \
-                            may be indicative of a mis-compiled or mis-loaded rocksdb \
-                            library.".to_string());
+                return Err("Unable to create rocksdb read options.  This is \
+                            a fairly trivial call, and its failure may be \
+                            indicative of a mis-compiled or mis-loaded \
+                            rocksdb library."
+                               .to_string());
             }
             let val_len: size_t = 0;
             let val_len_ptr = &val_len as *const size_t;
             let mut err: *const i8 = 0 as *const i8;
             let err_ptr: *mut *const i8 = &mut err;
             let val = rocksdb_ffi::rocksdb_get(self.inner, readopts.clone(), key.as_ptr(),
                                                key.len() as size_t, val_len_ptr, err_ptr) as *mut u8;
             rocksdb_ffi::rocksdb_readoptions_destroy(readopts);
             if !err.is_null() {
                 return Err(error_message(err));

@@ -356,22 +426,31 @@ impl DB {
     (get_cf follows the same pattern with rocksdb_ffi::rocksdb_get_cf and the
      DBCFHandle argument; the hunk re-wraps the long error string and the call
      in the same way)

@@ -386,18 +465,25 @@ impl DB {
     pub fn create_cf(&mut self, name: &str, opts: &Options) -> Result<DBCFHandle, String> {
         let cname = match CString::new(name.as_bytes()) {
             Ok(c) => c,
-            Err(_) => return Err("Failed to convert path to CString when opening rocksdb".to_string()),
+            Err(_) =>
+                return Err("Failed to convert path to CString when \
+                            opening rocksdb"
+                               .to_string()),
         };
         let cname_ptr = cname.as_ptr();
         let mut err: *const i8 = 0 as *const i8;
         let err_ptr: *mut *const i8 = &mut err;
         let cf_handler = unsafe {
             let cf_handler = rocksdb_ffi::rocksdb_create_column_family(self.inner, opts.inner,
                                                                        cname_ptr, err_ptr);
             self.cfs.insert(name.to_string(), cf_handler);
             cf_handler
         };

@@ -416,7 +502,9 @@ impl DB {
         let mut err: *const i8 = 0 as *const i8;
         let err_ptr: *mut *const i8 = &mut err;
         unsafe {
             rocksdb_ffi::rocksdb_drop_column_family(self.inner, *cf.unwrap(), err_ptr);
         }
         if !err.is_null() {
             return Err(error_message(err));

@@ -450,9 +538,13 @@ impl Writable for DB {
         let writeopts = rocksdb_ffi::rocksdb_writeoptions_create();
         let mut err: *const i8 = 0 as *const i8;
         let err_ptr: *mut *const i8 = &mut err;
         rocksdb_ffi::rocksdb_put(self.inner, writeopts.clone(), key.as_ptr(),
                                  key.len() as size_t, value.as_ptr(),
                                  value.len() as size_t, err_ptr);
         rocksdb_ffi::rocksdb_writeoptions_destroy(writeopts);
         if !err.is_null() {
             return Err(error_message(err));

     (the put_cf, merge, merge_cf, delete and delete_cf hunks at -461, -482,
      -493, -515 and -530 re-wrap the matching rocksdb_ffi calls in the same
      way)

@@ -545,18 +664,14 @@ impl Writable for DB {
 impl WriteBatch {
     pub fn new() -> WriteBatch {
         WriteBatch {
             inner: unsafe { rocksdb_ffi::rocksdb_writebatch_create() },
         }
     }
 }

 impl Drop for WriteBatch {
     fn drop(&mut self) {
         unsafe { rocksdb_ffi::rocksdb_writebatch_destroy(self.inner) }
     }
 }

@@ -574,17 +689,26 @@ impl Drop for DB {
 impl Writable for WriteBatch {
     fn put(&self, key: &[u8], value: &[u8]) -> Result<(), String> {
         unsafe {
             rocksdb_ffi::rocksdb_writebatch_put(self.inner,
                                                 key.as_ptr(),
                                                 key.len() as size_t,
                                                 value.as_ptr(),
                                                 value.len() as size_t);
             Ok(())
         }
     }

     (the WriteBatch put_cf, merge, merge_cf, delete and delete_cf hunks at
      -592, -610 and -619 re-wrap the matching rocksdb_writebatch_* calls in
      the same way)

@@ -628,21 +763,19 @@ impl Writable for WriteBatch {
 impl Drop for ReadOptions {
     fn drop(&mut self) {
         unsafe { rocksdb_ffi::rocksdb_readoptions_destroy(self.inner) }
     }
 }

 impl ReadOptions {
     fn new() -> ReadOptions {
         unsafe {
             ReadOptions { inner: rocksdb_ffi::rocksdb_readoptions_create() }
         }
     }
     // TODO add snapshot setting here
     // TODO add snapshot wrapper structs with proper destructors;
     // that struct needs an "iterator" impl too.
     fn fill_cache(&mut self, v: bool) {
         unsafe {
             rocksdb_ffi::rocksdb_readoptions_set_fill_cache(self.inner, v);

@@ -651,7 +784,8 @@ impl ReadOptions {
     fn set_snapshot(&mut self, snapshot: &Snapshot) {
         unsafe {
             rocksdb_ffi::rocksdb_readoptions_set_snapshot(self.inner, snapshot.inner);
         }
     }
 }

@@ -715,8 +849,10 @@ fn errors_do_stuff() {
     let opts = Options::new();
     // The DB will still be open when we try to destroy and the lock should fail
     match DB::destroy(&opts, path) {
-        Err(ref s) => assert!(s == "IO error: lock _rust_rocksdb_error/LOCK: No locks available"),
-        Ok(_) => panic!("should fail")
+        Err(ref s) => assert!(s ==
+                              "IO error: lock _rust_rocksdb_error/LOCK: No \
+                               locks available"),
+        Ok(_) => panic!("should fail"),
     }
 }

@@ -725,7 +861,8 @@ fn writebatch_works() {
     let path = "_rust_rocksdb_writebacktest";
     {
         let mut db = DB::open_default(path).unwrap();
-        { // test put
+        {
+            // test put
             let mut batch = WriteBatch::new();
             assert!(db.get(b"k1").unwrap().is_none());
             batch.put(b"k1", b"v1111");

@@ -735,7 +872,8 @@ fn writebatch_works() {
             let r: Result<Option<DBVector>, String> = db.get(b"k1");
             assert!(r.unwrap().unwrap().to_utf8().unwrap() == "v1111");
         }
-        { // test delete
+        {
+            // test delete
             let mut batch = WriteBatch::new();
             batch.delete(b"k1");
             let p = db.write(batch);

@@ -759,8 +897,10 @@ fn iterator_test() {
         let p = db.put(b"k3", b"v3333");
         assert!(p.is_ok());
         let mut iter = db.iterator();
         for (k, v) in iter.from_start() {
-            println!("Hello {}: {}", from_utf8(&*k).unwrap(), from_utf8(&*v).unwrap());
+            println!("Hello {}: {}",
+                     from_utf8(&*k).unwrap(),
+                     from_utf8(&*v).unwrap());
         }
     }
     let opts = Options::new();
src/rocksdb_options.rs
View file @
edf9421f
//
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
extern crate libc;
use self::libc::{c_int, size_t};
use std::ffi::CString;
use std::mem;

use rocksdb_ffi;
use merge_operator::{self, MergeOperands, MergeOperatorCallback,
                     full_merge_callback, partial_merge_callback};
use comparator::{self, ComparatorCallback, compare_callback};
...
@@ -49,7 +49,9 @@ impl Drop for BlockBasedOptions {
impl BlockBasedOptions {
    pub fn new() -> BlockBasedOptions {
        let block_opts = unsafe {
            rocksdb_ffi::rocksdb_block_based_options_create()
        };
        let rocksdb_ffi::DBBlockBasedTableOptions(opt_ptr) = block_opts;
        if opt_ptr.is_null() {
            panic!("Could not create rocksdb block based options".to_string());
...
@@ -59,34 +61,35 @@ impl BlockBasedOptions {
    pub fn set_block_size(&mut self, size: u64) {
        unsafe {
            rocksdb_ffi::rocksdb_block_based_options_set_block_size(self.inner,
                                                                    size);
        }
    }

    // TODO figure out how to create these in a Rusty way
    // /pub fn set_filter(&mut self, filter: rocksdb_ffi::DBFilterPolicy) {
    // /    unsafe {
    // /        rocksdb_ffi::rocksdb_block_based_options_set_filter_policy(
    // /            self.inner, filter);
    // /    }
    // /}
    // /pub fn set_cache(&mut self, cache: rocksdb_ffi::DBCache) {
    // /    unsafe {
    // /        rocksdb_ffi::rocksdb_block_based_options_set_block_cache(
    // /            self.inner, cache);
    // /    }
    // /}
    // /pub fn set_cache_compressed(&mut self, cache: rocksdb_ffi::DBCache) {
    // /    unsafe {
    // /        rocksdb_ffi::
    // /            rocksdb_block_based_options_set_block_cache_compressed(
    // /            self.inner, cache);
    // /    }
    // /}
}

impl Options {
    pub fn new() -> Options {
...
@@ -96,18 +99,19 @@ impl Options {
        if opt_ptr.is_null() {
            panic!("Could not create rocksdb options".to_string());
        }
        Options { inner: opts }
    }

    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            rocksdb_ffi::rocksdb_options_increase_parallelism(self.inner,
                                                              parallelism);
        }
    }

    pub fn optimize_level_style_compaction(&mut self,
                                           memtable_memory_budget: i32) {
        unsafe {
            rocksdb_ffi::rocksdb_options_optimize_level_style_compaction(
                self.inner,
                memtable_memory_budget);
...
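A minimal sketch of wiring these Options setters together before opening a database; the budget value and path are illustrative assumptions, not taken from this commit.

use rocksdb::{DB, Options};

fn open_tuned() -> DB {
    let mut opts = Options::new();
    opts.increase_parallelism(4);
    // Assumed to be a byte budget for the memtables, as in RocksDB's C API.
    opts.optimize_level_style_compaction(512 * 1024 * 1024);
    DB::open(&opts, "_rust_rocksdb_opts_sketch").unwrap()
}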
@@ -141,7 +145,9 @@ impl Options {
        }
    }

    pub fn add_comparator<'a>(&mut self,
                              name: &str,
                              compare_fn: fn(&[u8], &[u8]) -> i32) {
        let cb = Box::new(ComparatorCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            f: compare_fn,
...
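A sketch of plugging a custom ordering into add_comparator via the fn(&[u8], &[u8]) -> i32 signature shown above; the comparator name and path are made up for illustration.

use rocksdb::{DB, Options};
use std::cmp::Ordering;

// Orders keys in reverse lexicographic order.
fn reverse_compare(a: &[u8], b: &[u8]) -> i32 {
    match b.cmp(a) {
        Ordering::Less => -1,
        Ordering::Equal => 0,
        Ordering::Greater => 1,
    }
}

fn open_with_reverse_order() -> DB {
    let mut opts = Options::new();
    opts.add_comparator("reverse comparator", reverse_compare);
    DB::open(&opts, "_rust_rocksdb_cmp_sketch").unwrap()
}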
@@ -160,8 +166,8 @@ impl Options {
    pub fn set_block_cache_size_mb(&mut self, cache_size: u64) {
        unsafe {
            rocksdb_ffi::rocksdb_options_optimize_for_point_lookup(self.inner,
                                                                   cache_size);
        }
    }
...
@@ -184,8 +190,7 @@ impl Options {
    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_bytes_per_sync(self.inner, nbytes);
        }
    }
...
@@ -204,8 +209,8 @@ impl Options {
    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_table_cache_numshardbits(self.inner,
                                                                      nbits);
        }
    }
...
@@ -218,22 +223,22 @@ impl Options {
    pub fn set_max_write_buffer_number(&mut self, nbuf: c_int) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_max_write_buffer_number(self.inner,
                                                                     nbuf);
        }
    }

    pub fn set_write_buffer_size(&mut self, size: size_t) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_write_buffer_size(self.inner,
                                                               size);
        }
    }

    pub fn set_target_file_size_base(&mut self, size: u64) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_target_file_size_base(self.inner,
                                                                   size);
        }
    }
...
@@ -258,10 +263,11 @@ impl Options {
        }
    }

    pub fn set_compaction_style(&mut self,
                                style: rocksdb_ffi::DBCompactionStyle) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_compaction_style(self.inner,
                                                              style);
        }
    }
...
@@ -274,15 +280,14 @@ impl Options {
    pub fn set_max_background_flushes(&mut self, n: c_int) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_max_background_flushes(self.inner,
                                                                    n);
        }
    }

    pub fn set_filter_deletes(&mut self, filter: bool) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_filter_deletes(self.inner, filter);
        }
    }
...
@@ -299,7 +304,8 @@ impl Options {
        }
    }

    pub fn set_block_based_table_factory(&mut self,
                                         factory: &BlockBasedOptions) {
        unsafe {
            rocksdb_ffi::rocksdb_options_set_block_based_table_factory(
                self.inner,
                factory.inner);
        }
...
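A sketch combining the two pieces above: build BlockBasedOptions, then hand them to Options::set_block_based_table_factory. It assumes BlockBasedOptions is re-exported at the crate root; the block size and path are illustrative.

use rocksdb::{BlockBasedOptions, DB, Options};

fn open_with_block_opts() -> DB {
    let mut block_opts = BlockBasedOptions::new();
    block_opts.set_block_size(16 * 1024); // 16 KiB data blocks
    let mut opts = Options::new();
    opts.set_block_based_table_factory(&block_opts);
    DB::open(&opts, "_rust_rocksdb_block_sketch").unwrap()
}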
test/test_column_family.rs
View file @
edf9421f
//
// Copyright 2014 Tyler Neely
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
use rocksdb::{DB, MergeOperands, Options, Writable};

#[test]
pub fn test_column_family() {
...
@@ -30,7 +30,7 @@ pub fn test_column_family() {
            Ok(_) => println!("cf1 created successfully"),
            Err(e) => {
                panic!("could not create column family: {}", e);
            }
        }
    }
...
@@ -39,9 +39,11 @@ pub fn test_column_family() {
        let mut opts = Options::new();
        opts.add_merge_operator("test operator", test_provided_merge);
        match DB::open(&opts, path) {
            Ok(_) => panic!("should not have opened DB successfully without \
                             specifying column families"),
            Err(e) => assert!(e.starts_with("Invalid argument: You have to \
                                             open all column families.")),
        }
    }
...
@@ -62,12 +64,13 @@ pub fn test_column_family() {
            Ok(db) => {
                println!("successfully opened db with column family");
                db
            }
            Err(e) => panic!("failed to open db with column family: {}", e),
        };
        let cf1 = *db.cf_handle("cf1").unwrap();
        assert!(db.put_cf(cf1, b"k1", b"v1").is_ok());
        assert!(db.get_cf(cf1, b"k1").unwrap().unwrap().to_utf8().unwrap() ==
                "v1");
        let p = db.put_cf(cf1, b"k1", b"a");
        assert!(p.is_ok());
        db.merge_cf(cf1, b"k1", b"b").unwrap();
...
@@ -80,12 +83,10 @@ pub fn test_column_family() {
        match db.get(b"k1") {
            Ok(Some(value)) => {
                match value.to_utf8() {
                    Some(v) => println!("retrieved utf8 value: {}", v),
                    None => println!("did not read valid utf-8 out of the db"),
                }
            }
            Err(e) => println!("error reading value"),
            _ => panic!("value not present!"),
        }
...
@@ -124,7 +125,7 @@ fn test_provided_merge(_: &[u8],
            for e in v {
                result.push(*e);
            }
        }
        None => (),
    }
    for op in operands {
...
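A hedged sketch of a complete merge operator. The full callback signature is inferred from test_provided_merge above (existing value as Option<&[u8]>, operands as &mut MergeOperands, returning Vec<u8>); the operator name and path are invented, and merge is assumed to come from the Writable trait.

use rocksdb::{DB, MergeOperands, Options, Writable};

// Concatenates the existing value with every queued operand.
fn concat_merge(_new_key: &[u8],
                existing_val: Option<&[u8]>,
                operands: &mut MergeOperands)
                -> Vec<u8> {
    let mut result: Vec<u8> = Vec::new();
    if let Some(v) = existing_val {
        for e in v {
            result.push(*e);
        }
    }
    for op in operands {
        for e in op {
            result.push(*e);
        }
    }
    result
}

fn merge_sketch() {
    let mut opts = Options::new();
    opts.add_merge_operator("concat operator", concat_merge);
    let db = DB::open(&opts, "_rust_rocksdb_merge_sketch").unwrap();
    db.put(b"k1", b"a").unwrap();
    db.merge(b"k1", b"b").unwrap();
}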
test/test_iterator.rs
View file @
edf9421f
use rocksdb::{DB, Direction, Options, Writable};

fn cba(input: &Box<[u8]>) -> Box<[u8]> {
    input.iter().cloned().collect::<Vec<_>>().into_boxed_slice()
...
@@ -8,14 +8,14 @@ fn cba(input: &Box<[u8]>) -> Box<[u8]> {
pub fn test_iterator() {
    let path = "_rust_rocksdb_iteratortest";
    {
        let k1: Box<[u8]> = b"k1".to_vec().into_boxed_slice();
        let k2: Box<[u8]> = b"k2".to_vec().into_boxed_slice();
        let k3: Box<[u8]> = b"k3".to_vec().into_boxed_slice();
        let k4: Box<[u8]> = b"k4".to_vec().into_boxed_slice();
        let v1: Box<[u8]> = b"v1111".to_vec().into_boxed_slice();
        let v2: Box<[u8]> = b"v2222".to_vec().into_boxed_slice();
        let v3: Box<[u8]> = b"v3333".to_vec().into_boxed_slice();
        let v4: Box<[u8]> = b"v4444".to_vec().into_boxed_slice();
        let db = DB::open_default(path).unwrap();
        let p = db.put(&*k1, &*v1);
        assert!(p.is_ok());
...
@@ -24,7 +24,9 @@ pub fn test_iterator() {
        let p = db.put(&*k3, &*v3);
        assert!(p.is_ok());
        let mut view1 = db.iterator();
        let expected = vec![(cba(&k1), cba(&v1)),
                            (cba(&k2), cba(&v2)),
                            (cba(&k3), cba(&v3))];
        {
            let iterator1 = view1.from_start();
            assert_eq!(iterator1.collect::<Vec<_>>(), expected);
...
@@ -86,7 +88,10 @@ pub fn test_iterator() {
        let p = db.put(&*k4, &*v4);
        assert!(p.is_ok());
        let mut view3 = db.iterator();
        let expected2 = vec![(cba(&k1), cba(&v1)),
                             (cba(&k2), cba(&v2)),
                             (cba(&k3), cba(&v3)),
                             (cba(&k4), cba(&v4))];
        {
            let iterator1 = view1.from_start();
            assert_eq!(iterator1.collect::<Vec<_>>(), expected);
...
@@ -97,7 +102,9 @@ pub fn test_iterator() {
        }
        {
            let iterator1 = view3.from(b"k2", Direction::forward);
            let expected = vec![(cba(&k2), cba(&v2)),
                                (cba(&k3), cba(&v3)),
                                (cba(&k4), cba(&v4))];
            assert_eq!(iterator1.collect::<Vec<_>>(), expected);
        }
        {
...
@@ -109,4 +116,3 @@ pub fn test_iterator() {
    let opts = Options::new();
    assert!(DB::destroy(&opts, path).is_ok());
}
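A sketch pulling the iterator APIs used in this test into one place: from_start for a full scan and from(key, Direction::forward) for a ranged scan. The path is illustrative.

use rocksdb::{DB, Direction, Writable};
use std::str::from_utf8;

fn iterate_sketch() {
    let db = DB::open_default("_rust_rocksdb_iter_sketch").unwrap();
    db.put(b"k1", b"v1").unwrap();
    db.put(b"k2", b"v2").unwrap();
    db.put(b"k3", b"v3").unwrap();

    let mut iter = db.iterator();
    // Walk every pair from the first key.
    for (k, v) in iter.from_start() {
        println!("{}: {}", from_utf8(&*k).unwrap(), from_utf8(&*v).unwrap());
    }
    // Walk forward starting at "k2".
    for (k, v) in iter.from(b"k2", Direction::forward) {
        println!("{}: {}", from_utf8(&*k).unwrap(), from_utf8(&*v).unwrap());
    }
}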
test/test_multithreaded.rs
View file @
edf9421f
use rocksdb::{DB, Options, Writable};
use std::thread;
use std::sync::Arc;
...
@@ -14,21 +14,21 @@ pub fn test_multithreaded() {
    db.put(b"key", b"value1").unwrap();

    let db1 = db.clone();
    let j1 = thread::spawn(move || {
        for _ in 1..N {
            db1.put(b"key", b"value1").unwrap();
        }
    });

    let db2 = db.clone();
    let j2 = thread::spawn(move || {
        for _ in 1..N {
            db2.put(b"key", b"value2").unwrap();
        }
    });

    let db3 = db.clone();
    let j3 = thread::spawn(move || {
        for _ in 1..N {
            match db3.get(b"key") {
                Ok(Some(v)) => {
...
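A condensed sketch of the sharing pattern in this test: wrap the DB in an Arc (as the std::sync::Arc import suggests), clone the handle per thread, and join at the end. The iteration counts and path are illustrative.

use rocksdb::{DB, Writable};
use std::sync::Arc;
use std::thread;

fn concurrent_sketch() {
    let db = Arc::new(DB::open_default("_rust_rocksdb_thread_sketch").unwrap());

    let db1 = db.clone();
    let writer = thread::spawn(move || {
        for _ in 0..1000 {
            db1.put(b"key", b"value1").unwrap();
        }
    });

    let db2 = db.clone();
    let reader = thread::spawn(move || {
        for _ in 0..1000 {
            let _ = db2.get(b"key");
        }
    });

    writer.join().unwrap();
    reader.join().unwrap();
}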