fangzongwu / rust-rocksdb · Commits · 130c764e

Commit 130c764e, authored Jul 24, 2017 by follitude; committed by zhangjinpeng1987, Jul 24, 2017
refactor options (#102)

parent 240e5b66
Showing 18 changed files with 758 additions and 619 deletions
Files changed:

  librocksdb_sys/src/lib.rs            +94   -97
  src/lib.rs                            +3    -2
  src/main.rs                          +24   -23
  src/merge_operator.rs                 +9    -4
  src/rocksdb.rs                       +46   -41
  src/rocksdb_options.rs              +359  -318
  tests/test_column_family.rs          +22   -17
  tests/test_compact_range.rs           +2    -2
  tests/test_compaction_filter.rs      +17    -9
  tests/test_delete_range.rs            +5    -5
  tests/test_event_listener.rs          +2    -2
  tests/test_ingest_external_file.rs   +11    -8
  tests/test_iterator.rs               +21   -11
  tests/test_prefix_extractor.rs       +11    -6
  tests/test_rocksdb_options.rs       +108   -62
  tests/test_slice_transform.rs        +12    -6
  tests/test_statistics.rs              +1    -1
  tests/test_table_properties.rs       +11    -5
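The diff below splits the wrapper's single Options type into DBOptions (whole-DB settings) and ColumnFamilyOptions (per-column-family settings), renames the opaque FFI options type in librocksdb_sys from DBOptions to Options, and threads the new types through DB::open_cf, create_cf, SstFileWriter and BackupEngine. As a rough sketch of the post-refactor call pattern, pieced together from the calls visible in this diff (the path and the buffer size below are made up for illustration):

    // Sketch only: assumes the re-exports shown in the src/lib.rs hunk below.
    extern crate rocksdb;

    use rocksdb::{ColumnFamilyOptions, DBOptions, Writable, DB};

    fn main() {
        // DB-wide settings now live on DBOptions...
        let mut opts = DBOptions::new();
        opts.create_if_missing(true);

        // ...while per-column-family tuning moves to ColumnFamilyOptions.
        let mut cf_opts = ColumnFamilyOptions::new();
        cf_opts.set_write_buffer_size(64 * 1024 * 1024);

        // open_cf now takes the column family names and their options by value.
        let db = DB::open_cf(opts, "_rust_rocksdb_refactor_demo", vec!["default"], vec![cf_opts])
            .unwrap();
        db.put(b"key", b"value").unwrap();
    }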
librocksdb_sys/src/lib.rs

Every change in this file renames the opaque FFI options type from DBOptions to Options in the option-related declarations; each hunk below is shown as it reads after the rename.

@@ -20,7 +20,7 @@ extern crate tempdir;
use libc::{c_char, c_uchar, c_int, c_void, size_t, uint8_t, uint32_t, uint64_t, c_double};
use std::ffi::CStr;

pub enum Options {}
pub enum DBInstance {}
pub enum DBWriteOptions {}
pub enum DBReadOptions {}

@@ -241,10 +241,10 @@ macro_rules! ffi_try {
// TODO audit the use of boolean arguments, b/c I think they need to be u8
// instead...
extern "C" {
    pub fn crocksdb_get_options_cf(db: *mut DBInstance, cf: *mut DBCFHandle) -> *mut Options;
    pub fn crocksdb_options_create() -> *mut Options;
    pub fn crocksdb_options_copy(opts: *const Options) -> *mut Options;
    pub fn crocksdb_options_destroy(opts: *mut Options);
    pub fn crocksdb_cache_create_lru(capacity: size_t) -> *mut DBCache;
    pub fn crocksdb_cache_destroy(cache: *mut DBCache);
    pub fn crocksdb_block_based_options_create() -> *mut DBBlockBasedTableOptions;

@@ -273,86 +273,84 @@ extern "C" {
    pub fn crocksdb_block_based_options_set_whole_key_filtering(ck_options: *mut DBBlockBasedTableOptions, doit: bool);
    pub fn crocksdb_options_set_block_based_table_factory(options: *mut Options, block_options: *mut DBBlockBasedTableOptions);
    pub fn crocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache(block_options: *mut DBBlockBasedTableOptions, v: c_uchar);
    pub fn crocksdb_options_increase_parallelism(options: *mut Options, threads: c_int);
    pub fn crocksdb_options_optimize_level_style_compaction(options: *mut Options, memtable_memory_budget: c_int);
    pub fn crocksdb_options_set_compaction_filter(options: *mut Options, filter: *mut DBCompactionFilter);
    pub fn crocksdb_options_set_create_if_missing(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_max_open_files(options: *mut Options, files: c_int);
    pub fn crocksdb_options_set_max_total_wal_size(options: *mut Options, size: u64);
    pub fn crocksdb_options_set_use_fsync(options: *mut Options, v: c_int);
    pub fn crocksdb_options_set_bytes_per_sync(options: *mut Options, bytes: u64);
    pub fn crocksdb_options_set_enable_pipelined_write(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_allow_concurrent_memtable_write(options: *mut Options, v: bool);
    pub fn crocksdb_options_optimize_for_point_lookup(options: *mut Options, block_cache_size_mb: u64);
    pub fn crocksdb_options_set_table_cache_numshardbits(options: *mut Options, bits: c_int);
    pub fn crocksdb_options_set_writable_file_max_buffer_size(options: *mut Options, nbytes: c_int);
    pub fn crocksdb_options_set_max_write_buffer_number(options: *mut Options, bufno: c_int);
    pub fn crocksdb_options_set_min_write_buffer_number_to_merge(options: *mut Options, bufno: c_int);
    pub fn crocksdb_options_set_level0_file_num_compaction_trigger(options: *mut Options, no: c_int);
    pub fn crocksdb_options_set_level0_slowdown_writes_trigger(options: *mut Options, no: c_int);
    pub fn crocksdb_options_set_level0_stop_writes_trigger(options: *mut Options, no: c_int);
    pub fn crocksdb_options_set_write_buffer_size(options: *mut Options, bytes: u64);
    pub fn crocksdb_options_set_target_file_size_base(options: *mut Options, bytes: u64);
    pub fn crocksdb_options_set_target_file_size_multiplier(options: *mut Options, mul: c_int);
    pub fn crocksdb_options_set_max_bytes_for_level_base(options: *mut Options, bytes: u64);
    pub fn crocksdb_options_set_max_bytes_for_level_multiplier(options: *mut Options, mul: c_int);
    pub fn crocksdb_options_set_max_compaction_bytes(options: *mut Options, bytes: uint64_t);
    pub fn crocksdb_options_set_max_log_file_size(options: *mut Options, bytes: size_t);
    pub fn crocksdb_options_set_log_file_time_to_roll(options: *mut Options, bytes: size_t);
    pub fn crocksdb_options_set_info_log_level(options: *mut Options, level: DBInfoLogLevel);
    pub fn crocksdb_options_set_keep_log_file_num(options: *mut Options, num: size_t);
    pub fn crocksdb_options_set_max_manifest_file_size(options: *mut Options, bytes: u64);
    pub fn crocksdb_options_set_hash_skip_list_rep(options: *mut Options, bytes: u64, a1: i32, a2: i32);
    pub fn crocksdb_options_set_compaction_style(options: *mut Options, cs: DBCompactionStyle);
    pub fn crocksdb_options_set_compression(options: *mut Options, compression_style_no: DBCompressionType);
    pub fn crocksdb_options_get_compression(options: *mut Options) -> DBCompressionType;
    pub fn crocksdb_options_set_compression_per_level(options: *mut Options, level_values: *const DBCompressionType, num_levels: size_t);
    pub fn crocksdb_options_get_compression_level_number(options: *mut Options) -> size_t;
    pub fn crocksdb_options_get_compression_per_level(options: *mut Options, level_values: *mut DBCompressionType);
    pub fn crocksdb_set_bottommost_compression(options: *mut Options, c: DBCompressionType);
    pub fn crocksdb_options_set_base_background_compactions(optinos: *mut Options, base_bg_compactions: c_int);
    pub fn crocksdb_options_set_max_background_compactions(options: *mut Options, max_bg_compactions: c_int);
    pub fn crocksdb_options_set_max_background_flushes(options: *mut Options, max_bg_flushes: c_int);
    pub fn crocksdb_options_set_disable_auto_compactions(options: *mut Options, v: c_int);
    pub fn crocksdb_options_set_report_bg_io_stats(options: *mut Options, v: c_int);
    pub fn crocksdb_options_set_compaction_readahead_size(options: *mut Options, v: size_t);
    pub fn crocksdb_options_set_wal_recovery_mode(options: *mut Options, mode: DBRecoveryMode);
    pub fn crocksdb_options_set_max_subcompactions(options: *mut Options, v: u32);
    pub fn crocksdb_options_set_wal_bytes_per_sync(options: *mut Options, v: u64);
    pub fn crocksdb_options_enable_statistics(options: *mut Options);
    pub fn crocksdb_options_statistics_get_string(options: *mut Options) -> *const c_char;
    pub fn crocksdb_options_statistics_get_ticker_count(options: *mut Options, ticker_type: DBStatisticsTickerType) -> u64;
    pub fn crocksdb_options_statistics_get_and_reset_ticker_count(options: *mut Options, ticker_type: DBStatisticsTickerType) -> u64;
    pub fn crocksdb_options_statistics_get_histogram_string(options: *mut Options, hist_type: DBStatisticsHistogramType) -> *const c_char;
    pub fn crocksdb_options_statistics_get_histogram(options: *mut Options,
                                                     hist_type: DBStatisticsHistogramType,
                                                     median: *mut c_double,
                                                     percentile95: *mut c_double,

@@ -360,42 +358,41 @@ extern "C" {
                                                     average: *mut c_double,
                                                     standard_deviation: *mut c_double)
                                                     -> bool;
    pub fn crocksdb_options_set_stats_dump_period_sec(options: *mut Options, v: usize);
    pub fn crocksdb_options_set_num_levels(options: *mut Options, v: c_int);
    pub fn crocksdb_options_set_db_log_dir(options: *mut Options, path: *const c_char);
    pub fn crocksdb_options_set_wal_dir(options: *mut Options, path: *const c_char);
    pub fn crocksdb_options_set_wal_ttl_seconds(options: *mut Options, ttl: u64);
    pub fn crocksdb_options_set_wal_size_limit_mb(options: *mut Options, limit: u64);
    pub fn crocksdb_options_set_use_direct_reads(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_use_direct_io_for_flush_and_compaction(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_prefix_extractor(options: *mut Options, prefix_extractor: *mut DBSliceTransform);
    pub fn crocksdb_options_set_optimize_filters_for_hits(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_level_compaction_dynamic_level_bytes(options: *mut Options, v: bool);
    pub fn crocksdb_options_set_memtable_insert_with_hint_prefix_extractor(options: *mut Options, prefix_extractor: *mut DBSliceTransform);
    pub fn crocksdb_options_set_memtable_prefix_bloom_size_ratio(options: *mut Options, ratio: c_double);
    pub fn crocksdb_options_set_delayed_write_rate(options: *mut Options, rate: u64);
    pub fn crocksdb_options_set_ratelimiter(options: *mut Options, limiter: *mut DBRateLimiter);
    pub fn crocksdb_options_set_info_log(options: *mut Options, logger: *mut DBLogger);
    pub fn crocksdb_options_get_block_cache_usage(options: *const Options) -> usize;
    pub fn crocksdb_ratelimiter_create(rate_bytes_per_sec: i64, refill_period_us: i64, fairness: i32) -> *mut DBRateLimiter;
    pub fn crocksdb_ratelimiter_destroy(limiter: *mut DBRateLimiter);
    pub fn crocksdb_options_set_soft_pending_compaction_bytes_limit(options: *mut Options, v: u64);
    pub fn crocksdb_options_set_hard_pending_compaction_bytes_limit(options: *mut Options, v: u64);
    pub fn crocksdb_options_set_compaction_priority(options: *mut Options, v: CompactionPriority);
    pub fn crocksdb_filterpolicy_create_bloom_full(bits_per_key: c_int) -> *mut DBFilterPolicy;
    pub fn crocksdb_filterpolicy_create_bloom(bits_per_key: c_int) -> *mut DBFilterPolicy;
    pub fn crocksdb_open(options: *mut Options, path: *const c_char, err: *mut *mut c_char) -> *mut DBInstance;

@@ -490,10 +487,10 @@ extern "C" {
    pub fn crocksdb_close(db: *mut DBInstance);
    pub fn crocksdb_pause_bg_work(db: *mut DBInstance);
    pub fn crocksdb_continue_bg_work(db: *mut DBInstance);
    pub fn crocksdb_destroy_db(options: *const Options, path: *const c_char, err: *mut *mut c_char);
    pub fn crocksdb_repair_db(options: *const Options, path: *const c_char, err: *mut *mut c_char);

    // Merge

@@ -538,7 +535,7 @@ extern "C" {
                                             name_fn: extern fn(*mut c_void) -> *const c_char)
                                             -> *mut DBMergeOperator;
    pub fn crocksdb_mergeoperator_destroy(mo: *mut DBMergeOperator);
    pub fn crocksdb_options_set_merge_operator(options: *mut Options, mo: *mut DBMergeOperator);

    // Iterator
    pub fn crocksdb_iter_destroy(iter: *mut DBIterator);
    pub fn crocksdb_iter_valid(iter: *const DBIterator) -> bool;

@@ -623,7 +620,7 @@ extern "C" {
                              err: *mut *mut c_char);

    // Comparator
    pub fn crocksdb_options_set_comparator(options: *mut Options, cb: *mut DBComparator);
    pub fn crocksdb_comparator_create(state: *mut c_void,
                                      destroy: extern "C" fn(*mut c_void) -> (),
                                      compare: extern "C" fn(arg: *mut c_void,

@@ -637,16 +634,16 @@ extern "C" {
    pub fn crocksdb_comparator_destroy(cmp: *mut DBComparator);

    // Column Family
    pub fn crocksdb_open_column_families(options: *const Options,
                                         path: *const c_char,
                                         num_column_families: c_int,
                                         column_family_names: *const *const c_char,
                                         column_family_options: *const *const Options,
                                         column_family_handles: *const *mut DBCFHandle,
                                         err: *mut *mut c_char)
                                         -> *mut DBInstance;
    pub fn crocksdb_create_column_family(db: *mut DBInstance,
                                         column_family_options: *const Options,
                                         column_family_name: *const c_char,
                                         err: *mut *mut c_char)
                                         -> *mut DBCFHandle;

@@ -655,7 +652,7 @@ extern "C" {
                                       err: *mut *mut c_char);
    pub fn crocksdb_column_family_handle_id(column_family_handle: *mut DBCFHandle) -> u32;
    pub fn crocksdb_column_family_handle_destroy(column_family_handle: *mut DBCFHandle);
    pub fn crocksdb_list_column_families(db: *const Options,
                                         path: *const c_char,
                                         lencf: *mut size_t,
                                         err: *mut *mut c_char)

@@ -768,14 +765,14 @@ extern "C" {
    // SstFileWriter
    pub fn crocksdb_sstfilewriter_create(env: *mut EnvOptions, io_options: *const Options) -> *mut SstFileWriter;
    pub fn crocksdb_sstfilewriter_create_cf(env: *mut EnvOptions, io_options: *const Options, cf: *mut DBCFHandle) -> *mut SstFileWriter;
    pub fn crocksdb_sstfilewriter_create_with_comparator(env: *mut EnvOptions,
                                                         io_options: *const Options,
                                                         comparator: *const DBComparator,
                                                         cf: *mut DBCFHandle)
                                                         -> *mut SstFileWriter;

@@ -810,7 +807,7 @@ extern "C" {
    // Backup engine
    // TODO: add more ffis about backup engine.
    pub fn crocksdb_backup_engine_open(options: *const Options, path: *const c_char, err: *mut *mut c_char) -> *mut DBBackupEngine;

@@ -841,7 +838,7 @@ extern "C" {
                                             -> *mut DBSliceTransform;
    pub fn crocksdb_slicetransform_destroy(transform: *mut DBSliceTransform);
    pub fn crocksdb_create_log_from_options(path: *const c_char, options: *mut Options, err: *mut *mut c_char) -> *mut DBLogger;
    pub fn crocksdb_log_destroy(logger: *mut DBLogger);

@@ -968,7 +965,7 @@ extern "C" {
                                                                  f: *mut DBTablePropertiesCollectorFactory);
    pub fn crocksdb_options_add_table_properties_collector_factory(options: *mut Options, f: *mut DBTablePropertiesCollectorFactory);
    pub fn crocksdb_get_properties_of_all_tables(db: *mut DBInstance, errptr: *mut *mut c_char)

@@ -1038,7 +1035,7 @@ extern "C" {
                                          *const DBIngestionInfo))
                                          -> *mut DBEventListener;
    pub fn crocksdb_eventlistener_destroy(et: *mut DBEventListener);
    pub fn crocksdb_options_add_eventlistener(opt: *mut Options, et: *mut DBEventListener);

    // Get All Key Versions
    pub fn crocksdb_keyversions_destroy(kvs: *mut DBKeyVersions);
src/lib.rs

@@ -39,8 +39,9 @@ pub use librocksdb_sys::{DBCompactionStyle, DBCompressionType, DBRecoveryMode, D
 pub use merge_operator::MergeOperands;
 pub use rocksdb::{DB, DBIterator, DBVector, Kv, SeekKey, Writable, WriteBatch, CFHandle, Range,
                   BackupEngine, SstFileWriter};
-pub use rocksdb_options::{BlockBasedOptions, Options, ReadOptions, WriteOptions, RestoreOptions,
-                          IngestExternalFileOptions, EnvOptions, HistogramData, CompactOptions};
+pub use rocksdb_options::{BlockBasedOptions, DBOptions, ColumnFamilyOptions, ReadOptions,
+                          WriteOptions, RestoreOptions, IngestExternalFileOptions, EnvOptions,
+                          HistogramData, CompactOptions};
 pub use slice_transform::SliceTransform;
 pub use table_properties::{TableProperties, TablePropertiesCollection,
                            TablePropertiesCollectionView, UserCollectedProperties};
src/main.rs

@@ -14,7 +14,7 @@
 //
 extern crate rocksdb;

-use rocksdb::{DB, MergeOperands, Options, Writable};
+use rocksdb::{DB, MergeOperands, DBOptions, Writable, ColumnFamilyOptions};

 // fn snapshot_test() {
 //     let path = "_rust_rocksdb_iteratortest";

@@ -87,11 +87,12 @@ fn concat_merge(_: &[u8], existing_val: Option<&[u8]>, operands: &mut MergeOpera
 fn custom_merge() {
     let path = "_rust_rocksdb_mergetest";
-    let mut opts = Options::new();
+    let mut opts = DBOptions::new();
     opts.create_if_missing(true);
-    opts.add_merge_operator("test operator", concat_merge);
+    let mut cf_opts = ColumnFamilyOptions::new();
+    cf_opts.add_merge_operator("test operator", concat_merge);
     {
-        let db = DB::open(opts, path).unwrap();
+        let db = DB::open_cf(opts, path, vec!["default"], vec![cf_opts]).unwrap();
         db.put(b"k1", b"a").unwrap();
         db.merge(b"k1", b"b").unwrap();
         db.merge(b"k1", b"c").unwrap();

@@ -109,19 +110,19 @@ fn custom_merge() {
             Err(e) => println!("error retrieving value: {}", e),
         }
     }
-    let opts = Options::new();
+    let opts = DBOptions::new();
     DB::destroy(&opts, path).is_ok();
 }

 #[cfg(test)]
 mod tests {
-    use rocksdb::{BlockBasedOptions, DB, DBCompressionType, Options};
+    use rocksdb::{BlockBasedOptions, DB, DBCompressionType, ColumnFamilyOptions, DBOptions};
     use rocksdb::DBCompactionStyle;
     use rocksdb::DBRecoveryMode;

     #[allow(dead_code)]
     fn tuned_for_somebody_elses_disk(path: &str,
-                                     mut opts: Options,
+                                     mut opts: DBOptions,
                                      blockopts: &mut BlockBasedOptions)
                                      -> DB {
         let per_level_compression: [DBCompressionType; 7] = [DBCompressionType::No,

@@ -131,39 +132,39 @@ mod tests {
                                                              DBCompressionType::Lz4,
                                                              DBCompressionType::Lz4,
                                                              DBCompressionType::Lz4];
+        let mut cf_opts = ColumnFamilyOptions::new();
         opts.create_if_missing(true);
         opts.set_max_open_files(10000);
         opts.set_use_fsync(false);
         opts.set_bytes_per_sync(8388608);
-        opts.set_block_cache_size_mb(1024);
+        cf_opts.set_block_cache_size_mb(1024);
         opts.set_table_cache_num_shard_bits(6);
-        opts.set_max_write_buffer_number(32);
-        opts.set_write_buffer_size(536870912);
-        opts.set_target_file_size_base(1073741824);
-        opts.set_min_write_buffer_number_to_merge(4);
-        opts.set_level_zero_file_num_compaction_trigger(4);
-        opts.set_level_zero_stop_writes_trigger(2000);
-        opts.set_level_zero_slowdown_writes_trigger(0);
-        opts.set_compaction_style(DBCompactionStyle::Universal);
+        cf_opts.set_max_write_buffer_number(32);
+        cf_opts.set_write_buffer_size(536870912);
+        cf_opts.set_target_file_size_base(1073741824);
+        cf_opts.set_min_write_buffer_number_to_merge(4);
+        cf_opts.set_level_zero_file_num_compaction_trigger(4);
+        cf_opts.set_level_zero_stop_writes_trigger(2000);
+        cf_opts.set_level_zero_slowdown_writes_trigger(0);
+        cf_opts.set_compaction_style(DBCompactionStyle::Universal);
         opts.set_max_background_compactions(4);
         opts.set_max_background_flushes(4);
-        opts.set_report_bg_io_stats(true);
+        cf_opts.set_report_bg_io_stats(true);
         opts.set_wal_recovery_mode(DBRecoveryMode::PointInTime);
         opts.enable_statistics();
         opts.set_stats_dump_period_sec(60);
-        opts.compression_per_level(&per_level_compression);
+        cf_opts.compression_per_level(&per_level_compression);
         blockopts.set_block_size(524288);
         blockopts.set_cache_index_and_filter_blocks(true);
         blockopts.set_bloom_filter(10, false);
-        opts.set_block_based_table_factory(blockopts);
-        opts.set_disable_auto_compactions(true);
-        opts.set_max_compaction_bytes(1073741824 * 25);
+        cf_opts.set_block_based_table_factory(blockopts);
+        cf_opts.set_disable_auto_compactions(true);
+        cf_opts.set_max_compaction_bytes(1073741824 * 25);
         // let filter = new_bloom_filter(10);
         // opts.set_filter(filter);

-        DB::open(opts, path).unwrap()
+        DB::open_cf(opts, path, vec!["default"], vec![cf_opts]).unwrap()
     }

     // TODO(tyler) unstable
src/merge_operator.rs

@@ -148,7 +148,7 @@ impl<'a> Iterator for &'a mut MergeOperands {
 #[cfg(test)]
 mod test {
     use rocksdb::{DB, DBVector, Writable};
-    use rocksdb_options::Options;
+    use rocksdb_options::{DBOptions, ColumnFamilyOptions};
     use super::*;
     use tempdir::TempDir;

@@ -177,10 +177,15 @@ mod test {
     #[test]
     fn mergetest() {
         let path = TempDir::new("_rust_rocksdb_mergetest").expect("");
-        let mut opts = Options::new();
+        let mut opts = DBOptions::new();
         opts.create_if_missing(true);
-        opts.add_merge_operator("test operator", test_provided_merge);
-        let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
+        let mut cf_opts = ColumnFamilyOptions::new();
+        cf_opts.add_merge_operator("test operator", test_provided_merge);
+        let db = DB::open_cf(opts,
+                             path.path().to_str().unwrap(),
+                             vec!["default"],
+                             vec![cf_opts])
+            .unwrap();
         let p = db.put(b"k1", b"a");
         assert!(p.is_ok());
         let _ = db.merge(b"k1", b"b");
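After this change the merge-operator hook hangs off ColumnFamilyOptions rather than the old unified Options. A small sketch of the wiring, assuming the MergeFn shape taken from concat_merge in src/main.rs (its return type is not visible in this capture, so Vec<u8> is an assumption), with the path made up for illustration:

    extern crate rocksdb;

    use rocksdb::{ColumnFamilyOptions, DBOptions, MergeOperands, Writable, DB};

    // Assumed MergeFn shape: concatenate the existing value and all operands.
    fn concat_merge(_key: &[u8], existing: Option<&[u8]>, operands: &mut MergeOperands) -> Vec<u8> {
        let mut result = Vec::new();
        if let Some(v) = existing {
            result.extend_from_slice(v);
        }
        for op in operands {
            result.extend_from_slice(op);
        }
        result
    }

    fn main() {
        let mut opts = DBOptions::new();
        opts.create_if_missing(true);
        // The merge operator is registered on the column family options.
        let mut cf_opts = ColumnFamilyOptions::new();
        cf_opts.add_merge_operator("concat operator", concat_merge);
        let db = DB::open_cf(opts, "_rust_rocksdb_merge_demo", vec!["default"], vec![cf_opts])
            .unwrap();
        db.put(b"k", b"a").unwrap();
        db.merge(b"k", b"b").unwrap();
    }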
src/rocksdb.rs

@@ -17,8 +17,9 @@ use crocksdb_ffi::{self, DBWriteBatch, DBCFHandle, DBInstance, DBBackupEngine,
                    DBStatisticsTickerType, DBStatisticsHistogramType, DBPinnableSlice,
                    DBCompressionType};
 use libc::{self, c_int, c_void, size_t};
-use rocksdb_options::{Options, ReadOptions, UnsafeSnap, WriteOptions, FlushOptions, EnvOptions,
-                      RestoreOptions, IngestExternalFileOptions, HistogramData, CompactOptions};
+use rocksdb_options::{DBOptions, ColumnFamilyOptions, ReadOptions, UnsafeSnap, WriteOptions,
+                      FlushOptions, EnvOptions, RestoreOptions, IngestExternalFileOptions,
+                      HistogramData, CompactOptions};
 use std::{fs, ptr, slice};
 use std::collections::BTreeMap;
 use std::collections::btree_map::Entry;

@@ -59,7 +60,8 @@ pub struct DB {
     inner: *mut DBInstance,
     cfs: BTreeMap<String, CFHandle>,
     path: String,
-    opts: Options,
+    opts: DBOptions,
+    _cf_opts: Vec<ColumnFamilyOptions>,
 }

 impl Debug for DB {

@@ -315,19 +317,19 @@ pub struct KeyVersion {
 impl DB {
     pub fn open_default(path: &str) -> Result<DB, String> {
-        let mut opts = Options::new();
+        let mut opts = DBOptions::new();
         opts.create_if_missing(true);
         DB::open(opts, path)
     }

-    pub fn open(opts: Options, path: &str) -> Result<DB, String> {
-        DB::open_cf(opts, path, &[], &[])
+    pub fn open(opts: DBOptions, path: &str) -> Result<DB, String> {
+        DB::open_cf(opts, path, vec![], vec![])
     }

-    pub fn open_cf(opts: Options,
-                   path: &str,
-                   cfs: &[&str],
-                   cf_opts: &[&Options])
-                   -> Result<DB, String> {
+    pub fn open_cf(opts: DBOptions,
+                   path: &str,
+                   cfs: Vec<&str>,
+                   cf_opts: Vec<ColumnFamilyOptions>)
+                   -> Result<DB, String> {
         let cpath = match CString::new(path.as_bytes()) {
             Ok(c) => c,

@@ -341,18 +343,17 @@ impl DB {
                                {:?}",
                               e));
         }
         if cfs.len() != cf_opts.len() {
             return Err(format!("cfs.len() and cf_opts.len() not match."));
         }

+        let mut cfs_v = cfs;
+        let mut cf_opts_v = cf_opts;
         let (db, cf_map) = {
-            let mut cfs_v = cfs.to_vec();
-            let mut cf_opts_v = cf_opts.to_vec();
             // Always open the default column family
             if !cfs_v.contains(&DEFAULT_COLUMN_FAMILY) {
                 cfs_v.push(DEFAULT_COLUMN_FAMILY);
-                cf_opts_v.push(&opts);
+                cf_opts_v.push(ColumnFamilyOptions::new());
             }

             // We need to store our CStrings in an intermediate vector

@@ -365,7 +366,7 @@ impl DB {
             let cfhandles: Vec<_> = cfs_v.iter().map(|_| ptr::null_mut()).collect();
             let cfopts: Vec<_> = cf_opts_v.iter()
-                .map(|x| x.inner as *const crocksdb_ffi::DBOptions)
+                .map(|x| x.inner as *const crocksdb_ffi::Options)
                 .collect();

             let db = unsafe {

@@ -400,10 +401,11 @@ impl DB {
             cfs: cf_map,
             path: path.to_owned(),
             opts: opts,
+            _cf_opts: cf_opts_v,
         })
     }

-    pub fn destroy(opts: &Options, path: &str) -> Result<(), String> {
+    pub fn destroy(opts: &DBOptions, path: &str) -> Result<(), String> {
         let cpath = CString::new(path.as_bytes()).unwrap();
         unsafe {
             ffi_try!(crocksdb_destroy_db(opts.inner, cpath.as_ptr()));

@@ -411,7 +413,7 @@ impl DB {
         Ok(())
     }

-    pub fn repair(opts: Options, path: &str) -> Result<(), String> {
+    pub fn repair(opts: DBOptions, path: &str) -> Result<(), String> {
         let cpath = CString::new(path.as_bytes()).unwrap();
         unsafe {
             ffi_try!(crocksdb_repair_db(opts.inner, cpath.as_ptr()));

@@ -419,7 +421,7 @@ impl DB {
         Ok(())
     }

-    pub fn list_column_families(opts: &Options, path: &str) -> Result<Vec<String>, String> {
+    pub fn list_column_families(opts: &DBOptions, path: &str) -> Result<Vec<String>, String> {
         let cpath = match CString::new(path.as_bytes()) {
             Ok(c) => c,
             Err(_) => {

@@ -522,7 +524,10 @@ impl DB {
         self.get_cf_opt(cf, key, &ReadOptions::new())
     }

-    pub fn create_cf(&mut self, name: &str, opts: &Options) -> Result<&CFHandle, String> {
+    pub fn create_cf(&mut self,
+                     name: &str,
+                     cf_opts: ColumnFamilyOptions)
+                     -> Result<&CFHandle, String> {
         let cname = match CString::new(name.as_bytes()) {
             Ok(c) => c,
             Err(_) => {

@@ -532,8 +537,9 @@ impl DB {
         let cname_ptr = cname.as_ptr();
         unsafe {
             let cf_handler =
-                ffi_try!(crocksdb_create_column_family(self.inner, opts.inner, cname_ptr));
+                ffi_try!(crocksdb_create_column_family(self.inner, cf_opts.inner, cname_ptr));
             let handle = CFHandle { inner: cf_handler };
+            self._cf_opts.push(cf_opts);
             Ok(match self.cfs.entry(name.to_owned()) {
                 Entry::Occupied(mut e) => {
                     e.insert(handle);

@@ -959,18 +965,18 @@ impl DB {
         self.opts.get_statistics_histogram(hist_type)
     }

-    pub fn get_options(&self) -> Options {
+    pub fn get_options(&self) -> ColumnFamilyOptions {
         let cf = self.cf_handle("default").unwrap();
         unsafe {
             let inner = crocksdb_ffi::crocksdb_get_options_cf(self.inner, cf.inner);
-            Options::from_raw(inner)
+            ColumnFamilyOptions::from_raw(inner)
         }
     }

-    pub fn get_options_cf(&self, cf: &CFHandle) -> Options {
+    pub fn get_options_cf(&self, cf: &CFHandle) -> ColumnFamilyOptions {
         unsafe {
             let inner = crocksdb_ffi::crocksdb_get_options_cf(self.inner, cf.inner);
-            Options::from_raw(inner)
+            ColumnFamilyOptions::from_raw(inner)
         }
     }

@@ -1007,7 +1013,7 @@ impl DB {
     }

     pub fn backup_at(&self, path: &str) -> Result<BackupEngine, String> {
-        let backup_engine = BackupEngine::open(Options::new(), path).unwrap();
+        let backup_engine = BackupEngine::open(DBOptions::new(), path).unwrap();
         unsafe {
             ffi_try!(crocksdb_backup_engine_create_new_backup(backup_engine.inner, self.inner))
         }

@@ -1394,7 +1400,7 @@ pub struct BackupEngine {
 }

 impl BackupEngine {
-    pub fn open(opts: Options, path: &str) -> Result<BackupEngine, String> {
+    pub fn open(opts: DBOptions, path: &str) -> Result<BackupEngine, String> {
         let cpath = match CString::new(path.as_bytes()) {
             Ok(c) => c,
             Err(_) => {

@@ -1427,13 +1433,13 @@ impl Drop for BackupEngine {
 pub struct SstFileWriter {
     inner: *mut crocksdb_ffi::SstFileWriter,
     _env_opt: EnvOptions,
-    _opt: Options,
+    _opt: ColumnFamilyOptions,
 }

 unsafe impl Send for SstFileWriter {}

 impl SstFileWriter {
-    pub fn new(env_opt: EnvOptions, opt: Options) -> SstFileWriter {
+    pub fn new(env_opt: EnvOptions, opt: ColumnFamilyOptions) -> SstFileWriter {
         unsafe {
             SstFileWriter {
                 inner: crocksdb_ffi::crocksdb_sstfilewriter_create(env_opt.inner, opt.inner),

@@ -1443,7 +1449,7 @@ impl SstFileWriter {
         }
     }

-    pub fn new_cf(env_opt: EnvOptions, opt: Options, cf: &CFHandle) -> SstFileWriter {
+    pub fn new_cf(env_opt: EnvOptions, opt: ColumnFamilyOptions, cf: &CFHandle) -> SstFileWriter {
         unsafe {
             SstFileWriter {
                 inner: crocksdb_ffi::crocksdb_sstfilewriter_create_cf(env_opt.inner,

@@ -1532,7 +1538,7 @@ mod test {
         let path = TempDir::new("_rust_rocksdb_error").expect("");
         let path_str = path.path().to_str().unwrap();
         let db = DB::open_default(path_str).unwrap();
-        let opts = Options::new();
+        let opts = DBOptions::new();
         // The DB will still be open when we try to destroy and the lock should fail
         match DB::destroy(&opts, path_str) {
             Err(ref s) => {

@@ -1676,21 +1682,20 @@ mod test {
         {
             let mut cfs_opts = vec![];
             for _ in 0..cfs.len() {
-                cfs_opts.push(Options::new());
+                cfs_opts.push(ColumnFamilyOptions::new());
             }
-            let cfs_ref_opts: Vec<&Options> = cfs_opts.iter().collect();

-            let mut opts = Options::new();
+            let mut opts = DBOptions::new();
             opts.create_if_missing(true);
             let mut db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
-            for (&cf, &cf_opts) in cfs.iter().zip(&cfs_ref_opts) {
-                if cf == "default" {
+            for (cf, cf_opts) in cfs.iter().zip(cfs_opts) {
+                if *cf == "default" {
                     continue;
                 }
                 db.create_cf(cf, cf_opts).unwrap();
             }
         }
-        let opts_list_cfs = Options::new();
+        let opts_list_cfs = DBOptions::new();
         let mut cfs_vec =
             DB::list_column_families(&opts_list_cfs, path.path().to_str().unwrap()).unwrap();
         cfs_vec.sort();

@@ -1738,7 +1743,7 @@ mod test {
         let log_path = format!("{}", Path::new(&db_path).join("log_path").display());
         fs::create_dir_all(&log_path).unwrap();
-        let mut opts = Options::new();
+        let mut opts = DBOptions::new();
         opts.create_if_missing(true);
         opts.set_db_log_dir(&log_path);

@@ -1839,7 +1844,7 @@ mod test {
             assert!(db.get(b"k2").unwrap().is_some());
             assert!(snap.get(b"k2").unwrap().is_none());
         }
-        let opts = Options::new();
+        let opts = DBOptions::new();
         assert!(DB::destroy(&opts, path).is_ok());
     }

@@ -1864,11 +1869,11 @@ mod test {
     #[test]
     fn flush_cf() {
         let path = TempDir::new("_rust_rocksdb_flush_cf").expect("");
-        let mut opts = Options::new();
+        let mut opts = DBOptions::new();
         opts.create_if_missing(true);
         let mut db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
-        let opts = Options::new();
-        db.create_cf("cf", &opts).unwrap();
+        let cf_opts = ColumnFamilyOptions::new();
+        db.create_cf("cf", cf_opts).unwrap();
         let cf_handle = db.cf_handle("cf").unwrap();
         for i in 0..200 {

@@ -1901,7 +1906,7 @@ mod test {
     #[test]
     fn test_get_all_key_versions() {
-        let mut opts = Options::new();
+        let mut opts = DBOptions::new();
         opts.create_if_missing(true);
         let path = TempDir::new("_rust_rocksdb_get_all_key_version_test").expect("");
         let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
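With the new signatures above, create_cf consumes a ColumnFamilyOptions (the DB keeps it alive in _cf_opts) and the option getters return ColumnFamilyOptions. A minimal sketch of that flow, assuming the signatures shown in the src/rocksdb.rs hunks and with a made-up path:

    extern crate rocksdb;

    use rocksdb::{ColumnFamilyOptions, DBOptions, DB};

    fn main() {
        let mut opts = DBOptions::new();
        opts.create_if_missing(true);
        let mut db = DB::open(opts, "_rust_rocksdb_create_cf_demo").unwrap();

        // Per-CF options are now passed by value; no more `&Options` borrow.
        db.create_cf("write_cf", ColumnFamilyOptions::new()).unwrap();

        // Reading options back yields ColumnFamilyOptions for the chosen handle.
        let handle = db.cf_handle("write_cf").unwrap();
        let _cf_opts: ColumnFamilyOptions = db.get_options_cf(handle);
    }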
src/rocksdb_options.rs
View file @
130c764e
...
@@ -16,7 +16,7 @@
...
@@ -16,7 +16,7 @@
use
compaction_filter
::{
CompactionFilter
,
new_compaction_filter
,
CompactionFilterHandle
};
use
compaction_filter
::{
CompactionFilter
,
new_compaction_filter
,
CompactionFilterHandle
};
use
comparator
::{
self
,
ComparatorCallback
,
compare_callback
};
use
comparator
::{
self
,
ComparatorCallback
,
compare_callback
};
use
crocksdb_ffi
::{
self
,
DB
Options
,
DBWriteOptions
,
DBBlockBasedTableOptions
,
DBReadOptions
,
use
crocksdb_ffi
::{
self
,
Options
,
DBWriteOptions
,
DBBlockBasedTableOptions
,
DBReadOptions
,
DBRestoreOptions
,
DBCompressionType
,
DBRecoveryMode
,
DBSnapshot
,
DBInstance
,
DBRestoreOptions
,
DBCompressionType
,
DBRecoveryMode
,
DBSnapshot
,
DBInstance
,
DBFlushOptions
,
DBStatisticsTickerType
,
DBStatisticsHistogramType
,
DBFlushOptions
,
DBStatisticsTickerType
,
DBStatisticsHistogramType
,
DBRateLimiter
,
DBInfoLogLevel
,
DBCompactOptions
};
DBRateLimiter
,
DBInfoLogLevel
,
DBCompactOptions
};
...
@@ -294,12 +294,347 @@ impl Drop for CompactOptions {
...
@@ -294,12 +294,347 @@ impl Drop for CompactOptions {
}
}
}
}
pub
struct
Options
{
pub
struct
DBOptions
{
pub
inner
:
*
mut
DBOptions
,
pub
inner
:
*
mut
Options
,
}
impl
Drop
for
DBOptions
{
fn
drop
(
&
mut
self
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_destroy
(
self
.inner
);
}
}
}
impl
Default
for
DBOptions
{
fn
default
()
->
DBOptions
{
unsafe
{
let
opts
=
crocksdb_ffi
::
crocksdb_options_create
();
assert
!
(
!
opts
.is_null
(),
"Could not create rocksdb db options"
);
DBOptions
{
inner
:
opts
}
}
}
}
impl
Clone
for
DBOptions
{
fn
clone
(
&
self
)
->
Self
{
unsafe
{
let
opts
=
crocksdb_ffi
::
crocksdb_options_copy
(
self
.inner
);
assert
!
(
!
opts
.is_null
());
DBOptions
{
inner
:
opts
}
}
}
}
impl
DBOptions
{
pub
fn
new
()
->
DBOptions
{
DBOptions
::
default
()
}
pub
fn
increase_parallelism
(
&
mut
self
,
parallelism
:
i32
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_increase_parallelism
(
self
.inner
,
parallelism
);
}
}
pub
fn
add_event_listener
<
L
:
EventListener
>
(
&
mut
self
,
l
:
L
)
{
let
handle
=
new_event_listener
(
l
);
unsafe
{
crocksdb_ffi
::
crocksdb_options_add_eventlistener
(
self
.inner
,
handle
)
}
}
pub
fn
create_if_missing
(
&
mut
self
,
create_if_missing
:
bool
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_create_if_missing
(
self
.inner
,
create_if_missing
);
}
}
pub
fn
set_max_open_files
(
&
mut
self
,
nfiles
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_open_files
(
self
.inner
,
nfiles
);
}
}
pub
fn
set_max_total_wal_size
(
&
mut
self
,
size
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_total_wal_size
(
self
.inner
,
size
);
}
}
pub
fn
set_use_fsync
(
&
mut
self
,
useit
:
bool
)
{
unsafe
{
if
useit
{
crocksdb_ffi
::
crocksdb_options_set_use_fsync
(
self
.inner
,
1
)
}
else
{
crocksdb_ffi
::
crocksdb_options_set_use_fsync
(
self
.inner
,
0
)
}
}
}
pub
fn
set_bytes_per_sync
(
&
mut
self
,
nbytes
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_bytes_per_sync
(
self
.inner
,
nbytes
);
}
}
pub
fn
set_table_cache_num_shard_bits
(
&
mut
self
,
nbits
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_table_cache_numshardbits
(
self
.inner
,
nbits
);
}
}
pub
fn
set_writable_file_max_buffer_size
(
&
mut
self
,
nbytes
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_writable_file_max_buffer_size
(
self
.inner
,
nbytes
);
}
}
pub
fn
set_use_direct_reads
(
&
mut
self
,
v
:
bool
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_use_direct_reads
(
self
.inner
,
v
);
}
}
pub
fn
set_use_direct_io_for_flush_and_compaction
(
&
mut
self
,
v
:
bool
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_use_direct_io_for_flush_and_compaction
(
self
.inner
,
v
);
}
}
pub
fn
set_max_manifest_file_size
(
&
mut
self
,
size
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_manifest_file_size
(
self
.inner
,
size
);
}
}
pub
fn
set_max_background_flushes
(
&
mut
self
,
n
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_background_flushes
(
self
.inner
,
n
);
}
}
pub
fn
set_max_subcompactions
(
&
mut
self
,
n
:
u32
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_subcompactions
(
self
.inner
,
n
);
}
}
pub
fn
set_wal_bytes_per_sync
(
&
mut
self
,
n
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_wal_bytes_per_sync
(
self
.inner
,
n
);
}
}
pub
fn
set_base_background_compactions
(
&
mut
self
,
n
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_base_background_compactions
(
self
.inner
,
n
);
}
}
pub
fn
set_max_background_compactions
(
&
mut
self
,
n
:
c_int
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_background_compactions
(
self
.inner
,
n
);
}
}
pub
fn
set_wal_recovery_mode
(
&
mut
self
,
mode
:
DBRecoveryMode
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_wal_recovery_mode
(
self
.inner
,
mode
);
}
}
pub
fn
set_delayed_write_rate
(
&
mut
self
,
rate
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_delayed_write_rate
(
self
.inner
,
rate
);
}
}
pub
fn
enable_statistics
(
&
mut
self
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_enable_statistics
(
self
.inner
);
}
}
pub
fn
get_statistics_ticker_count
(
&
self
,
ticker_type
:
DBStatisticsTickerType
)
->
u64
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_statistics_get_ticker_count
(
self
.inner
,
ticker_type
)
}
}
pub
fn
get_and_reset_statistics_ticker_count
(
&
self
,
ticker_type
:
DBStatisticsTickerType
)
->
u64
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_statistics_get_and_reset_ticker_count
(
self
.inner
,
ticker_type
)
}
}
pub
fn
get_statistics_histogram
(
&
self
,
hist_type
:
DBStatisticsHistogramType
)
->
Option
<
HistogramData
>
{
unsafe
{
let
mut
data
=
HistogramData
::
default
();
let
ret
=
crocksdb_ffi
::
crocksdb_options_statistics_get_histogram
(
self
.inner
,
hist_type
,
&
mut
data
.median
,
&
mut
data
.percentile95
,
&
mut
data
.percentile99
,
&
mut
data
.average
,
&
mut
data
.standard_deviation
);
if
!
ret
{
return
None
;
}
Some
(
data
)
}
}
pub
fn
get_statistics_histogram_string
(
&
self
,
hist_type
:
DBStatisticsHistogramType
)
->
Option
<
String
>
{
unsafe
{
let
value
=
crocksdb_ffi
::
crocksdb_options_statistics_get_histogram_string
(
self
.inner
,
hist_type
);
if
value
.is_null
()
{
return
None
;
}
let
s
=
CStr
::
from_ptr
(
value
)
.to_str
()
.unwrap
()
.to_owned
();
libc
::
free
(
value
as
*
mut
c_void
);
Some
(
s
)
}
}
pub
fn
get_statistics
(
&
self
)
->
Option
<
String
>
{
unsafe
{
let
value
=
crocksdb_ffi
::
crocksdb_options_statistics_get_string
(
self
.inner
);
if
value
.is_null
()
{
return
None
;
}
// Must valid UTF-8 format.
let
s
=
CStr
::
from_ptr
(
value
)
.to_str
()
.unwrap
()
.to_owned
();
libc
::
free
(
value
as
*
mut
c_void
);
Some
(
s
)
}
}
pub
fn
set_stats_dump_period_sec
(
&
mut
self
,
period
:
usize
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_stats_dump_period_sec
(
self
.inner
,
period
);
}
}
pub
fn
set_db_log_dir
(
&
mut
self
,
path
:
&
str
)
{
let
path
=
CString
::
new
(
path
.as_bytes
())
.unwrap
();
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_db_log_dir
(
self
.inner
,
path
.as_ptr
());
}
}
pub
fn
set_wal_dir
(
&
mut
self
,
path
:
&
str
)
{
let
path
=
CString
::
new
(
path
.as_bytes
())
.unwrap
();
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_wal_dir
(
self
.inner
,
path
.as_ptr
());
}
}
pub
fn
set_wal_ttl_seconds
(
&
mut
self
,
ttl
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_wal_ttl_seconds
(
self
.inner
,
ttl
as
u64
);
}
}
pub
fn
set_wal_size_limit_mb
(
&
mut
self
,
limit
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_wal_size_limit_mb
(
self
.inner
,
limit
as
u64
);
}
}
pub
fn
set_max_log_file_size
(
&
mut
self
,
size
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_max_log_file_size
(
self
.inner
,
size
as
size_t
);
}
}
pub
fn
set_log_file_time_to_roll
(
&
mut
self
,
ttl
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_log_file_time_to_roll
(
self
.inner
,
ttl
as
size_t
);
}
}
pub
fn
set_info_log_level
(
&
mut
self
,
level
:
DBInfoLogLevel
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_info_log_level
(
self
.inner
,
level
);
}
}
pub
fn
set_keep_log_file_num
(
&
mut
self
,
num
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_keep_log_file_num
(
self
.inner
,
num
as
size_t
);
}
}
pub
fn
set_compaction_readahead_size
(
&
mut
self
,
size
:
u64
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_compaction_readahead_size
(
self
.inner
,
size
as
size_t
);
}
}
pub
fn
set_ratelimiter
(
&
mut
self
,
rate_bytes_per_sec
:
i64
)
{
let
rate_limiter
=
RateLimiter
::
new
(
rate_bytes_per_sec
,
DEFAULT_REFILL_PERIOD_US
,
DEFAULT_FAIRNESS
);
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_ratelimiter
(
self
.inner
,
rate_limiter
.inner
);
}
}
// Create a info log with `path` and save to options logger field directly.
// TODO: export more logger options like level, roll size, time, etc...
pub
fn
create_info_log
(
&
self
,
path
:
&
str
)
->
Result
<
(),
String
>
{
let
cpath
=
match
CString
::
new
(
path
.as_bytes
())
{
Ok
(
c
)
=>
c
,
Err
(
_
)
=>
{
return
Err
(
"Failed to convert path to CString when creating rocksdb info log"
.to_owned
())
}
};
unsafe
{
let
logger
=
ffi_try!
(
crocksdb_create_log_from_options
(
cpath
.as_ptr
(),
self
.inner
));
crocksdb_ffi
::
crocksdb_options_set_info_log
(
self
.inner
,
logger
);
// logger uses shared_ptr, it is OK to destroy here.
crocksdb_ffi
::
crocksdb_log_destroy
(
logger
);
}
Ok
(())
}
pub
fn
enable_pipelined_write
(
&
self
,
v
:
bool
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_enable_pipelined_write
(
self
.inner
,
v
);
}
}
pub
fn
allow_concurrent_memtable_write
(
&
self
,
v
:
bool
)
{
unsafe
{
crocksdb_ffi
::
crocksdb_options_set_allow_concurrent_memtable_write
(
self
.inner
,
v
);
}
}
}
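After the split, DBOptions holds the settings that apply to the whole instance (WAL, info log, background jobs, rate limiter, statistics) and is the first argument to DB::open, as the updated tests further down show. A minimal usage sketch, assuming only the methods visible in this patch (the path string is made up for illustration):

    // Illustrative sketch: DB-wide tuning now goes through DBOptions.
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_max_background_compactions(4);
    opts.enable_statistics();
    let db = DB::open(opts, "/tmp/_rust_rocksdb_dboptions_example").unwrap();
    db.put(b"key", b"value").unwrap();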
pub struct ColumnFamilyOptions {
    pub inner: *mut Options,
    filter: Option<CompactionFilterHandle>,
}

impl Drop for Options {
impl Drop for ColumnFamilyOptions {
    fn drop(&mut self) {
        unsafe {
            crocksdb_ffi::crocksdb_options_destroy(self.inner);
...
@@ -307,12 +642,13 @@ impl Drop for Options {
        }
    }
}

impl Default for Options {
impl Default for ColumnFamilyOptions {
    fn default() -> Options {
    fn default() -> ColumnFamilyOptions {
        unsafe {
            let opts = crocksdb_ffi::crocksdb_options_create();
            assert!(!opts.is_null(), "Could not create rocksdb options");
            assert!(!opts.is_null(), "Could not create rocksdb column family options");
            Options {
            ColumnFamilyOptions {
                inner: opts,
                filter: None,
            }
...
@@ -320,13 +656,13 @@ impl Default for Options {
        }
    }
}

impl Clone for Options {
impl Clone for ColumnFamilyOptions {
    fn clone(&self) -> Self {
        assert!(self.filter.is_none());
        unsafe {
            let opts = crocksdb_ffi::crocksdb_options_copy(self.inner);
            assert!(!opts.is_null());
            Options {
            ColumnFamilyOptions {
                inner: opts,
                filter: None,
            }
...
@@ -334,26 +670,20 @@ impl Clone for Options {
        }
    }
}
impl Options {
impl ColumnFamilyOptions {
    pub fn new() -> Options {
    pub fn new() -> ColumnFamilyOptions {
        Options::default()
        ColumnFamilyOptions::default()
    }

    pub unsafe fn from_raw(inner: *mut DBOptions) -> Options {
    pub unsafe fn from_raw(inner: *mut Options) -> ColumnFamilyOptions {
        assert!(!inner.is_null(),
                "could not new rocksdb options with null inner");
        Options {
        ColumnFamilyOptions {
            inner: inner,
            filter: None,
        }
    }

    pub fn increase_parallelism(&mut self, parallelism: i32) {
        unsafe {
            crocksdb_ffi::crocksdb_options_increase_parallelism(self.inner, parallelism);
        }
    }

    pub fn optimize_level_style_compaction(&mut self, memtable_memory_budget: i32) {
        unsafe {
            crocksdb_ffi::crocksdb_options_optimize_level_style_compaction(self.inner,
...
@@ -395,11 +725,6 @@ impl Options {
        }
    }

    pub fn add_event_listener<L: EventListener>(&mut self, l: L) {
        let handle = new_event_listener(l);
        unsafe { crocksdb_ffi::crocksdb_options_add_eventlistener(self.inner, handle) }
    }

    pub fn add_table_properties_collector_factory(&mut self,
                                                  fname: &str,
                                                  factory: Box<TablePropertiesCollectorFactory>) {
...
@@ -409,11 +734,6 @@ impl Options {
        }
    }

    pub fn create_if_missing(&mut self, create_if_missing: bool) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_create_if_missing(self.inner, create_if_missing);
        }
    }

    pub fn compression(&mut self, t: DBCompressionType) {
        unsafe {
...
@@ -471,61 +791,21 @@ impl Options {
        let cb = Box::new(ComparatorCallback {
            name: CString::new(name.as_bytes()).unwrap(),
            f: compare_fn,
        });

        unsafe {
            let cmp = crocksdb_ffi::crocksdb_comparator_create(mem::transmute(cb),
                                                               comparator::destructor_callback,
                                                               compare_callback,
                                                               comparator::name_callback);
            crocksdb_ffi::crocksdb_options_set_comparator(self.inner, cmp);
        }
    }

    pub fn set_block_cache_size_mb(&mut self, cache_size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
        }
    }

    pub fn set_max_open_files(&mut self, nfiles: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_open_files(self.inner, nfiles);
        }
    }

    pub fn set_max_total_wal_size(&mut self, size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_total_wal_size(self.inner, size);
        }
    }

    pub fn set_use_fsync(&mut self, useit: bool) {
        unsafe {
            if useit {
                crocksdb_ffi::crocksdb_options_set_use_fsync(self.inner, 1)
            } else {
                crocksdb_ffi::crocksdb_options_set_use_fsync(self.inner, 0)
            }
        }
    }

    pub fn set_bytes_per_sync(&mut self, nbytes: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_bytes_per_sync(self.inner, nbytes);
        }
    }

    pub fn set_table_cache_num_shard_bits(&mut self, nbits: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_table_cache_numshardbits(self.inner, nbits);
        }
    }

    pub fn set_writable_file_max_buffer_size(&mut self, nbytes: c_int) {
    pub fn set_block_cache_size_mb(&mut self, cache_size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_writable_file_max_buffer_size(self.inner, nbytes);
            crocksdb_ffi::crocksdb_options_optimize_for_point_lookup(self.inner, cache_size);
        }
    }
...
@@ -571,19 +851,6 @@ impl Options {
        }
    }
    pub fn set_use_direct_reads(&mut self, v: bool) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_use_direct_reads(self.inner, v);
        }
    }

    pub fn set_use_direct_io_for_flush_and_compaction(&mut self, v: bool) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_use_direct_io_for_flush_and_compaction(self.inner, v);
        }
    }

    pub fn set_soft_pending_compaction_bytes_limit(&mut self, size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_soft_pending_compaction_bytes_limit(self.inner,
...
@@ -598,12 +865,6 @@ impl Options {
        }
    }

    pub fn set_max_manifest_file_size(&mut self, size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_manifest_file_size(self.inner, size);
        }
    }

    pub fn set_target_file_size_base(&mut self, size: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_target_file_size_base(self.inner, size);
...
@@ -647,36 +908,6 @@ impl Options {
        }
    }

    pub fn set_base_background_compactions(&mut self, n: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_base_background_compactions(self.inner, n);
        }
    }

    pub fn set_max_background_compactions(&mut self, n: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_background_compactions(self.inner, n);
        }
    }

    pub fn set_max_background_flushes(&mut self, n: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_background_flushes(self.inner, n);
        }
    }

    pub fn set_max_subcompactions(&mut self, n: u32) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_max_subcompactions(self.inner, n);
        }
    }

    pub fn set_wal_bytes_per_sync(&mut self, n: u64) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_wal_bytes_per_sync(self.inner, n);
        }
    }

    pub fn set_disable_auto_compactions(&mut self, disable: bool) {
        unsafe {
            if disable {
...
@@ -703,153 +934,12 @@ impl Options {
        }
    }
    pub fn set_wal_recovery_mode(&mut self, mode: DBRecoveryMode) {
        unsafe { crocksdb_ffi::crocksdb_options_set_wal_recovery_mode(self.inner, mode); }
    }

    pub fn set_delayed_write_rate(&mut self, rate: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_delayed_write_rate(self.inner, rate); }
    }

    pub fn enable_statistics(&mut self) {
        unsafe { crocksdb_ffi::crocksdb_options_enable_statistics(self.inner); }
    }

    pub fn get_statistics_ticker_count(&self, ticker_type: DBStatisticsTickerType) -> u64 {
        unsafe { crocksdb_ffi::crocksdb_options_statistics_get_ticker_count(self.inner, ticker_type) }
    }

    pub fn get_and_reset_statistics_ticker_count(&self, ticker_type: DBStatisticsTickerType) -> u64 {
        unsafe {
            crocksdb_ffi::crocksdb_options_statistics_get_and_reset_ticker_count(self.inner, ticker_type)
        }
    }

    pub fn get_statistics_histogram(&self, hist_type: DBStatisticsHistogramType) -> Option<HistogramData> {
        unsafe {
            let mut data = HistogramData::default();
            let ret = crocksdb_ffi::crocksdb_options_statistics_get_histogram(self.inner,
                                                                              hist_type,
                                                                              &mut data.median,
                                                                              &mut data.percentile95,
                                                                              &mut data.percentile99,
                                                                              &mut data.average,
                                                                              &mut data.standard_deviation);
            if !ret {
                return None;
            }
            Some(data)
        }
    }

    pub fn get_statistics_histogram_string(&self, hist_type: DBStatisticsHistogramType) -> Option<String> {
        unsafe {
            let value = crocksdb_ffi::crocksdb_options_statistics_get_histogram_string(self.inner, hist_type);
            if value.is_null() {
                return None;
            }
            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
            libc::free(value as *mut c_void);
            Some(s)
        }
    }

    pub fn get_statistics(&self) -> Option<String> {
        unsafe {
            let value = crocksdb_ffi::crocksdb_options_statistics_get_string(self.inner);
            if value.is_null() {
                return None;
            }
            // Must be valid UTF-8 format.
            let s = CStr::from_ptr(value).to_str().unwrap().to_owned();
            libc::free(value as *mut c_void);
            Some(s)
        }
    }

    pub fn set_stats_dump_period_sec(&mut self, period: usize) {
        unsafe { crocksdb_ffi::crocksdb_options_set_stats_dump_period_sec(self.inner, period); }
    }

    pub fn set_num_levels(&mut self, n: c_int) {
        unsafe {
            crocksdb_ffi::crocksdb_options_set_num_levels(self.inner, n);
        }
    }

    pub fn set_db_log_dir(&mut self, path: &str) {
        let path = CString::new(path.as_bytes()).unwrap();
        unsafe { crocksdb_ffi::crocksdb_options_set_db_log_dir(self.inner, path.as_ptr()); }
    }

    pub fn set_wal_dir(&mut self, path: &str) {
        let path = CString::new(path.as_bytes()).unwrap();
        unsafe { crocksdb_ffi::crocksdb_options_set_wal_dir(self.inner, path.as_ptr()); }
    }

    pub fn set_wal_ttl_seconds(&mut self, ttl: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_wal_ttl_seconds(self.inner, ttl as u64); }
    }

    pub fn set_wal_size_limit_mb(&mut self, limit: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_wal_size_limit_mb(self.inner, limit as u64); }
    }

    pub fn set_max_log_file_size(&mut self, size: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_max_log_file_size(self.inner, size as size_t); }
    }

    pub fn set_log_file_time_to_roll(&mut self, ttl: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_log_file_time_to_roll(self.inner, ttl as size_t); }
    }

    pub fn set_info_log_level(&mut self, level: DBInfoLogLevel) {
        unsafe { crocksdb_ffi::crocksdb_options_set_info_log_level(self.inner, level); }
    }

    pub fn set_keep_log_file_num(&mut self, num: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_keep_log_file_num(self.inner, num as size_t); }
    }

    pub fn set_prefix_extractor<S>(&mut self,
                                   name: S,
                                   transform: Box<SliceTransform>)
...
@@ -897,58 +987,9 @@ impl Options {
        }
    }
    pub fn set_compaction_readahead_size(&mut self, size: u64) {
        unsafe { crocksdb_ffi::crocksdb_options_set_compaction_readahead_size(self.inner, size as size_t); }
    }

    pub fn set_ratelimiter(&mut self, rate_bytes_per_sec: i64) {
        let rate_limiter = RateLimiter::new(rate_bytes_per_sec,
                                            DEFAULT_REFILL_PERIOD_US,
                                            DEFAULT_FAIRNESS);
        unsafe { crocksdb_ffi::crocksdb_options_set_ratelimiter(self.inner, rate_limiter.inner); }
    }

    // Create an info log with `path` and save it to the options logger field directly.
    // TODO: export more logger options like level, roll size, time, etc...
    pub fn create_info_log(&self, path: &str) -> Result<(), String> {
        let cpath = match CString::new(path.as_bytes()) {
            Ok(c) => c,
            Err(_) => {
                return Err("Failed to convert path to CString when creating rocksdb info log".to_owned())
            }
        };
        unsafe {
            let logger = ffi_try!(crocksdb_create_log_from_options(cpath.as_ptr(), self.inner));
            crocksdb_ffi::crocksdb_options_set_info_log(self.inner, logger);
            // logger uses shared_ptr, it is OK to destroy here.
            crocksdb_ffi::crocksdb_log_destroy(logger);
        }
        Ok(())
    }

    pub fn get_block_cache_usage(&self) -> u64 {
        unsafe { crocksdb_ffi::crocksdb_options_get_block_cache_usage(self.inner) as u64 }
    }

    pub fn enable_pipelined_write(&self, v: bool) {
        unsafe { crocksdb_ffi::crocksdb_options_set_enable_pipelined_write(self.inner, v); }
    }

    pub fn allow_concurrent_memtable_write(&self, v: bool) {
        unsafe { crocksdb_ffi::crocksdb_options_set_allow_concurrent_memtable_write(self.inner, v); }
    }
}

pub struct FlushOptions {
...
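Column-family level settings (compaction, block cache, prefix extractor, merge operator) now live on ColumnFamilyOptions, which is passed by value to DB::open_cf and db.create_cf alongside a DBOptions. A minimal sketch of the combined call, following the shape of the test changes below (the path is made up for illustration):

    // Illustrative sketch: per-CF tuning goes on ColumnFamilyOptions, DB-wide tuning on DBOptions.
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let mut cf_opts = ColumnFamilyOptions::new();
    cf_opts.set_num_levels(2);
    // column family names and their options are passed as parallel vectors
    let db = DB::open_cf(opts, "/tmp/_rust_rocksdb_cf_example", vec!["default"], vec![cf_opts]).unwrap();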
tests/test_column_family.rs
View file @ 130c764e
...
@@ -13,7 +13,7 @@
// limitations under the License.
//
use rocksdb::{DB, MergeOperands, Options, Writable};
use rocksdb::{DB, MergeOperands, DBOptions, ColumnFamilyOptions, Writable};
use tempdir::TempDir;

#[test]
...
@@ -23,12 +23,13 @@ pub fn test_column_family() {
    // should be able to create column families
    {
        let mut opts = Options::new();
        let mut opts = DBOptions::new();
        opts.create_if_missing(true);
        opts.add_merge_operator("test operator", test_provided_merge);
        let mut cf_opts = ColumnFamilyOptions::new();
        let mut db = DB::open(opts, path_str).unwrap();
        cf_opts.add_merge_operator("test operator", test_provided_merge);
        let opts = Options::new();
        let mut db = DB::open_cf(opts, path_str, vec!["default"], vec![cf_opts]).unwrap();
        match db.create_cf("cf1", &opts) {
        let cf_opts = ColumnFamilyOptions::new();
        match db.create_cf("cf1", cf_opts) {
            Ok(_) => println!("cf1 created successfully"),
            Err(e) => {
                panic!("could not create column family: {}", e);
...
@@ -39,9 +40,9 @@ pub fn test_column_family() {
    // should fail to open db without specifying same column families
    {
        let mut opts = Options::new();
        let mut cf_opts = ColumnFamilyOptions::new();
        opts.add_merge_operator("test operator", test_provided_merge);
        cf_opts.add_merge_operator("test operator", test_provided_merge);
        match DB::open(opts, path_str) {
        match DB::open_cf(DBOptions::new(), path_str, vec!["default"], vec![cf_opts]) {
            Ok(_) => {
                panic!("should not have opened DB successfully without \
                        specifying column
...
@@ -56,18 +57,18 @@ pub fn test_column_family() {
    // should properly open db when specifying all column families
    {
        let mut opts = Options::new();
        let mut cf_opts = ColumnFamilyOptions::new();
        opts.add_merge_operator("test operator", test_provided_merge);
        cf_opts.add_merge_operator("test operator", test_provided_merge);
        match DB::open_cf(Options::new(), path_str, &["cf1"], &[&opts]) {
        match DB::open_cf(DBOptions::new(), path_str, vec!["cf1"], vec![cf_opts]) {
            Ok(_) => println!("successfully opened db with column family"),
            Err(e) => panic!("failed to open db with column family: {}", e),
        }
    }
    // TODO should be able to write, read, merge, batch, and iterate over a cf
    {
        let mut opts = Options::new();
        let mut cf_opts = ColumnFamilyOptions::new();
        opts.add_merge_operator("test operator", test_provided_merge);
        cf_opts.add_merge_operator("test operator", test_provided_merge);
        let db = match DB::open_cf(Options::new(), path_str, &["cf1"], &[&opts]) {
        let db = match DB::open_cf(DBOptions::new(), path_str, vec!["cf1"], vec![cf_opts]) {
            Ok(db) => {
                println!("successfully opened db with column family");
                db
...
@@ -114,9 +115,13 @@ pub fn test_column_family() {
    {}
    // TODO should be able to iterate over a cf
    {}
    // should b able to drop a cf
    // should be able to drop a cf
    {
        let mut db = DB::open_cf(Options::new(), path_str, &["cf1"], &[&Options::new()]).unwrap();
        let mut db = DB::open_cf(DBOptions::new(), path_str, vec!["cf1"], vec![ColumnFamilyOptions::new()]).unwrap();
        match db.drop_cf("cf1") {
            Ok(_) => println!("cf1 successfully dropped."),
            Err(e) => panic!("failed to drop column family: {}", e),
...
tests/test_compact_range.rs
View file @ 130c764e
...
@@ -11,14 +11,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{DB, Options, Range, Writable};
use rocksdb::{DB, DBOptions, Range, Writable};
use tempdir::TempDir;

#[test]
fn test_compact_range() {
    let path = TempDir::new("_rust_rocksdb_test_compact_range").expect("");
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    let samples = vec![(b"k1".to_vec(), b"value--------1".to_vec()),
...
tests/test_compaction_filter.rs
View file @ 130c764e
...
@@ -11,7 +11,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rocksdb::{Writable, DB, CompactionFilter, Options};
use rocksdb::{Writable, DB, CompactionFilter, DBOptions, ColumnFamilyOptions};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use tempdir::TempDir;
...
@@ -40,19 +40,24 @@ impl Drop for Filter {
#[test]
fn test_compaction_filter() {
    let path = TempDir::new("_rust_rocksdb_writebacktest").expect("");
    let mut opts = Options::new();
    let mut cf_opts = ColumnFamilyOptions::new();
    let drop_called = Arc::new(AtomicBool::new(false));
    let filtered_kvs = Arc::new(RwLock::new(vec![]));
    // set ignore_snapshots to false
    opts.set_compaction_filter("test",
    cf_opts.set_compaction_filter("test",
                               false,
                               Box::new(Filter {
                                   drop_called: drop_called.clone(),
                                   filtered_kvs: filtered_kvs.clone(),
                               }))
        .unwrap();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
    let samples = vec![(b"key1".to_vec(), b"value1".to_vec()),
                       (b"key2".to_vec(), b"value2".to_vec())];
    for &(ref k, ref v) in &samples {
...
@@ -71,10 +76,10 @@ fn test_compaction_filter() {
    }
    drop(db);
    // reregister with ignore_snapshots set to true
    let mut opts = Options::new();
    let mut cf_opts = ColumnFamilyOptions::new();
    opts.set_compaction_filter("test",
    let opts = DBOptions::new();
    cf_opts.set_compaction_filter("test",
                               true,
                               Box::new(Filter {
                                   drop_called: drop_called.clone(),
...
@@ -84,7 +89,11 @@ fn test_compaction_filter() {
    assert!(drop_called.load(Ordering::Relaxed));
    drop_called.store(false, Ordering::Relaxed);
    {
        let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
        let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
        let _snap = db.snapshot();
        // Because ignore_snapshots is true, all the keys will be compacted.
        db.compact_range(Some(b"key1"), Some(b"key3"));
...
@@ -93,6 +102,5 @@ fn test_compaction_filter() {
    }
    assert_eq!(*filtered_kvs.read().unwrap(), samples);
    assert!(drop_called.load(Ordering::Relaxed));
}
tests/test_delete_range.rs
View file @ 130c764e
...
@@ -15,7 +15,7 @@ use rocksdb::*;
use std::fs;
use tempdir::TempDir;

fn gen_sst(opt: Options, cf: Option<&CFHandle>, path: &str) {
fn gen_sst(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    let mut writer = if cf.is_some() {
...
@@ -157,7 +157,7 @@ fn test_delete_range_sst_files() {
fn test_delete_range_ingest_file() {
    let path = TempDir::new("_rust_rocksdb_test_delete_range_ingest_file").expect("");
    let path_str = path.path().to_str().unwrap();
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let mut db = DB::open(opts, path_str).unwrap();
    let gen_path = TempDir::new("_rust_rocksdb_ingest_sst_gen").expect("");
...
@@ -183,10 +183,10 @@ fn test_delete_range_ingest_file() {
                  db.cf_handle("default"),
                  &[(b"key1", None), (b"key2", None), (b"key3", None), (b"key4", Some(b"value4"))]);

    let cf_opts = Options::new();
    let cf_opts = ColumnFamilyOptions::new();
    db.create_cf("cf1", &cf_opts).unwrap();
    db.create_cf("cf1", cf_opts).unwrap();
    let handle = db.cf_handle("cf1").unwrap();

    gen_sst(cf_opts, None, test_sstfile_str);
    gen_sst(ColumnFamilyOptions::new(), None, test_sstfile_str);
    db.ingest_external_file_cf(handle, &ingest_opt, &[test_sstfile_str])
        .unwrap();
...
tests/test_event_listener.rs
View file @ 130c764e
...
@@ -77,7 +77,7 @@ fn test_event_listener_basic() {
    let path = TempDir::new("_rust_rocksdb_event_listener_flush").expect("");
    let path_str = path.path().to_str().unwrap();

    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    let counter = EventCounter::default();
    opts.add_event_listener(counter.clone());
    opts.create_if_missing(true);
...
@@ -111,7 +111,7 @@ fn test_event_listener_ingestion() {
    let path = TempDir::new("_rust_rocksdb_event_listener_ingestion").expect("");
    let path_str = path.path().to_str().unwrap();

    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    let counter = EventCounter::default();
    opts.add_event_listener(counter.clone());
    opts.create_if_missing(true);
...
tests/test_ingest_external_file.rs
View file @ 130c764e
...
@@ -16,7 +16,10 @@ use rocksdb::*;
use std::fs;
use tempdir::TempDir;

pub fn gen_sst(opt: Options, cf: Option<&CFHandle>, path: &str, data: &[(&[u8], &[u8])]) {
pub fn gen_sst(opt: ColumnFamilyOptions, cf: Option<&CFHandle>, path: &str, data: &[(&[u8], &[u8])]) {
    let _ = fs::remove_file(path);
    let env_opt = EnvOptions::new();
    let mut writer = if cf.is_some() {
...
@@ -36,11 +39,11 @@ fn test_ingest_external_file() {
    let path = TempDir::new("_rust_rocksdb_ingest_sst").expect("");
    let path_str = path.path().to_str().unwrap();

    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    let mut db = DB::open(opts, path_str).unwrap();
    let cf_opts = Options::new();
    let cf_opts = ColumnFamilyOptions::new();
    db.create_cf("cf1", &cf_opts).unwrap();
    db.create_cf("cf1", cf_opts).unwrap();
    let handle = db.cf_handle("cf1").unwrap();
    let gen_path = TempDir::new("_rust_rocksdb_ingest_sst_gen").expect("");
...
@@ -60,7 +63,7 @@ fn test_ingest_external_file() {
    assert_eq!(db.get(b"k1").unwrap().unwrap(), b"v1");
    assert_eq!(db.get(b"k2").unwrap().unwrap(), b"v2");

    gen_sst(cf_opts,
    gen_sst(ColumnFamilyOptions::new(),
            None,
            test_sstfile_str,
            &[(b"k1", b"v3"), (b"k2", b"v4")]);
...
@@ -71,8 +74,7 @@ fn test_ingest_external_file() {
    let snap = db.snapshot();

    let opt = Options::new();
    gen_sst(ColumnFamilyOptions::new(),
    gen_sst(opt,
            None,
            test_sstfile_str,
            &[(b"k2", b"v5"), (b"k3", b"v6")]);
...
@@ -86,4 +88,4 @@ fn test_ingest_external_file() {
    assert_eq!(snap.get_cf(handle, b"k1").unwrap().unwrap(), b"v3");
    assert_eq!(snap.get_cf(handle, b"k2").unwrap().unwrap(), b"v4");
    assert!(snap.get_cf(handle, b"k3").unwrap().is_none());
}
\ No newline at end of file
tests/test_iterator.rs
View file @ 130c764e
...
@@ -165,7 +165,7 @@ pub fn test_iterator() {
#[test]
fn test_seek_for_prev() {
    let path = TempDir::new("_rust_rocksdb_seek_for_prev").expect("");
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    {
        let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -213,7 +213,7 @@ fn test_seek_for_prev() {
#[test]
fn read_with_upper_bound() {
    let path = TempDir::new("_rust_rocksdb_read_with_upper_bound_test").expect("");
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    {
        let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -237,18 +237,23 @@ fn test_total_order_seek() {
    let mut bbto = BlockBasedOptions::new();
    bbto.set_bloom_filter(10, false);
    bbto.set_whole_key_filtering(false);
    let mut opts = Options::new();
    let mut cf_opts = ColumnFamilyOptions::new();
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_block_based_table_factory(&bbto);
    cf_opts.set_block_based_table_factory(&bbto);
    opts.set_prefix_extractor("FixedPrefixTransform",
    cf_opts.set_prefix_extractor("FixedPrefixTransform",
                              Box::new(FixedPrefixTransform { prefix_len: 2 }))
        .unwrap();
    // also create prefix bloom for memtable
    opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
    cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
    let keys = vec![b"k1-1", b"k1-2", b"k1-3", b"k2-1", b"k2-2", b"k2-3", b"k3-1", b"k3-2",
                    b"k3-3"];
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
    let wopts = WriteOptions::new();

    // sst1
...
@@ -315,14 +320,19 @@ fn test_fixed_suffix_seek() {
    let mut bbto = BlockBasedOptions::new();
    bbto.set_bloom_filter(10, false);
    bbto.set_whole_key_filtering(false);
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
    opts.set_block_based_table_factory(&bbto);
    cf_opts.set_block_based_table_factory(&bbto);
    opts.set_prefix_extractor("FixedSuffixTransform",
    cf_opts.set_prefix_extractor("FixedSuffixTransform",
                              Box::new(FixedSuffixTransform { suffix_len: 2 }))
        .unwrap();

    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
    db.put(b"k-eghe-5", b"a").unwrap();
    db.put(b"k-24yfae-6", b"a").unwrap();
    db.put(b"k-h1fwd-7", b"a").unwrap();
...
tests/test_prefix_extractor.rs
View file @ 130c764e
...
@@ -36,7 +36,7 @@ fn test_prefix_extractor_compatibility() {
    // create db with no prefix extractor, and insert data
    {
        let mut opts = Options::new();
        let mut opts = DBOptions::new();
        opts.create_if_missing(true);
        let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
        let wopts = WriteOptions::new();
...
@@ -53,15 +53,20 @@ fn test_prefix_extractor_compatibility() {
    let mut bbto = BlockBasedOptions::new();
    bbto.set_bloom_filter(10, false);
    bbto.set_whole_key_filtering(false);
    let mut opts = Options::new();
    let mut opts = DBOptions::new();
    let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(false);
    opts.set_block_based_table_factory(&bbto);
    cf_opts.set_block_based_table_factory(&bbto);
    opts.set_prefix_extractor("FixedPrefixTransform",
    cf_opts.set_prefix_extractor("FixedPrefixTransform",
                              Box::new(FixedPrefixTransform { prefix_len: 2 }))
        .unwrap();
    // also create prefix bloom for memtable
    opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
    cf_opts.set_memtable_prefix_bloom_size_ratio(0.1 as f64);
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
    let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
    let wopts = WriteOptions::new();

    // sst2 with prefix bloom.
...
tests/test_rocksdb_options.rs
View file @
130c764e
...
@@ -11,8 +11,8 @@
...
@@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// See the License for the specific language governing permissions and
// limitations under the License.
// limitations under the License.
use
rocksdb
::{
DB
,
Options
,
BlockBasedOptions
,
WriteOptions
,
SliceTransform
,
Writable
,
use
rocksdb
::{
DB
,
ColumnFamilyOptions
,
DBOptions
,
BlockBasedOptions
,
WriteOptions
,
SliceTransform
,
CompactOptions
};
Writable
,
CompactOptions
};
use
rocksdb
::
crocksdb_ffi
::{
DBStatisticsHistogramType
as
HistogramType
,
use
rocksdb
::
crocksdb_ffi
::{
DBStatisticsHistogramType
as
HistogramType
,
DBStatisticsTickerType
as
TickerType
,
DBInfoLogLevel
as
InfoLogLevel
,
DBStatisticsTickerType
as
TickerType
,
DBInfoLogLevel
as
InfoLogLevel
,
CompactionPriority
,
DBCompressionType
};
CompactionPriority
,
DBCompressionType
};
...
@@ -25,17 +25,22 @@ use tempdir::TempDir;
...
@@ -25,17 +25,22 @@ use tempdir::TempDir;
#[test]
#[test]
fn
test_set_num_levels
()
{
fn
test_set_num_levels
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_num_levels"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_num_levels"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DBOptions
::
new
();
let
mut
cf_opts
=
ColumnFamilyOptions
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_num_levels
(
2
);
cf_opts
.set_num_levels
(
2
);
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
let
db
=
DB
::
open_cf
(
opts
,
path
.path
()
.to_str
()
.unwrap
(),
vec!
[
"default"
],
vec!
[
cf_opts
])
.unwrap
();
drop
(
db
);
drop
(
db
);
}
}
#[test]
#[test]
fn
test_log_file_opt
()
{
fn
test_log_file_opt
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_log_file_opt"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_log_file_opt"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_max_log_file_size
(
100
*
1024
*
1024
);
opts
.set_max_log_file_size
(
100
*
1024
*
1024
);
opts
.set_keep_log_file_num
(
10
);
opts
.set_keep_log_file_num
(
10
);
...
@@ -46,7 +51,7 @@ fn test_log_file_opt() {
...
@@ -46,7 +51,7 @@ fn test_log_file_opt() {
#[test]
#[test]
fn
test_compaction_readahead_size
()
{
fn
test_compaction_readahead_size
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_compaction_readahead_size"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_compaction_readahead_size"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_compaction_readahead_size
(
2048
);
opts
.set_compaction_readahead_size
(
2048
);
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
...
@@ -55,14 +60,14 @@ fn test_compaction_readahead_size() {
...
@@ -55,14 +60,14 @@ fn test_compaction_readahead_size() {
#[test]
#[test]
fn
test_set_max_manifest_file_size
()
{
fn
test_set_max_manifest_file_size
()
{
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
let
size
=
20
*
1024
*
1024
;
let
size
=
20
*
1024
*
1024
;
opts
.set_max_manifest_file_size
(
size
)
opts
.set_max_manifest_file_size
(
size
)
}
}
#[test]
#[test]
fn
test_enable_statistics
()
{
fn
test_enable_statistics
()
{
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.enable_statistics
();
opts
.enable_statistics
();
opts
.set_stats_dump_period_sec
(
60
);
opts
.set_stats_dump_period_sec
(
60
);
assert
!
(
opts
.get_statistics
()
.is_some
());
assert
!
(
opts
.get_statistics
()
.is_some
());
...
@@ -75,7 +80,7 @@ fn test_enable_statistics() {
...
@@ -75,7 +80,7 @@ fn test_enable_statistics() {
assert_eq!
(
opts
.get_statistics_ticker_count
(
TickerType
::
BlockCacheMiss
),
assert_eq!
(
opts
.get_statistics_ticker_count
(
TickerType
::
BlockCacheMiss
),
0
);
0
);
let
opts
=
Options
::
new
();
let
opts
=
DB
Options
::
new
();
assert
!
(
opts
.get_statistics
()
.is_none
());
assert
!
(
opts
.get_statistics
()
.is_none
());
}
}
...
@@ -96,14 +101,19 @@ impl SliceTransform for FixedPrefixTransform {
...
@@ -96,14 +101,19 @@ impl SliceTransform for FixedPrefixTransform {
#[test]
#[test]
fn
test_memtable_insert_hint_prefix_extractor
()
{
fn
test_memtable_insert_hint_prefix_extractor
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_memtable_insert_hint_prefix_extractor"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_memtable_insert_hint_prefix_extractor"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DBOptions
::
new
();
let
mut
cf_opts
=
ColumnFamilyOptions
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_memtable_insert_hint_prefix_extractor
(
"FixedPrefixTransform"
,
cf_
opts
.set_memtable_insert_hint_prefix_extractor
(
"FixedPrefixTransform"
,
Box
::
new
(
FixedPrefixTransform
{
Box
::
new
(
FixedPrefixTransform
{
prefix_len
:
2
,
prefix_len
:
2
,
}))
}))
.unwrap
();
.unwrap
();
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
let
db
=
DB
::
open_cf
(
opts
,
path
.path
()
.to_str
()
.unwrap
(),
vec!
[
"default"
],
vec!
[
cf_opts
])
.unwrap
();
let
wopts
=
WriteOptions
::
new
();
let
wopts
=
WriteOptions
::
new
();
db
.put_opt
(
b
"k0-1"
,
b
"a"
,
&
wopts
)
.unwrap
();
db
.put_opt
(
b
"k0-1"
,
b
"a"
,
&
wopts
)
.unwrap
();
...
@@ -117,7 +127,7 @@ fn test_memtable_insert_hint_prefix_extractor() {
...
@@ -117,7 +127,7 @@ fn test_memtable_insert_hint_prefix_extractor() {
#[test]
#[test]
fn
test_set_delayed_write_rate
()
{
fn
test_set_delayed_write_rate
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_delayed_write_rate"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_delayed_write_rate"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_delayed_write_rate
(
2
*
1024
*
1024
);
opts
.set_delayed_write_rate
(
2
*
1024
*
1024
);
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
...
@@ -127,7 +137,7 @@ fn test_set_delayed_write_rate() {
...
@@ -127,7 +137,7 @@ fn test_set_delayed_write_rate() {
#[test]
#[test]
fn
test_set_ratelimiter
()
{
fn
test_set_ratelimiter
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_rate_limiter"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_rate_limiter"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
// compaction and flush rate limited below 100MB/sec
// compaction and flush rate limited below 100MB/sec
opts
.set_ratelimiter
(
100
*
1024
*
1024
);
opts
.set_ratelimiter
(
100
*
1024
*
1024
);
...
@@ -138,7 +148,7 @@ fn test_set_ratelimiter() {
...
@@ -138,7 +148,7 @@ fn test_set_ratelimiter() {
#[test]
#[test]
fn
test_set_wal_opt
()
{
fn
test_set_wal_opt
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_wal_opt"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_set_wal_opt"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_wal_ttl_seconds
(
86400
);
opts
.set_wal_ttl_seconds
(
86400
);
opts
.set_wal_size_limit_mb
(
10
);
opts
.set_wal_size_limit_mb
(
10
);
...
@@ -152,7 +162,7 @@ fn test_set_wal_opt() {
...
@@ -152,7 +162,7 @@ fn test_set_wal_opt() {
#[test]
#[test]
fn
test_sync_wal
()
{
fn
test_sync_wal
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_sync_wal"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_sync_wal"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
let
db
=
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
db
.put
(
b
"key"
,
b
"value"
)
.unwrap
();
db
.put
(
b
"key"
,
b
"value"
)
.unwrap
();
...
@@ -163,7 +173,7 @@ fn test_sync_wal() {
...
@@ -163,7 +173,7 @@ fn test_sync_wal() {
#[test]
#[test]
fn
test_create_info_log
()
{
fn
test_create_info_log
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_create_info_log_opt"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_create_info_log_opt"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_info_log_level
(
InfoLogLevel
::
Debug
);
opts
.set_info_log_level
(
InfoLogLevel
::
Debug
);
opts
.set_log_file_time_to_roll
(
1
);
opts
.set_log_file_time_to_roll
(
1
);
...
@@ -192,7 +202,7 @@ fn test_create_info_log() {
...
@@ -192,7 +202,7 @@ fn test_create_info_log() {
#[test]
#[test]
fn
test_auto_roll_max_size_info_log
()
{
fn
test_auto_roll_max_size_info_log
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_max_size_info_log_opt"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_test_max_size_info_log_opt"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_max_log_file_size
(
10
);
opts
.set_max_log_file_size
(
10
);
...
@@ -213,27 +223,37 @@ fn test_auto_roll_max_size_info_log() {
...
@@ -213,27 +223,37 @@ fn test_auto_roll_max_size_info_log() {
#[test]
#[test]
fn
test_set_pin_l0_filter_and_index_blocks_in_cache
()
{
fn
test_set_pin_l0_filter_and_index_blocks_in_cache
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_set_cache_and_index"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_set_cache_and_index"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DBOptions
::
new
();
let
mut
cf_opts
=
ColumnFamilyOptions
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
let
mut
block_opts
=
BlockBasedOptions
::
new
();
let
mut
block_opts
=
BlockBasedOptions
::
new
();
block_opts
.set_pin_l0_filter_and_index_blocks_in_cache
(
true
);
block_opts
.set_pin_l0_filter_and_index_blocks_in_cache
(
true
);
opts
.set_block_based_table_factory
(
&
block_opts
);
cf_opts
.set_block_based_table_factory
(
&
block_opts
);
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
DB
::
open_cf
(
opts
,
path
.path
()
.to_str
()
.unwrap
(),
vec!
[
"default"
],
vec!
[
cf_opts
])
.unwrap
();
}
}
#[test]
#[test]
fn
test_pending_compaction_bytes_limit
()
{
fn
test_pending_compaction_bytes_limit
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_pending_compaction_bytes_limit"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_pending_compaction_bytes_limit"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DBOptions
::
new
();
let
mut
cf_opts
=
ColumnFamilyOptions
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_soft_pending_compaction_bytes_limit
(
64
*
1024
*
1024
*
1024
);
cf_opts
.set_soft_pending_compaction_bytes_limit
(
64
*
1024
*
1024
*
1024
);
opts
.set_hard_pending_compaction_bytes_limit
(
256
*
1024
*
1024
*
1024
);
cf_opts
.set_hard_pending_compaction_bytes_limit
(
256
*
1024
*
1024
*
1024
);
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
DB
::
open_cf
(
opts
,
path
.path
()
.to_str
()
.unwrap
(),
vec!
[
"default"
],
vec!
[
cf_opts
])
.unwrap
();
}
}
#[test]
#[test]
fn
test_set_max_subcompactions
()
{
fn
test_set_max_subcompactions
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_max_subcompactions"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_max_subcompactions"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_max_subcompactions
(
4
);
opts
.set_max_subcompactions
(
4
);
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
...
@@ -242,7 +262,7 @@ fn test_set_max_subcompactions() {
...
@@ -242,7 +262,7 @@ fn test_set_max_subcompactions() {
#[test]
#[test]
fn
test_set_bytes_per_sync
()
{
fn
test_set_bytes_per_sync
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_bytes_per_sync"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_bytes_per_sync"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DB
Options
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_bytes_per_sync
(
1024
*
1024
);
opts
.set_bytes_per_sync
(
1024
*
1024
);
opts
.set_wal_bytes_per_sync
(
1024
*
1024
);
opts
.set_wal_bytes_per_sync
(
1024
*
1024
);
...
@@ -252,24 +272,34 @@ fn test_set_bytes_per_sync() {
...
@@ -252,24 +272,34 @@ fn test_set_bytes_per_sync() {
#[test]
#[test]
fn
test_set_optimize_filters_for_hits
()
{
fn
test_set_optimize_filters_for_hits
()
{
let
path
=
TempDir
::
new
(
"_rust_rocksdb_optimize_filters_for_hits"
)
.expect
(
""
);
let
path
=
TempDir
::
new
(
"_rust_rocksdb_optimize_filters_for_hits"
)
.expect
(
""
);
let
mut
opts
=
Options
::
new
();
let
mut
opts
=
DBOptions
::
new
();
let
mut
cf_opts
=
ColumnFamilyOptions
::
new
();
opts
.create_if_missing
(
true
);
opts
.create_if_missing
(
true
);
opts
.set_optimize_filters_for_hits
(
true
);
cf_opts
.set_optimize_filters_for_hits
(
true
);
DB
::
open
(
opts
,
path
.path
()
.to_str
()
.unwrap
())
.unwrap
();
DB
::
open_cf
(
opts
,
path
.path
()
.to_str
()
.unwrap
(),
vec!
[
"default"
],
vec!
[
cf_opts
])
.unwrap
();
}
}
#[test]
fn test_get_block_cache_usage() {
    let path = TempDir::new("_rust_rocksdb_set_cache_and_index").expect("");
-   let mut opts = Options::new();
-   assert_eq!(opts.get_block_cache_usage(), 0);
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
+   assert_eq!(cf_opts.get_block_cache_usage(), 0);
    opts.create_if_missing(true);
    let mut block_opts = BlockBasedOptions::new();
    block_opts.set_lru_cache(16 * 1024 * 1024);
-   opts.set_block_based_table_factory(&block_opts);
+   cf_opts.set_block_based_table_factory(&block_opts);
-   let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts])
+       .unwrap();
    for i in 0..200 {
        db.put(format!("k_{}", i).as_bytes(), b"v").unwrap();
...
@@ -285,10 +315,15 @@ fn test_get_block_cache_usage() {
#[test]
fn test_set_level_compaction_dynamic_level_bytes() {
    let path = TempDir::new("_rust_rocksdb_level_compaction_dynamic_level_bytes").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
-   opts.set_level_compaction_dynamic_level_bytes(true);
+   cf_opts.set_level_compaction_dynamic_level_bytes(true);
-   DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
}
#[test]
...
@@ -308,7 +343,7 @@ fn test_compact_options() {
#[test]
fn test_direct_read_write() {
    let path = TempDir::new("_rust_rocksdb_direct_read_write").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_use_direct_reads(true);
    opts.set_use_direct_io_for_flush_and_compaction(true);
...
@@ -318,7 +353,7 @@ fn test_direct_read_write() {
#[test]
fn test_writable_file_max_buffer_size() {
    let path = TempDir::new("_rust_rocksdb_writable_file_max_buffer_size").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_writable_file_max_buffer_size(1024 * 1024);
    DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -327,7 +362,7 @@ fn test_writable_file_max_buffer_size() {
#[test]
fn test_set_base_background_compactions() {
    let path = TempDir::new("_rust_rocksdb_base_background_compactions").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_base_background_compactions(4);
    DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -336,16 +371,21 @@ fn test_set_base_background_compactions() {
#[test]
fn test_set_compaction_pri() {
    let path = TempDir::new("_rust_rocksdb_compaction_pri").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
-   opts.compaction_priority(CompactionPriority::MinOverlappingRatio);
+   cf_opts.compaction_priority(CompactionPriority::MinOverlappingRatio);
-   DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
}
#[test]
fn test_allow_concurrent_memtable_write() {
    let path = TempDir::new("_rust_rocksdb_allow_concurrent_memtable_write").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.allow_concurrent_memtable_write(false);
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -357,7 +397,7 @@ fn test_allow_concurrent_memtable_write() {
#[test]
fn test_enable_pipelined_write() {
    let path = TempDir::new("_rust_rocksdb_enable_pipelined_write").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.enable_pipelined_write(true);
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
@@ -368,41 +408,47 @@ fn test_enable_pipelined_write() {
#[test]
fn test_get_compression() {
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
-   opts.compression(DBCompressionType::Snappy);
+   cf_opts.compression(DBCompressionType::Snappy);
-   assert_eq!(opts.get_compression(), DBCompressionType::Snappy);
+   assert_eq!(cf_opts.get_compression(), DBCompressionType::Snappy);
}
#[test]
fn test_get_compression_per_level() {
-   let mut opts = Options::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    let compressions = &[DBCompressionType::No, DBCompressionType::Snappy];
-   opts.compression_per_level(compressions);
+   cf_opts.compression_per_level(compressions);
-   let v = opts.get_compression_per_level();
+   let v = cf_opts.get_compression_per_level();
    assert_eq!(v.len(), 2);
    assert_eq!(v[0], DBCompressionType::No);
    assert_eq!(v[1], DBCompressionType::Snappy);
-   let mut opts2 = Options::new();
+   let mut cf_opts2 = ColumnFamilyOptions::new();
    let empty: &[DBCompressionType] = &[];
-   opts2.compression_per_level(empty);
+   cf_opts2.compression_per_level(empty);
-   let v2 = opts2.get_compression_per_level();
+   let v2 = cf_opts2.get_compression_per_level();
    assert_eq!(v2.len(), 0);
}
#[test]
fn test_bottommost_compression() {
    let path = TempDir::new("_rust_rocksdb_bottommost_compression").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
-   opts.bottommost_compression(DBCompressionType::No);
+   cf_opts.bottommost_compression(DBCompressionType::No);
-   DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts]).unwrap();
}
#[test]
fn test_clone_options() {
-   let mut opts = Options::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
-   opts.compression(DBCompressionType::Snappy);
+   cf_opts.compression(DBCompressionType::Snappy);
-   let opts2 = opts.clone();
+   let cf_opts2 = cf_opts.clone();
-   assert_eq!(opts.get_compression(), opts2.get_compression());
+   assert_eq!(cf_opts.get_compression(), cf_opts2.get_compression());
}
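Taken together, the hunks above also show which setters ended up on which struct after the refactor. The grouping in the sketch below is inferred purely from this diff, not from crate documentation, and the import list is assumed from how the tests use these types.

// Grouping inferred from the hunks in tests/test_rocksdb_options.rs above.
use rocksdb::{ColumnFamilyOptions, CompactionPriority, DBCompressionType, DBOptions};

fn options_split_sketch() {
    // Still configured on DBOptions (whole-instance behaviour):
    let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.set_max_subcompactions(4);
    opts.set_bytes_per_sync(1024 * 1024);
    opts.enable_pipelined_write(true);

    // Now configured on ColumnFamilyOptions (per-column-family behaviour):
    let mut cf_opts = ColumnFamilyOptions::new();
    cf_opts.compression(DBCompressionType::Snappy);
    cf_opts.compaction_priority(CompactionPriority::MinOverlappingRatio);
    cf_opts.set_optimize_filters_for_hits(true);
    cf_opts.set_level_compaction_dynamic_level_bytes(true);
}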
tests/test_slice_transform.rs
View file @ 130c764e
...
@@ -11,7 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-use rocksdb::{Writable, DB, SliceTransform, Options, SeekKey, BlockBasedOptions};
+use rocksdb::{Writable, DB, SliceTransform, ColumnFamilyOptions, DBOptions, SeekKey,
+              BlockBasedOptions};
use tempdir::TempDir;
struct FixedPostfixTransform {
...
@@ -32,20 +33,25 @@ impl SliceTransform for FixedPostfixTransform {
#[test]
fn test_slice_transform() {
    let path = TempDir::new("_rust_rocksdb_slice_transform_test").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    let mut block_opts = BlockBasedOptions::new();
    block_opts.set_bloom_filter(10, false);
    block_opts.set_whole_key_filtering(false);
-   opts.set_block_based_table_factory(&block_opts);
+   cf_opts.set_block_based_table_factory(&block_opts);
-   opts.set_memtable_prefix_bloom_size_ratio(0.25);
+   cf_opts.set_memtable_prefix_bloom_size_ratio(0.25);
-   opts.set_prefix_extractor("test", Box::new(FixedPostfixTransform { postfix_len: 2 }))
-       .unwrap();
+   cf_opts.set_prefix_extractor("test", Box::new(FixedPostfixTransform { postfix_len: 2 }))
+       .unwrap();
    opts.create_if_missing(true);
-   let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts])
+       .unwrap();
    let samples = vec![(b"key_01".to_vec(), b"1".to_vec()),
                       (b"key_02".to_vec(), b"2".to_vec()),
                       (b"key_0303".to_vec(), b"3".to_vec()),
...
tests/test_statistics.rs
View file @ 130c764e
...
@@ -18,7 +18,7 @@ use tempdir::TempDir;
#[test]
fn test_db_statistics() {
    let path = TempDir::new("_rust_rocksdb_statistics").expect("");
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
    opts.create_if_missing(true);
    opts.enable_statistics();
    let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
...
tests/test_table_properties.rs
View file @ 130c764e
...
@@ -12,8 +12,9 @@
// limitations under the License.
use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
-use rocksdb::{DB, Range, Options, Writable, DBEntryType, TablePropertiesCollection,
-              TablePropertiesCollector, TablePropertiesCollectorFactory, UserCollectedProperties};
+use rocksdb::{DB, Range, ColumnFamilyOptions, DBOptions, Writable, DBEntryType,
+              TablePropertiesCollection, TablePropertiesCollector, TablePropertiesCollectorFactory,
+              UserCollectedProperties};
use std::collections::HashMap;
use std::fmt;
use tempdir::TempDir;
...
@@ -159,12 +160,17 @@ fn check_collection(collection: &TablePropertiesCollection,
#[test]
fn test_table_properties_collector_factory() {
    let f = ExampleFactory::new();
-   let mut opts = Options::new();
+   let mut opts = DBOptions::new();
+   let mut cf_opts = ColumnFamilyOptions::new();
    opts.create_if_missing(true);
-   opts.add_table_properties_collector_factory("example-collector", Box::new(f));
+   cf_opts.add_table_properties_collector_factory("example-collector", Box::new(f));
    let path = TempDir::new("_rust_rocksdb_collectortest").expect("");
-   let db = DB::open(opts, path.path().to_str().unwrap()).unwrap();
+   let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts])
+       .unwrap();
    let samples = vec![(b"key1".to_vec(), b"value1".to_vec()),
                       (b"key2".to_vec(), b"value2".to_vec()),
...
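One consequence visible in this last file is that a table-properties collector factory is now registered on the column family's options rather than on a single global Options value. The fragment below is not part of the commit; it condenses the changed calls above, and it is not standalone because ExampleFactory is the factory type defined earlier in tests/test_table_properties.rs (outside this excerpt).

// Condensed restatement of the hunk above; ExampleFactory is defined elsewhere
// in tests/test_table_properties.rs and is only referenced here.
fn open_with_collector_sketch() {
    let f = ExampleFactory::new();

    let mut opts = DBOptions::new();
    opts.create_if_missing(true);

    // The collector factory now belongs to the column family's options.
    let mut cf_opts = ColumnFamilyOptions::new();
    cf_opts.add_table_properties_collector_factory("example-collector", Box::new(f));

    let path = TempDir::new("_rust_rocksdb_collector_sketch").expect("");
    let db = DB::open_cf(opts, path.path().to_str().unwrap(), vec!["default"], vec![cf_opts])
        .unwrap();
    db.put(b"key1", b"value1").unwrap();
}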