Commit 79b56519 authored by zhangjinpeng1987, committed by Huachao Huang

Export compaction reason (#242)

* export compaction reason
Signed-off-by: zhangjinpeng1987 <zhangjinpeng@pingcap.com>
parent 24b6bba8
@@ -143,6 +143,7 @@ using rocksdb::ColumnFamilyMetaData;
 using rocksdb::LevelMetaData;
 using rocksdb::SstFileMetaData;
 using rocksdb::CompactionOptions;
+using rocksdb::CompactionReason;
 using rocksdb::PerfLevel;
 using rocksdb::PerfContext;
 using rocksdb::BottommostLevelCompaction;
@@ -1884,6 +1885,11 @@ uint64_t crocksdb_compactionjobinfo_total_output_bytes(
   return info->rep.stats.total_output_bytes;
 }
 
+CompactionReason crocksdb_compactionjobinfo_compaction_reason(
+    const crocksdb_compactionjobinfo_t* info) {
+  return info->rep.compaction_reason;
+}
+
 /* ExternalFileIngestionInfo */
 const char* crocksdb_externalfileingestioninfo_cf_name(
@@ -164,6 +164,44 @@ pub enum CompactionPriority {
     MinOverlappingRatio = 3,
 }
 
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+#[repr(C)]
+pub enum CompactionReason {
+    Unknown,
+    // [Level] number of L0 files > level0_file_num_compaction_trigger
+    LevelL0FilesNum,
+    // [Level] total size of level > MaxBytesForLevel()
+    LevelMaxLevelSize,
+    // [Universal] Compacting for size amplification
+    UniversalSizeAmplification,
+    // [Universal] Compacting for size ratio
+    UniversalSizeRatio,
+    // [Universal] number of sorted runs > level0_file_num_compaction_trigger
+    UniversalSortedRunNum,
+    // [FIFO] total size > max_table_files_size
+    FIFOMaxSize,
+    // [FIFO] reduce number of files.
+    FIFOReduceNumFiles,
+    // [FIFO] files with creation time < (current_time - interval)
+    FIFOTtl,
+    // Manual compaction
+    ManualCompaction,
+    // DB::SuggestCompactRange() marked files for compaction
+    FilesMarkedForCompaction,
+    // [Level] Automatic compaction within bottommost level to cleanup duplicate
+    // versions of same user key, usually due to a released snapshot.
+    BottommostFiles,
+    // Compaction based on TTL
+    Ttl,
+    // According to the comments in flush_job.cc, RocksDB treats flush as
+    // a level 0 compaction in internal stats.
+    Flush,
+    // Compaction caused by external sst file ingestion
+    ExternalSstIngestion,
+    // total number of compaction reasons, new reasons must be added above this.
+    NumOfReasons,
+}
+
 #[derive(Copy, Clone, Debug, Eq, PartialEq)]
 #[repr(C)]
 pub enum DBInfoLogLevel {
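For illustration only, not part of this diff: a minimal sketch of how downstream code might branch on the exported enum; the describe_reason helper is hypothetical.

    // Hypothetical helper: map a CompactionReason to a short label,
    // e.g. for tagging metrics by compaction trigger.
    fn describe_reason(reason: CompactionReason) -> &'static str {
        match reason {
            CompactionReason::ManualCompaction => "manual",
            CompactionReason::LevelL0FilesNum => "l0-file-count",
            CompactionReason::LevelMaxLevelSize => "level-size",
            CompactionReason::Flush => "flush",
            _ => "other",
        }
    }

Since the enum derives Eq and PartialEq, direct comparisons such as reason == CompactionReason::ManualCompaction also work; the test further down relies on exactly that.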
@@ -1488,6 +1526,9 @@ extern "C" {
     pub fn crocksdb_compactionjobinfo_total_output_bytes(
         info: *const DBCompactionJobInfo,
     ) -> uint64_t;
+    pub fn crocksdb_compactionjobinfo_compaction_reason(
+        info: *const DBCompactionJobInfo,
+    ) -> CompactionReason;
     pub fn crocksdb_externalfileingestioninfo_cf_name(
         info: *const DBIngestionInfo,
@@ -12,8 +12,8 @@
 // limitations under the License.
 
 use crocksdb_ffi::{
-    self, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo, DBInstance,
-    DBWriteStallInfo, WriteStallCondition,
+    self, CompactionReason, DBCompactionJobInfo, DBEventListener, DBFlushJobInfo, DBIngestionInfo,
+    DBInstance, DBWriteStallInfo, WriteStallCondition,
 };
 use libc::c_void;
 use std::path::Path;
@@ -116,6 +116,10 @@ impl CompactionJobInfo {
     pub fn total_output_bytes(&self) -> u64 {
         unsafe { crocksdb_ffi::crocksdb_compactionjobinfo_total_output_bytes(&self.0) }
     }
+
+    pub fn compaction_reason(&self) -> CompactionReason {
+        unsafe { crocksdb_ffi::crocksdb_compactionjobinfo_compaction_reason(&self.0) }
+    }
 }
 
 pub struct IngestionInfo(DBIngestionInfo);
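A usage sketch, assuming the re-exports added to the crate root below and that the trait's other callbacks have default implementations; the ReasonCounter type is hypothetical, but the trait and accessor are the ones touched by this commit:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Hypothetical listener that counts manual compactions as they complete.
    #[derive(Default)]
    struct ReasonCounter {
        manual: AtomicUsize,
    }

    impl EventListener for ReasonCounter {
        fn on_compaction_completed(&self, info: &CompactionJobInfo) {
            // compaction_reason() is the accessor introduced by this commit.
            if info.compaction_reason() == CompactionReason::ManualCompaction {
                self.manual.fetch_add(1, Ordering::SeqCst);
            }
        }
    }

Such a listener would be registered the same way as the EventCounter in the test below.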
@@ -25,9 +25,9 @@ pub use event_listener::{
     CompactionJobInfo, EventListener, FlushJobInfo, IngestionInfo, WriteStallInfo,
 };
 pub use librocksdb_sys::{
-    self as crocksdb_ffi, new_bloom_filter, CompactionPriority, DBBottommostLevelCompaction,
-    DBCompactionStyle, DBCompressionType, DBEntryType, DBInfoLogLevel, DBRecoveryMode,
-    DBStatisticsHistogramType, DBStatisticsTickerType, WriteStallCondition,
+    self as crocksdb_ffi, new_bloom_filter, CompactionPriority, CompactionReason,
+    DBBottommostLevelCompaction, DBCompactionStyle, DBCompressionType, DBEntryType, DBInfoLogLevel,
+    DBRecoveryMode, DBStatisticsHistogramType, DBStatisticsTickerType, WriteStallCondition,
 };
 pub use merge_operator::MergeOperands;
 pub use metadata::{ColumnFamilyMetaData, LevelMetaData, SstFileMetaData};
@@ -27,6 +27,7 @@ struct EventCounter {
     output_records: Arc<AtomicUsize>,
     input_bytes: Arc<AtomicUsize>,
     output_bytes: Arc<AtomicUsize>,
+    manual_compaction: Arc<AtomicUsize>,
 }
 
 impl Drop for EventCounter {
@@ -75,6 +76,10 @@ impl EventListener for EventCounter {
             .fetch_add(info.total_input_bytes() as usize, Ordering::SeqCst);
         self.output_bytes
             .fetch_add(info.total_output_bytes() as usize, Ordering::SeqCst);
+        if info.compaction_reason() == CompactionReason::ManualCompaction {
+            self.manual_compaction.fetch_add(1, Ordering::SeqCst);
+        }
     }
 
     fn on_external_file_ingested(&self, info: &IngestionInfo) {
@@ -199,6 +204,7 @@ fn test_event_listener_basic() {
     assert!(
         counter.input_bytes.load(Ordering::SeqCst) > counter.output_bytes.load(Ordering::SeqCst)
     );
+    assert_eq!(counter.manual_compaction.load(Ordering::SeqCst), 1);
 }
 
 #[test]