| Crates.io | kotoba-bench |
| lib.rs | kotoba-bench |
| version | 0.1.16 |
| created_at | 2025-09-18 03:13:43.77409+00 |
| updated_at | 2025-09-18 03:13:43.77409+00 |
| description | Comprehensive benchmarking suite for KotobaDB |
| homepage | https://github.com/com-junkawasaki/kotoba |
| repository | https://github.com/com-junkawasaki/kotoba |
| max_upload_size | |
| id | 1844201 |
| size | 222,690 |
A comprehensive performance benchmarking framework for KotobaDB with advanced analytics, trend analysis, and regression detection.
use kotoba_bench::*;
use kotoba_db::DB;
// Create database instance
let db = DB::open_lsm("benchmark.db").await?;
// Create benchmark configuration
let config = BenchmarkConfig {
duration: Duration::from_secs(60),
concurrency: 32,
warmup_duration: Duration::from_secs(10),
..Default::default()
};
// Run CRUD benchmark
let crud_benchmark = workloads::CrudBenchmark::new(db, 10000);
let runner = BenchmarkRunner::new(config);
let result = runner.run_benchmark(crud_benchmark).await?;
// Generate reports
let reporter = MetricsReporter::new("benchmark_reports");
reporter.generate_reports(&[result])?;
let crud_benchmark = workloads::CrudBenchmark::new(db, 10000)
.with_operation_mix(CrudOperationMix {
create_percent: 0.25,
read_percent: 0.50,
update_percent: 0.20,
delete_percent: 0.05,
});
let query_benchmark = workloads::QueryBenchmark::new(db, 50000);
let tx_benchmark = workloads::TransactionBenchmark::new(db, 10); // 10 operations per transaction
let memory_benchmark = workloads::MemoryBenchmark::new(db, 1024 * 1024); // 1MB per operation
let storage_benchmark = workloads::StorageBenchmark::new(db, 1000);
let analyzer = PerformanceAnalyzer::new();
analyzer.add_result(result);
let analysis = analyzer.analyze();
println!("Analysis: {:?}", analysis.summary);
println!("Bottlenecks: {:?}", analysis.bottlenecks);
println!("Recommendations: {:?}", analysis.recommendations);
let mut trend_analyzer = TrendAnalyzer::new(100);
trend_analyzer.add_snapshot(metrics_snapshot);
let trends = trend_analyzer.analyze_trends();
println!("Performance trend: {}%", trends.throughput_trend);
let mut comparator = BenchmarkComparator::new();
comparator.set_baseline("crud", baseline_result);
if let Some(comparison) = comparator.compare("crud", &current_result) {
if comparison.has_regression {
println!("⚠️ Performance regression detected!");
println!("Throughput change: {:.1}%", comparison.throughput_change_percent);
}
}
let config = BenchmarkConfig {
duration: Duration::from_secs(300), // 5 minutes
concurrency: 64, // 64 concurrent workers
warmup_duration: Duration::from_secs(30), // 30 second warmup
operations_per_second: Some(10000), // Rate limiting
measure_latency: true, // Collect latency metrics
profile_memory: true, // Monitor memory usage
profile_storage: true, // Monitor storage I/O
parameters: HashMap::new(), // Custom parameters
};
use patterns::*;
// Ramp up load
let ramp_up_generator = RampUpLoadGenerator::new(
workload,
1000.0, // Start at 1000 ops/sec
10000.0, // Ramp up to 10000 ops/sec
Duration::from_secs(300), // Over 5 minutes
);
// Bursty load
let bursty_generator = BurstyLoadGenerator::new(
workload,
Duration::from_secs(10), // 10 second bursts
Duration::from_secs(5), // 5 second cooldowns
3.0, // 3x multiplier during bursts
);
// Spike load
let spike_generator = SpikeLoadGenerator::new(
workload,
5000.0, // Base throughput
5.0, // 5x spike multiplier
0.1, // 10% chance of spike
Duration::from_secs(30), // 30 second spike duration
);
🚀 KotobaDB Benchmark Results
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📊 Benchmark 1/1
━━━━━━━━━━━━━
🏷️ Name: CRUD Operations
⏱️ Duration: 60.00s
🔢 Operations: 125000
📈 Throughput: 2083 ops/sec
Latency Percentiles (µs):
50th: 1250 µs
95th: 2800 µs
99th: 4500 µs
99.9th: 12000 µs
Max: 25000 µs
Error Analysis:
❌ Error Rate: 0.050%
📉 Error Count: 62
Performance Assessment:
✅
Excellent throughput: 2083 ops/sec
⚠️ Acceptable latency: 2800 µs p95
⚠️ Acceptable reliability: 0.050% error rate
Interactive HTML reports with charts:
Structured data export for:
let mut profiler = PerformanceProfiler::new();
profiler.start_profiling();
// During benchmark execution
profiler.sample(); // Collect current metrics
profiler.record_event("gc_time", 15.5); // Record custom events
// Generate profiling report
let report = profiler.generate_report();
println!("Profiling recommendations: {:?}", report.recommendations);
profiler.record_event("cache_hit_rate", 0.95);
profiler.record_event("connection_pool_size", 25.0);
profiler.record_event("query_complexity", 3.2);
let baseline_result = runner.run_benchmark(baseline_benchmark).await?;
save_baseline("crud_operations_v1.0", &baseline_result)?;
let current_result = runner.run_benchmark(current_benchmark).await?;
let regression = compare_with_baseline(&current_result, &baseline_result)?;
if regression.has_regression {
send_alert(&format!("Performance regression detected: {:.1}% throughput drop",
regression.throughput_change_percent));
}
// YCSB-A: 50% reads, 50% updates
let ycsb_a = YcsbWorkloadA::new(1_000_000, 1024);
// YCSB-B: 95% reads, 5% updates
let ycsb_b = YcsbWorkloadB::new(1_000_000, 1024);
// YCSB-C: 100% reads
let ycsb_c = YcsbWorkloadC::new(1_000_000);
// Social network patterns
let social_network = SocialNetworkWorkload::new(100_000, 1_000_000);
// E-commerce patterns
let ecommerce = EcommerceWorkload::new(50_000, 25_000);
#[async_trait]
impl Benchmark for MyCustomBenchmark {
fn name(&self) -> &str {
"My Custom Benchmark"
}
async fn setup(&mut self, config: &BenchmarkConfig) -> Result<(), Box<dyn std::error::Error>> {
// Custom setup logic
Ok(())
}
async fn run(&self, config: &BenchmarkConfig) -> Result<BenchmarkResult, Box<dyn std::error::Error>> {
// Custom execution logic
Ok(result)
}
async fn teardown(&mut self) -> Result<(), Box<dyn std::error::Error>> {
// Custom cleanup logic
Ok(())
}
}
impl BenchmarkExt for MyCustomBenchmark {
async fn run_operation(&self, worker_id: usize, operation_count: u64) -> Result<(), Box<dyn std::error::Error>> {
// Custom operation logic
Ok(())
}
}
// Run multiple benchmarks in suite
let mut suite = BenchmarkSuite::new(config);
suite.add_benchmark(crud_benchmark);
suite.add_benchmark(query_benchmark);
suite.add_benchmark(tx_benchmark);
let results = suite.run_all().await?;
let analysis = PerformanceAnalyzer::analyze_suite(&results);
Remember: Measure, analyze, optimize, repeat! 📊🔬⚡