| Crates.io | kotoba-memory |
| lib.rs | kotoba-memory |
| version | 0.1.16 |
| created_at | 2025-09-18 03:58:47.390767+00 |
| updated_at | 2025-09-18 03:58:47.390767+00 |
| description | Advanced memory management and optimization tools for KotobaDB |
| homepage | https://github.com/com-junkawasaki/kotoba |
| repository | https://github.com/com-junkawasaki/kotoba |
| max_upload_size | |
| id | 1844227 |
| size | 207,644 |
Advanced memory management and optimization features for KotobaDB, providing intelligent memory pooling, caching strategies, and garbage collection optimization.
Quick start:

```rust
use kotoba_memory::{MemoryOptimizer, MemoryConfig};

let config = MemoryConfig {
    enable_pooling: true,
    pool_size_mb: 256,
    enable_caching: true,
    cache_size_mb: 512,
    cache_policy: kotoba_memory::CachePolicy::Adaptive,
    enable_custom_allocators: false,
    ..Default::default()
};

let mut optimizer = MemoryOptimizer::new(config);
optimizer.start().await?;

// Your application code here
run_my_database_operations().await;

let report = optimizer.stop().await?;
println!("{}", report.summary());
```
Memory pooling:

```rust
use kotoba_memory::memory_pool::{MemoryPool, MemoryBlock};

// Create a memory pool
let pool = MemoryPool::new(64 * 1024 * 1024); // 64MB pool

// Allocate from the pool
let block = pool.allocate(1024)?; // 1KB allocation
assert_eq!(block.size(), 1024);

// Use the memory
let slice = block.as_slice();
// ... use slice ...

// Automatic deallocation when the block goes out of scope
drop(block);
```
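For intuition, the pooling idea itself fits in a few lines of plain Rust. The sketch below is independent of kotoba-memory's types (`FixedPool` is hypothetical): one up-front buffer is carved into fixed-size blocks that are recycled through a free list, so steady-state allocation never touches the global allocator.

```rust
/// Toy fixed-block pool: one upfront buffer, blocks recycled via a free list.
struct FixedPool {
    buf: Vec<u8>,
    block_size: usize,
    free: Vec<usize>, // byte offsets of currently available blocks
}

impl FixedPool {
    fn new(block_size: usize, blocks: usize) -> Self {
        FixedPool {
            buf: vec![0u8; block_size * blocks],
            block_size,
            free: (0..blocks).map(|i| i * block_size).collect(),
        }
    }

    /// Hand out one block (by offset), or None if the pool is exhausted.
    fn allocate(&mut self) -> Option<usize> {
        self.free.pop()
    }

    /// Return a block to the free list so it can be reused.
    fn release(&mut self, offset: usize) {
        self.free.push(offset);
    }

    /// View a block as a mutable byte slice.
    fn block_mut(&mut self, offset: usize) -> &mut [u8] {
        &mut self.buf[offset..offset + self.block_size]
    }
}
```

Allocation and release are a `Vec` push/pop, which is why pooled allocation is so much cheaper than hitting the system allocator on every request.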
Caching:

```rust
use kotoba_memory::cache_manager::{CacheManager, CacheMetadata, CachePolicy, CachedValue};
use std::time::{Duration, Instant};

let cache = CacheManager::new(100 * 1024 * 1024, CachePolicy::Adaptive); // 100MB cache

let value = CachedValue {
    data: vec![1, 2, 3, 4, 5],
    metadata: CacheMetadata {
        content_type: "binary".to_string(),
        size_bytes: 5,
        compression_ratio: None,
        checksum: None,
    },
    access_count: 0,
    last_access: Instant::now(),
    created_at: Instant::now(),
    ttl: Some(Duration::from_secs(3600)), // 1 hour TTL
};

// Store in cache
cache.put("my_key".to_string(), value);

// Retrieve from cache
if let Some(cached_value) = cache.get("my_key") {
    println!("Cache hit! Data: {:?}", cached_value.data);
}
```
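A common way to consume this API is a cache-aside helper: look the key up, and on a miss compute the value, store it, and return it. A minimal sketch, assuming only the `get`/`put` calls and the `CachedValue`/`CacheMetadata` fields shown above:

```rust
use std::time::{Duration, Instant};
use kotoba_memory::cache_manager::{CacheManager, CacheMetadata, CachedValue};

/// Cache-aside lookup: return cached bytes if present, otherwise compute,
/// store, and return the fresh value.
fn get_or_compute(
    cache: &CacheManager,
    key: &str,
    compute: impl FnOnce() -> Vec<u8>,
) -> Vec<u8> {
    if let Some(hit) = cache.get(key) {
        return hit.data; // cache hit: no recomputation
    }

    let data = compute();
    let value = CachedValue {
        data: data.clone(),
        metadata: CacheMetadata {
            content_type: "binary".to_string(),
            size_bytes: data.len(),
            compression_ratio: None,
            checksum: None,
        },
        access_count: 0,
        last_access: Instant::now(),
        created_at: Instant::now(),
        ttl: Some(Duration::from_secs(3600)),
    };
    cache.put(key.to_string(), value);
    data
}
```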
Custom allocators:

```rust
use kotoba_memory::allocators::{create_custom_allocator, create_monitored_allocator};

// Create a custom arena allocator
let arena_allocator = create_custom_allocator();

// Wrap it with monitoring
let monitored_allocator = create_monitored_allocator(arena_allocator);

// Use the allocator
let layout = std::alloc::Layout::from_size_align(1024, 8)?;
let ptr = monitored_allocator.allocate(layout)?;

// Check statistics
let stats = monitored_allocator.stats();
println!("Allocations: {}, Peak usage: {} bytes",
    stats.allocations, stats.peak_usage);
```
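To make the monitoring idea concrete, here is what a counting wrapper boils down to in plain std Rust, with no kotoba-memory types involved: every allocation and deallocation updates a pair of atomic counters around calls to the system allocator.

```rust
use std::alloc::{GlobalAlloc, Layout, System};
use std::sync::atomic::{AtomicUsize, Ordering};

static ALLOCATIONS: AtomicUsize = AtomicUsize::new(0);
static LIVE_BYTES: AtomicUsize = AtomicUsize::new(0);

/// Counting wrapper around the system allocator: the essence of a
/// monitored allocator, reduced to two counters.
struct CountingAlloc;

unsafe impl GlobalAlloc for CountingAlloc {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        ALLOCATIONS.fetch_add(1, Ordering::Relaxed);
        LIVE_BYTES.fetch_add(layout.size(), Ordering::Relaxed);
        System.alloc(layout)
    }

    unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
        LIVE_BYTES.fetch_sub(layout.size(), Ordering::Relaxed);
        System.dealloc(ptr, layout)
    }
}

// Installing it process-wide is one attribute away:
#[global_allocator]
static GLOBAL: CountingAlloc = CountingAlloc;
```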
GC optimization:

```rust
use kotoba_memory::gc_optimizer::GcOptimizer;
use std::time::Duration;

let mut gc_optimizer = GcOptimizer::new();
gc_optimizer.start().await?;

// Record GC events (in real usage, this happens automatically)
gc_optimizer.record_collection(
    Duration::from_millis(45), // pause time
    10_000_000,                // bytes reclaimed
    0,                         // generation
);

// Analyze GC performance
let analysis = gc_optimizer.analyze().await?;
println!("GC Performance Score: {:.2}", analysis.performance_score);

// Apply optimizations
gc_optimizer.optimize().await?;
```
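The values recorded per collection (pause time, bytes reclaimed, generation) are already enough to derive useful aggregates by hand. A minimal sketch, separate from the crate's own `performance_score`:

```rust
use std::time::Duration;

/// Aggregate recorded collections into (average pause, total bytes reclaimed).
fn summarize_collections(collections: &[(Duration, u64)]) -> (Duration, u64) {
    if collections.is_empty() {
        return (Duration::ZERO, 0);
    }
    let total_pause: Duration = collections.iter().map(|(pause, _)| *pause).sum();
    let avg_pause = total_pause / collections.len() as u32;
    let reclaimed: u64 = collections.iter().map(|(_, bytes)| *bytes).sum();
    (avg_pause, reclaimed)
}
```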
A full configuration with every subsystem enabled:

```rust
use kotoba_memory::{AllocatorType, CachePolicy, MemoryConfig};

let config = MemoryConfig {
    enable_pooling: true,
    pool_size_mb: 256,
    enable_caching: true,
    cache_size_mb: 512,
    cache_policy: CachePolicy::Adaptive,
    enable_custom_allocators: true,
    allocator_type: AllocatorType::Custom,
    enable_gc_optimization: true,
    target_memory_usage_percent: 75.0,
    monitoring_interval_ms: 1000,
    ..Default::default()
};
```
Jemalloc and mimalloc can also be selected as allocator backends when built with the `jemalloc` or `mimalloc` feature (see Features below).

Monitoring memory statistics:

```rust
let stats = optimizer.memory_stats().await;
println!("Current memory: {:.1} MB", stats.current_memory_mb);
println!("Peak memory: {:.1} MB", stats.peak_memory_mb);
println!("Memory efficiency: {:.1}%", stats.memory_efficiency * 100.0);
```
Cache analysis:

```rust
let cache_analysis = cache.analyze();
println!("Cache hit rate: {:.1}%", cache_analysis.stats.hit_rate * 100.0);
println!("Cache effectiveness: {:.1}%", cache_analysis.cache_effectiveness * 100.0);

for recommendation in &cache_analysis.recommendations {
    println!("💡 {}", recommendation);
}
```
GC analysis:

```rust
let gc_analysis = gc_optimizer.analyze().await?;
println!("GC bottlenecks: {}", gc_analysis.bottlenecks.len());

for bottleneck in &gc_analysis.bottlenecks {
    println!("🔧 {} (Severity: {:.1})",
        bottleneck.description, bottleneck.severity);
}
```
Size-class pools:

```rust
// Create specialized pools for different object sizes
let small_pool = MemoryPool::new(16 * 1024 * 1024); // 16MB for small objects
let large_pool = MemoryPool::new(128 * 1024 * 1024); // 128MB for large objects

// Route each request to the appropriate pool based on its size
let size = 1024; // requested allocation size in bytes
let block = if size <= 4096 {
    small_pool.allocate(size)?
} else {
    large_pool.allocate(size)?
};
```
Tiered caching:

```rust
// L1 cache (fast, small)
let l1_cache = CacheManager::new(64 * 1024 * 1024, CachePolicy::Lru);
// L2 cache (slower, larger)
let l2_cache = CacheManager::new(512 * 1024 * 1024, CachePolicy::Adaptive);

// Implement the cache hierarchy: check L1, fall back to L2, promote hits
fn get_with_hierarchy(
    l1_cache: &CacheManager,
    l2_cache: &CacheManager,
    key: &str,
) -> Option<CachedValue> {
    // Try L1 first
    if let Some(value) = l1_cache.get(key) {
        return Some(value);
    }
    // Try L2
    if let Some(value) = l2_cache.get(key) {
        // Promote to L1 for faster future access
        l1_cache.put(key.to_string(), value.clone());
        return Some(value);
    }
    None
}
```
Leak detection:

```rust
use kotoba_memory::memory_profiler::MemoryProfiler;

let profiler = MemoryProfiler::new();
profiler.start().await?;

// Run the application workload
run_workload().await;

// Analyze for leaks
let analysis = profiler.analyze().await?;
for leak in &analysis.memory_leaks {
    println!("🚨 Memory leak detected:");
    println!("   Size: {} bytes", leak.size);
    println!("   Location: {}", leak.allocation_site);
    println!("   Age: {:.1} seconds", leak.age_seconds);
}
```
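Under the hood, leak detection of this kind is usually an age heuristic over live allocations: anything allocated long ago and never freed is a suspect. A self-contained sketch of that bookkeeping (all names here are illustrative, not the profiler's internals):

```rust
use std::collections::HashMap;
use std::time::Instant;

/// Track live allocations by pointer; flag long-lived ones as leak suspects.
struct LeakTracker {
    // ptr -> (allocation site, size in bytes, allocation time)
    live: HashMap<usize, (String, usize, Instant)>,
}

impl LeakTracker {
    /// Return (site, size, age in seconds) for allocations older than the threshold.
    fn suspects(&self, min_age_secs: f64) -> Vec<(&str, usize, f64)> {
        self.live
            .values()
            .filter_map(|(site, size, when)| {
                let age = when.elapsed().as_secs_f64();
                (age >= min_age_secs).then(|| (site.as_str(), *size, age))
            })
            .collect()
    }
}
```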
Applying low-risk recommendations:

```rust
let recommendations = gc_optimizer.analyze().await?
    .optimization_opportunities;

for rec in recommendations {
    if rec.expected_benefit > 0.3 && matches!(rec.risk_level, RiskLevel::Low) {
        println!("🎯 {}: {}", rec.optimization_type, rec.description);
        println!("   Expected benefit: {:.0}%", rec.expected_benefit * 100.0);
        for action in &rec.actions {
            println!("   • {}", action);
        }
    }
}
```
Example optimization results:

Memory pooling:
- Before: 50,000 allocations/sec, 15% fragmentation
- After: 200,000 allocations/sec, 2% fragmentation
- Impact: 4x allocation throughput, 87% fragmentation reduction

Caching:
- Before: 40% cache hit rate, 25ms avg response time
- After: 85% cache hit rate, 8ms avg response time
- Impact: 2.1x cache efficiency, 68% response time improvement

GC tuning:
- Before: 150ms max pause time, 25 GC/min
- After: 35ms max pause time, 8 GC/min
- Impact: 4.3x pause time reduction, 68% GC frequency reduction

Leak detection:
- Before: 500MB memory growth over 1 hour, OOM crashes
- After: stable 200MB usage, no OOM events
- Impact: 60% memory usage reduction, eliminated OOM crashes
Remember: measure, analyze, optimize, repeat!
Enable optional features in your Cargo.toml:

```toml
[dependencies]
kotoba-memory = { version = "0.1.16", features = ["jemalloc", "mimalloc"] }
```
Available features:
- `jemalloc`: Enable jemalloc allocator support
- `mimalloc`: Enable mimalloc allocator support
- `cluster`: Enable cluster-aware memory optimization
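For context, this is the pattern an allocator feature usually enables downstream in the Rust ecosystem; a sketch using the real tikv-jemallocator crate (how kotoba-memory wires its features internally is not shown here and may differ):

```rust
// Illustrative only: a `jemalloc` cargo feature commonly gates a
// process-wide allocator swap like this.
#[cfg(feature = "jemalloc")]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
```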