| Crates.io | ranged-mmap |
| lib.rs | ranged-mmap |
| version | 0.4.0 |
| created_at | 2025-11-06 04:18:20.331504+00 |
| updated_at | 2025-12-02 05:25:11.421516+00 |
| description | Type-safe memory-mapped file library with lock-free concurrent writes to non-overlapping ranges |
| homepage | https://github.com/ShaoG-R/ranged-mmap |
| repository | https://github.com/ShaoG-R/ranged-mmap |
| max_upload_size | |
| id | 1919049 |
| size | 185,140 |
A type-safe, high-performance memory-mapped file library optimized for lock-free concurrent writes to non-overlapping ranges.
Perfect for: concurrent random writes to non-overlapping ranges of a fixed-size file, such as several threads each filling their own pre-allocated region.
Not suitable for: files that must grow after creation (the mapped size is fixed) or workloads where non-overlapping ranges cannot be allocated up front.
Add to your Cargo.toml:
[dependencies]
ranged-mmap = "0.4"
The MmapFile API provides compile-time safety guarantees through range allocation:
use ranged_mmap::{MmapFile, allocator::ALIGNMENT};
use std::num::NonZeroU64;
fn main() -> ranged_mmap::Result<()> {
// Create a file (size given in bytes) and a range allocator
// All allocations are 4K aligned automatically
let (file, mut allocator) = MmapFile::create_default(
"output.bin",
NonZeroU64::new(ALIGNMENT * 256).unwrap() // 1MB (256 * 4K)
)?;
// Allocate non-overlapping ranges in the main thread (4K aligned)
let range1 = allocator.allocate(NonZeroU64::new(ALIGNMENT * 128).unwrap()).unwrap(); // [0, 512KB)
let range2 = allocator.allocate(NonZeroU64::new(ALIGNMENT * 128).unwrap()).unwrap(); // [512KB, 1MB)
// Concurrent writes to different ranges (compile-time safe!)
std::thread::scope(|s| {
let f1 = file.clone();
let f2 = file.clone();
s.spawn(move || {
let receipt = f1.write_range(range1, &vec![1u8; (ALIGNMENT * 128) as usize]);
f1.flush_range(receipt);
});
s.spawn(move || {
let receipt = f2.write_range(range2, &vec![2u8; (ALIGNMENT * 128) as usize]);
f2.flush_range(receipt);
});
});
// Final synchronous flush to ensure all data is written
unsafe { file.sync_all()?; }
Ok(())
}
For scenarios where you can manually guarantee non-overlapping writes:
use ranged_mmap::MmapFileInner;
use std::num::NonZeroU64;
fn main() -> ranged_mmap::Result<()> {
let file = MmapFileInner::create("output.bin", NonZeroU64::new(1024).unwrap())?;
let file1 = file.clone();
let file2 = file.clone();
std::thread::scope(|s| {
// ⚠️ Safety: You must ensure non-overlapping regions
s.spawn(|| unsafe {
file1.write_at(0, &[1; 512]);
});
s.spawn(|| unsafe {
file2.write_at(512, &[2; 512]);
});
});
unsafe { file.flush()?; }
Ok(())
}
- MmapFile: Type-safe memory-mapped file with compile-time safety
- MmapFileInner: Unsafe high-performance version for manual safety management
- RangeAllocator: Trait for range allocators
- allocator::sequential::Allocator: Sequential allocator for single-thread use
- allocator::concurrent::Allocator: Wait-free concurrent allocator for multi-thread scenarios
- AllocatedRange: Represents a valid, non-overlapping file range
- WriteReceipt: Proof that a range has been written (enables type-safe flushing)
- SplitUpResult: Result of splitting with 4K upper alignment
- SplitDownResult: Result of splitting with 4K lower alignment
- ALIGNMENT: 4K alignment constant (4096 bytes)
- align_up: Function to align values up to 4K boundary
- align_down: Function to align values down to 4K boundary
MmapFile (Type-Safe)
use std::num::NonZeroU64;
use ranged_mmap::allocator::{sequential, concurrent, ALIGNMENT};
// Create file with default sequential allocator
let (file, mut allocator) = MmapFile::create_default(path, NonZeroU64::new(size).unwrap())?;
// Or specify allocator type explicitly
let (file, mut allocator) = MmapFile::create::<sequential::Allocator>(path, NonZeroU64::new(size).unwrap())?;
// Use concurrent allocator for multi-thread allocation
let (file, allocator) = MmapFile::create::<concurrent::Allocator>(path, NonZeroU64::new(size).unwrap())?;
// Allocate ranges (4K aligned, returns Option)
let range = allocator.allocate(NonZeroU64::new(ALIGNMENT).unwrap()).unwrap();
// Write to range (returns receipt directly)
let receipt = file.write_range(range, data);
// Flush using receipt
file.flush_range(receipt);
// Sync all data to disk
unsafe { file.sync_all()?; }
MmapFileInner (Unsafe)
use std::num::NonZeroU64;
// Create file
let file = MmapFileInner::create(path, NonZeroU64::new(size).unwrap())?;
// Write at offset (must ensure non-overlapping)
unsafe { file.write_at(offset, data); }
// Flush to disk
unsafe { file.flush()?; }
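The alignment helpers listed in the API overview can also be used on their own, for example to pre-compute how much space a payload will occupy. A minimal sketch, assuming align_up and align_down live under ranged_mmap::allocator and take and return u64 (the values are purely illustrative):
use ranged_mmap::allocator::{align_down, align_up, ALIGNMENT};
// ALIGNMENT is the 4K alignment constant (4096 bytes)
assert_eq!(ALIGNMENT, 4096);
assert_eq!(align_up(5000), 8192);   // rounded up to the next 4K boundary
assert_eq!(align_down(5000), 4096); // rounded down to the previous 4K boundary
assert_eq!(align_up(4096), 4096);   // already-aligned values are assumed unchanged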
Safety Guarantees (MmapFile)
The type system ensures:
- Every AllocatedRange is created by a RangeAllocator, so ranges are valid and never overlap
- Flushing requires a WriteReceipt, so only ranges that have actually been written can be flushed
Manual Safety (MmapFileInner)
You must ensure:
- Writes from different threads target non-overlapping regions of the file
This library is optimized for concurrent random write scenarios. Compared to standard tokio::fs::File, it offers lock-free concurrent writes, zero-copy access through the memory mapping, and no system calls on the write path (see the comparison table below).
See benches/concurrent_write.rs for detailed benchmarks.
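To run them locally (assuming the benchmark target keeps the file's name):
cargo bench --bench concurrent_write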
use ranged_mmap::{MmapFile, allocator::{concurrent, ALIGNMENT}};
use std::num::NonZeroU64;
use std::sync::Arc;
fn main() -> ranged_mmap::Result<()> {
// Use concurrent allocator for wait-free allocation from multiple threads
let (file, allocator) = MmapFile::create::<concurrent::Allocator>(
"output.bin",
NonZeroU64::new(ALIGNMENT * 100).unwrap()
)?;
let allocator = Arc::new(allocator);
std::thread::scope(|s| {
for _ in 0..4 {
let f = file.clone();
let alloc = Arc::clone(&allocator);
s.spawn(move || {
// Each thread can allocate independently (wait-free)
while let Some(range) = alloc.allocate(NonZeroU64::new(ALIGNMENT).unwrap()) {
let receipt = f.write_range(range, &vec![42u8; ALIGNMENT as usize]);
f.flush_range(receipt);
}
});
}
});
unsafe { file.sync_all()?; }
Ok(())
}
use ranged_mmap::{MmapFile, allocator::ALIGNMENT};
use std::num::NonZeroU64;
use tokio::task;
#[tokio::main]
async fn main() -> ranged_mmap::Result<()> {
let (file, mut allocator) = MmapFile::create_default(
"output.bin",
NonZeroU64::new(ALIGNMENT * 256).unwrap() // 1MB
)?;
// Allocate ranges (4K aligned)
let range1 = allocator.allocate(NonZeroU64::new(ALIGNMENT * 128).unwrap()).unwrap();
let range2 = allocator.allocate(NonZeroU64::new(ALIGNMENT * 128).unwrap()).unwrap();
// Spawn async tasks
let f1 = file.clone();
let f2 = file.clone();
let task1 = task::spawn_blocking(move || {
f1.write_range(range1, &vec![1u8; (ALIGNMENT * 128) as usize])
});
let task2 = task::spawn_blocking(move || {
f2.write_range(range2, &vec![2u8; (ALIGNMENT * 128) as usize])
});
let receipt1 = task1.await.unwrap();
let receipt2 = task2.await.unwrap();
// Flush specific ranges
file.flush_range(receipt1);
file.flush_range(receipt2);
unsafe { file.sync_all()?; }
Ok(())
}
use ranged_mmap::{MmapFile, allocator::ALIGNMENT};
use std::num::NonZeroU64;
fn main() -> ranged_mmap::Result<()> {
let (file, mut allocator) = MmapFile::create_default(
"output.bin",
NonZeroU64::new(ALIGNMENT).unwrap()
)?;
// Allocations are 4K aligned
let range = allocator.allocate(NonZeroU64::new(ALIGNMENT).unwrap()).unwrap();
// Write data (data length must match range length)
file.write_range(range, &vec![42u8; ALIGNMENT as usize]);
// Read back
let mut buf = vec![0u8; ALIGNMENT as usize];
file.read_range(range, &mut buf)?;
assert_eq!(buf[0], 42u8);
Ok(())
}
use ranged_mmap::{MmapFile, allocator::ALIGNMENT};
use std::num::NonZeroU64;
fn main() -> ranged_mmap::Result<()> {
// Open existing file with default sequential allocator
let (file, mut allocator) = MmapFile::open_default("existing.bin")?;
println!("File size: {} bytes", file.size());
println!("Remaining allocatable: {} bytes", allocator.remaining());
// Continue allocating and writing (4K aligned)
if let Some(range) = allocator.allocate(NonZeroU64::new(ALIGNMENT).unwrap()) {
file.write_range(range, &vec![0u8; ALIGNMENT as usize]);
}
Ok(())
}
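Because allocate returns an Option, running out of space is detected without extra bookkeeping. A minimal sketch of that behavior (the file name and sizes are illustrative):
use ranged_mmap::{MmapFile, allocator::ALIGNMENT};
use std::num::NonZeroU64;
fn main() -> ranged_mmap::Result<()> {
    // A file with room for exactly two 4K ranges
    let (_file, mut allocator) = MmapFile::create_default(
        "tiny.bin",
        NonZeroU64::new(ALIGNMENT * 2).unwrap()
    )?;
    let chunk = NonZeroU64::new(ALIGNMENT).unwrap();
    assert!(allocator.allocate(chunk).is_some());
    assert!(allocator.allocate(chunk).is_some());
    // Fully allocated: remaining() is 0 and further allocations return None
    assert_eq!(allocator.remaining(), 0);
    assert!(allocator.allocate(chunk).is_none());
    Ok(())
}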
- The file is memory-mapped with memmap2, making it accessible as a continuous memory region
- sequential::Allocator: Simple sequential allocation for single-thread use
- concurrent::Allocator: Wait-free atomic allocation for multi-thread scenarios
- An AllocatedRange can only be created through the allocator, guaranteeing validity
- Each write targets its own AllocatedRange, avoiding locks
| Feature | ranged-mmap | tokio::fs::File | std::fs::File |
|---|---|---|---|
| Concurrent writes | ✅ Lock-free | ❌ Requires locks | ❌ Requires locks |
| Zero-copy | ✅ Yes | ❌ No | ❌ No |
| Type safety | ✅ Compile-time | ⚠️ Runtime | ⚠️ Runtime |
| System calls (write) | ✅ Zero | ❌ Per write | ❌ Per write |
| Dynamic size | ❌ Fixed | ✅ Yes | ✅ Yes |
| Async support | ✅ Runtime agnostic | ✅ Tokio only | ❌ No |
Contributions are welcome! Please feel free to submit issues or pull requests.
This project is licensed under either of:
at your option.
Built on top of the excellent memmap2 crate.