| Crates.io | smallring |
| lib.rs | smallring |
| version | 0.2.2 |
| created_at | 2025-11-04 18:30:20.302352+00 |
| updated_at | 2026-01-11 15:56:49.159384+00 |
| description | High-performance ring buffer with automatic stack/heap optimization | 高性能环形缓冲区,支持栈/堆自动优化 |
| homepage | https://github.com/ShaoG-R/smallring |
| repository | https://github.com/ShaoG-R/smallring |
| max_upload_size | |
| id | 1916772 |
| size | 320,734 |
A collection of high-performance lock-free ring buffer implementations with automatic stack/heap optimization. Provides three specialized modules for different use cases: Generic for general-purpose buffers, Atomic for atomic types, and SPSC for cross-thread communication.
Supports `no_std` environments (requires `alloc`).

Add this to your Cargo.toml:
[dependencies]
smallring = "0.2"
use smallring::generic::RingBuf;
// Overwrite mode: automatically overwrites oldest data when full
let buf: RingBuf<i32, 32, true> = RingBuf::new(4);
buf.push(1); // Returns None
buf.push(2);
buf.push(3);
buf.push(4);
buf.push(5); // Returns Some(1), overwrote oldest
// Non-overwrite mode: rejects writes when full
let buf: RingBuf<i32, 32, false> = RingBuf::new(4);
buf.push(1).unwrap(); // Returns Ok(())
buf.push(2).unwrap();
buf.push(3).unwrap();
buf.push(4).unwrap();
assert!(buf.push(5).is_err()); // Returns Err(Full(5))
use smallring::atomic::AtomicRingBuf;
use std::sync::atomic::{AtomicU64, Ordering};
// Create a ring buffer for atomic values
let buf: AtomicRingBuf<AtomicU64, 32> = AtomicRingBuf::new(8);
// Push and pop atomic values
buf.push(42, Ordering::Relaxed);
buf.push(100, Ordering::Relaxed);
assert_eq!(buf.pop(Ordering::Acquire), Some(42));
assert_eq!(buf.pop(Ordering::Acquire), Some(100));
use smallring::spsc::new;
use std::num::NonZero;
// Create a ring buffer with capacity 8, stack threshold 32
let (mut producer, mut consumer) = new::<i32, 32>(NonZero::new(8).unwrap());
// Producer pushes data
producer.push(42).unwrap();
producer.push(100).unwrap();
// Consumer pops data
assert_eq!(consumer.pop().unwrap(), 42);
assert_eq!(consumer.pop().unwrap(), 100);
use smallring::generic::RingBuf;
fn main() {
let mut buf: RingBuf<String, 64, false> = RingBuf::new(16);
// Push some data
buf.push("Hello".to_string()).unwrap();
buf.push("World".to_string()).unwrap();
// Pop data in order
println!("{}", buf.pop().unwrap()); // "Hello"
println!("{}", buf.pop().unwrap()); // "World"
// Check if empty
assert!(buf.is_empty());
}
use smallring::generic::RingBuf;
use std::sync::Arc;
use std::thread;
fn main() {
// Overwrite mode is thread-safe for concurrent writers
let buf = Arc::new(RingBuf::<u64, 128, true>::new(128));
let mut handles = vec![];
// Multiple writer threads
for thread_id in 0..4 {
let buf_clone = Arc::clone(&buf);
let handle = thread::spawn(move || {
for i in 0..100 {
let value = (thread_id * 100 + i) as u64;
buf_clone.push(value); // Automatically overwrites old data
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
}
use smallring::generic::{RingBuf, RingBufError};
// Non-overwrite mode
let buf: RingBuf<i32, 32, false> = RingBuf::new(4);
// Fill the buffer
for i in 0..4 {
buf.push(i).unwrap();
}
// Buffer is full - push returns error with value
match buf.push(99) {
Err(RingBufError::Full(value)) => {
println!("Buffer full, couldn't push {}", value);
}
Ok(_) => {}
}
// Empty the buffer
while buf.pop().is_ok() {}
// Buffer is empty - pop returns error
match buf.pop() {
Err(RingBufError::Empty) => {
println!("Buffer is empty");
}
Ok(_) => {}
}
use smallring::atomic::AtomicRingBuf;
use std::sync::atomic::{AtomicU64, Ordering};
fn main() {
let buf: AtomicRingBuf<AtomicU64, 32> = AtomicRingBuf::new(8);
// Push atomic values
buf.push(42, Ordering::Relaxed);
buf.push(100, Ordering::Relaxed);
// Pop atomic values
assert_eq!(buf.pop(Ordering::Acquire), Some(42));
assert_eq!(buf.pop(Ordering::Acquire), Some(100));
// Check if empty
assert!(buf.is_empty());
}
use smallring::atomic::AtomicRingBuf;
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::thread;
fn main() {
let buf = Arc::new(AtomicRingBuf::<AtomicU64, 64>::new(32));
let mut handles = vec![];
// Multiple threads pushing atomic values
for thread_id in 0..4 {
let buf_clone = Arc::clone(&buf);
let handle = thread::spawn(move || {
for i in 0..50 {
let value = (thread_id * 50 + i) as u64;
buf_clone.push(value, Ordering::Relaxed);
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
}
use smallring::spsc::new;
use std::num::NonZero;
fn main() {
let (mut producer, mut consumer) = new::<String, 64>(NonZero::new(16).unwrap());
// Push some data
producer.push("Hello".to_string()).unwrap();
producer.push("World".to_string()).unwrap();
// Pop data in order
println!("{}", consumer.pop().unwrap()); // "Hello"
println!("{}", consumer.pop().unwrap()); // "World"
// Check if empty
assert!(consumer.is_empty());
}
use smallring::spsc::new;
use std::thread;
use std::num::NonZero;
fn main() {
let (mut producer, mut consumer) = new::<String, 64>(NonZero::new(32).unwrap());
// Producer thread
let producer_handle = thread::spawn(move || {
for i in 0..100 {
let msg = format!("Message {}", i);
while producer.push(msg.clone()).is_err() {
thread::yield_now();
}
}
});
// Consumer thread
let consumer_handle = thread::spawn(move || {
let mut received = Vec::new();
for _ in 0..100 {
loop {
match consumer.pop() {
Ok(msg) => {
received.push(msg);
break;
}
Err(_) => thread::yield_now(),
}
}
}
received
});
producer_handle.join().unwrap();
let messages = consumer_handle.join().unwrap();
assert_eq!(messages.len(), 100);
}
use smallring::spsc::{new, PushError, PopError};
use std::num::NonZero;
let (mut producer, mut consumer) = new::<i32, 32>(NonZero::new(4).unwrap());
// Fill the buffer
for i in 0..4 {
producer.push(i).unwrap();
}
// Buffer is full - push returns error with value
match producer.push(99) {
Err(PushError::Full(value)) => {
println!("Buffer full, couldn't push {}", value);
}
Ok(_) => {}
}
// Empty the buffer
while consumer.pop().is_ok() {}
// Buffer is empty - pop returns error
match consumer.pop() {
Err(PopError::Empty) => {
println!("Buffer is empty");
}
Ok(_) => {}
}
use smallring::spsc::new;
use std::num::NonZero;
let (mut producer, mut consumer) = new::<u32, 64>(NonZero::new(32).unwrap());
// Push multiple elements at once (requires T: Copy)
let data = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
let pushed = producer.push_slice(&data);
assert_eq!(pushed, 10);
// Pop multiple elements at once
let mut output = [0u32; 5];
let popped = consumer.pop_slice(&mut output);
assert_eq!(popped, 5);
assert_eq!(output, [1, 2, 3, 4, 5]);
// Drain remaining elements
let remaining: Vec<u32> = consumer.drain().collect();
assert_eq!(remaining, vec![6, 7, 8, 9, 10]);
| Feature | Generic | Atomic | SPSC |
|---|---|---|---|
| Use Case | General-purpose, shared access | Atomic types only | Cross-thread communication |
| Element Types | Any type T | AtomicU8, AtomicU64, etc. | Any type T |
| Handles | Single shared RingBuf | Single shared AtomicRingBuf | Split Producer/Consumer |
| Overwrite Mode | Compile-time configurable | Always overwrites | Always rejects when full |
| Concurrency | Multiple readers/writers | Multiple readers/writers | Single producer, single consumer |
| Cache Optimization | Direct atomic access | Direct atomic access | Cached read/write indices |
| Drop Behavior | Manual cleanup via clear() | Manual cleanup via clear() | Consumer auto-cleans on drop |
Choose Generic when:
- You need a general-purpose buffer with shared access (e.g. behind an `Arc`)

Choose Atomic when:
- You only need to store atomic types (AtomicU64, AtomicBool, etc.)

Choose SPSC when:
- You need single-producer, single-consumer cross-thread communication
All three modules use generic constant N to control the stack/heap optimization threshold. When capacity ≤ N, data is stored on the stack; otherwise, it's allocated on the heap.
use smallring::spsc::new;
use smallring::generic::RingBuf;
use smallring::atomic::AtomicRingBuf;
use std::sync::atomic::AtomicU64;
use std::num::NonZero;
// SPSC: Capacity ≤ 32, uses stack storage (faster initialization, no heap allocation)
let (prod, cons) = new::<u64, 32>(NonZero::new(16).unwrap());
// SPSC: Capacity > 32, uses heap storage (suitable for larger buffers)
let (prod, cons) = new::<u64, 32>(NonZero::new(64).unwrap());
// Generic: Larger stack threshold for larger stack storage
let buf: RingBuf<u64, 128, true> = RingBuf::new(100);
// Atomic: Stack threshold for atomic types
let atomic_buf: AtomicRingBuf<AtomicU64, 64> = AtomicRingBuf::new(32);
Guidelines:
- For small buffers, use N=32 for optimal performance
- For larger buffers, use up to N=128 to avoid heap allocation
- Stack storage improves new() performance and reduces memory allocator pressure

Creating a Ring Buffer:
pub fn new<T, const N: usize, const OVERWRITE: bool>(capacity: usize) -> RingBuf<T, N, OVERWRITE>
RingBuf Methods:
push(&mut self, value: T) - Push element (return type depends on OVERWRITE flag)
- OVERWRITE=true: Returns Option<T> (Some if element was overwritten)
- OVERWRITE=false: Returns Result<(), RingBufError<T>>

pop(&mut self) -> Result<T, RingBufError<T>> - Pop a single element
push_slice(&mut self, values: &[T]) -> usize - Push multiple elements (requires T: Copy)
pop_slice(&mut self, dest: &mut [T]) -> usize - Pop multiple elements (requires T: Copy)
peek(&self) -> Option<&T> - View first element without removing
clear(&mut self) - Remove all elements
as_slices(&self) -> (&[T], &[T]) - Get readable data as contiguous slices
as_mut_slices(&mut self) -> (&mut [T], &mut [T]) - Get readable data as mutable contiguous slices
iter(&self) -> Iter<'_, T> - Create element iterator
iter_mut(&mut self) -> IterMut<'_, T> - Create mutable element iterator
capacity() -> usize - Get buffer capacity
len() -> usize - Get number of elements in buffer
is_empty() -> bool - Check if buffer is empty
is_full() -> bool - Check if buffer is full

Creating a Ring Buffer:
pub fn new<E: AtomicElement, const N: usize>(capacity: usize) -> AtomicRingBuf<E, N>
AtomicRingBuf Methods:
push(&self, value: E::Primitive, order: Ordering) - Push an atomic value
pop(&self, order: Ordering) -> Option<E::Primitive> - Pop an atomic value
peek(&self, order: Ordering) -> Option<E::Primitive> - View first element without removing
clear(&mut self) - Remove all elements
capacity() -> usize - Get buffer capacity
len(&self, order: Ordering) -> usize - Get number of elements in buffer
is_empty(&self, order: Ordering) -> bool - Check if buffer is empty
is_full(&self, order: Ordering) -> bool - Check if buffer is full

Supported Atomic Types:
- AtomicU8, AtomicU16, AtomicU32, AtomicU64, AtomicUsize
- AtomicI8, AtomicI16, AtomicI32, AtomicI64, AtomicIsize
- AtomicBool

Creating a Ring Buffer:
pub fn new<T, const N: usize>(capacity: NonZero<usize>) -> (Producer<T, N>, Consumer<T, N>)
Producer Methods:
push(&mut self, value: T) -> Result<(), PushError<T>> - Push a single element
push_slice(&mut self, values: &[T]) -> usize - Push multiple elements (requires T: Copy)
capacity() -> usize - Get buffer capacity
len() / slots() -> usize - Get number of elements in buffer
free_slots() -> usize - Get available space
is_full() -> bool - Check if buffer is full
is_empty() -> bool - Check if buffer is empty

Consumer Methods:
pop(&mut self) -> Result<T, PopError> - Pop a single element
pop_slice(&mut self, dest: &mut [T]) -> usize - Pop multiple elements (requires T: Copy)
peek(&self) -> Option<&T> - View first element without removing
drain(&mut self) -> Drain<'_, T, N> - Create draining iterator
clear(&mut self) - Remove all elements
capacity() -> usize - Get buffer capacity
len() / slots() -> usize - Get number of elements in buffer
is_empty() -> bool - Check if buffer is empty

Tips:
- push_slice and pop_slice are significantly faster than individual operations when working with Copy types.
- Use peek() to inspect without consuming.

Capacity is automatically rounded up to the nearest power of 2:
// Requested capacity → Actual capacity
// 5 → 8
// 10 → 16
// 30 → 32
// 100 → 128
Recommendation: Choose power-of-2 capacities to avoid wasted space.
- RingBuf is Send and Sync when T is Send, so it can be shared via Arc
- AtomicRingBuf is Send and Sync for all supported atomic types
- Producer and Consumer are not Sync, ensuring single-threaded access
- Producer and Consumer are Send, allowing them to be moved between threads
- Uses MaybeUninit<T> internally for safe uninitialized memory handling
- Buffers can be shared via Arc or used in single-threaded scenarios

The OVERWRITE flag controls behavior when full:
- true: Automatically overwrites oldest data (circular buffer semantics)
- false: Rejects new writes and returns error
- Call clear() explicitly if needed

Atomic module notes:
- Operations take an Ordering parameter for fine-grained control
- The AtomicElement trait ensures only valid atomic types are supported
- Call clear() explicitly if needed

SPSC module notes:
- The Consumer automatically cleans up remaining elements when dropped
- Pushing to a full buffer returns PushError::Full

Performance characteristics (approximate, system-dependent):
- Stack storage (capacity ≤ N): ~1-2 ns per new() call
- Heap storage (capacity > N): ~50-100 ns per new() call

Rust 1.87 or later is required due to const generics features.
Licensed under either of:
at your option.
Contributions are welcome! Please feel free to submit a Pull Request.
- Make sure cargo test passes
- Run cargo fmt before committing

Inspired by various ring buffer implementations in the Rust ecosystem, with a focus on simplicity, performance, and automatic stack/heap optimization.