| Crates.io | distributed-lock-core |
| lib.rs | distributed-lock-core |
| version | 0.2.0 |
| created_at | 2025-12-28 11:26:32.36687+00 |
| updated_at | 2026-01-01 07:01:19.802823+00 |
| description | Core traits and types for distributed locks |
| homepage | |
| repository | https://github.com/XuHaoJun/distributed-lock-rs |
| max_upload_size | |
| id | 2008626 |
| size | 33,208 |
Distributed locks for Rust with multiple backend support. This library provides distributed synchronization primitives (mutex locks, reader-writer locks, semaphores) that work across processes and machines.
The PostgreSQL backend uses PostgreSQL advisory locks. It is production-ready, with connection pooling and transaction-scoped locks.
```toml
[dependencies]
distributed-lock-postgres = "0.1"
tokio = { version = "1", features = ["full"] }
```
The MySQL backend uses MySQL user-level locks and database tables. Reader-writer locks are supported through automatic table creation.
Note: reader-writer locks require a `distributed_locks` table in your MySQL database. This table is created automatically when needed, since MySQL doesn't provide built-in reader-writer lock primitives the way PostgreSQL does.
```toml
[dependencies]
distributed-lock-mysql = "0.1"
tokio = { version = "1", features = ["full"] }
```
Or use the meta-crate:
```toml
[dependencies]
distributed-lock = { version = "0.1", features = ["mysql"] }
tokio = { version = "1", features = ["full"] }
```
The Redis backend uses Redis with the Redlock algorithm for multi-server deployments. It supports semaphores and automatic lease extension.
```toml
[dependencies]
distributed-lock-redis = "0.1"
tokio = { version = "1", features = ["full"] }
```
The file backend uses OS-level file locking. It is simple and requires no external services.
```toml
[dependencies]
distributed-lock-file = "0.1"
tokio = { version = "1", features = ["full"] }
```
The MongoDB backend uses MongoDB's atomic document updates and aggregation pipelines. It is production-ready, with support for TTL-based expiration.
```toml
[dependencies]
distributed-lock-mongo = "0.1"
tokio = { version = "1", features = ["full"] }
```
Or use the meta-crate to get all backends:
```toml
[dependencies]
distributed-lock = "0.1"
tokio = { version = "1", features = ["full"] }
```
You can also enable specific backends:
```toml
[dependencies]
distributed-lock = { version = "0.1", features = ["mysql", "postgres"] }
tokio = { version = "1", features = ["full"] }
```
```rust
use distributed_lock::*;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Create a provider (example: file backend)
    let provider = FileLockProvider::builder()
        .directory("/tmp/locks")
        .build()?;

    // Create a lock by name
    let lock = provider.create_lock("my-resource");

    // Acquire the lock with a timeout
    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;

    // Critical section - we have exclusive access
    println!("Doing critical work...");

    // Release the lock (also happens automatically on drop)
    handle.release().await?;

    Ok(())
}
```
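Because the handle is released automatically on drop (per the comment above), you can also lean on scoping instead of calling `release` explicitly. A minimal sketch, reusing the `lock` from the quick-start example:

```rust
// Scope-based release: relies on the automatic release-on-drop
// behavior noted above. Reuses `lock` from the quick-start example.
{
    let _handle = lock.acquire(Some(Duration::from_secs(5))).await?;
    println!("Doing critical work...");
} // `_handle` is dropped here and the lock is released automatically
```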
```rust
use distributed_lock_postgres::PostgresLockProvider;
use distributed_lock_core::prelude::*;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let provider = PostgresLockProvider::builder()
        .connection_string("postgresql://user:pass@localhost/db")
        .build()
        .await?;

    let lock = provider.create_lock("my-resource");
    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;

    // Critical section
    do_work().await?;

    handle.release().await?;
    Ok(())
}
```
```rust
use distributed_lock_mysql::MySqlLockProvider;
use distributed_lock_core::prelude::*;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let provider = MySqlLockProvider::builder()
        .connection_string("mysql://user:pass@localhost/db")
        .build()
        .await?;

    // Basic mutex lock
    let lock = provider.create_lock("my-resource");
    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;
    do_work().await?;
    handle.release().await?;

    // Reader-writer lock (creates the distributed_locks table automatically)
    let rw_lock = provider.create_reader_writer_lock("shared-resource");
    let read_handle = rw_lock.acquire_read(None).await?;
    read_data().await;
    read_handle.release().await?;

    Ok(())
}
```
```rust
use distributed_lock_redis::RedisLockProvider;
use distributed_lock_core::prelude::*;
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let provider = RedisLockProvider::builder()
        .add_server("redis://localhost:6379")
        .build()
        .await?;

    let lock = provider.create_lock("my-resource");
    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;

    // The lock is automatically extended in the background
    do_long_running_work().await?;

    handle.release().await?;
    Ok(())
}
```
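Since the builder exposes `add_server` (used above with a single server), a multi-server Redlock deployment presumably chains one call per node; a sketch under that assumption, with placeholder hostnames:

```rust
// Hypothetical multi-server setup: chain add_server (shown above) once
// per independent Redis node. Redlock typically uses an odd number of
// nodes so a majority quorum is possible. Hostnames are placeholders.
let provider = RedisLockProvider::builder()
    .add_server("redis://redis-1:6379")
    .add_server("redis://redis-2:6379")
    .add_server("redis://redis-3:6379")
    .build()
    .await?;
```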
```rust
use distributed_lock_mongo::MongoDistributedLock;
use distributed_lock_core::prelude::*;
use std::time::Duration;
use mongodb::Client;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::with_uri_str("mongodb://localhost:27017").await?;
    let database = client.database("my_database");

    let lock = MongoDistributedLock::new(
        "my-resource".to_string(),
        database,
        None, // Default collection "DistributedLocks"
        None, // Default options
    );

    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;
    do_work().await?;
    handle.release().await?;

    Ok(())
}
```
```rust
use distributed_lock_postgres::PostgresLockProvider;
use distributed_lock_core::prelude::*;

async fn cache_example(provider: &PostgresLockProvider) -> Result<(), LockError> {
    let rw_lock = provider.create_reader_writer_lock("cache");

    // Multiple readers can hold the lock simultaneously
    {
        let read_handle = rw_lock.acquire_read(None).await?;
        let data = read_from_cache().await;
        read_handle.release().await?;
    }

    // Writers get exclusive access
    {
        let write_handle = rw_lock.acquire_write(None).await?;
        update_cache().await;
        write_handle.release().await?;
    }

    Ok(())
}
```
```rust
use distributed_lock_redis::RedisLockProvider;
use distributed_lock_core::prelude::*;
use std::time::Duration;

async fn rate_limit_example(provider: &RedisLockProvider) -> Result<(), LockError> {
    // Allow at most 5 concurrent database connections
    let semaphore = provider.create_semaphore("db-pool", 5);

    // Acquire a "ticket"
    let ticket = semaphore.acquire(Some(Duration::from_secs(10))).await?;

    // Use the limited resource
    query_database().await?;

    // Release the ticket
    ticket.release().await?;
    Ok(())
}
```
Use `try_acquire` when you want to check whether a lock is available without waiting:
```rust
match lock.try_acquire().await? {
    Some(handle) => {
        // We got the lock!
        do_work().await;
        handle.release().await?;
    }
    None => {
        // Someone else has it
        println!("Lock unavailable");
    }
}
```
```rust
use distributed_lock_core::prelude::*;

match lock.acquire(Some(Duration::from_secs(5))).await {
    Ok(handle) => {
        // Got the lock
        handle.release().await?;
    }
    Err(LockError::Timeout(duration)) => {
        eprintln!("Timed out after {:?}", duration);
    }
    Err(LockError::Connection(e)) => {
        eprintln!("Connection error: {}", e);
    }
    Err(e) => {
        eprintln!("Error: {}", e);
    }
}
```
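A timed-out acquisition is often worth retrying. This illustrative loop (not a library API) retries `LockError::Timeout` a few times with a linear backoff, reusing the `lock` from the snippet above:

```rust
// Illustrative retry loop: retry only on Timeout; other errors (and
// the final timeout) propagate to the caller.
let mut handle = None;
for attempt in 1u64..=3 {
    match lock.acquire(Some(Duration::from_secs(5))).await {
        Ok(h) => {
            handle = Some(h);
            break;
        }
        Err(LockError::Timeout(_)) if attempt < 3 => {
            // Back off briefly before the next attempt
            tokio::time::sleep(Duration::from_millis(200 * attempt)).await;
        }
        Err(e) => return Err(e.into()),
    }
}
```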
This library is organized as a Cargo workspace with separate crates:
- `distributed-lock-core`: Core traits and types
- `distributed-lock-file`: File system backend
- `distributed-lock-mysql`: MySQL backend
- `distributed-lock-postgres`: PostgreSQL backend
- `distributed-lock-redis`: Redis backend
- `distributed-lock-mongo`: MongoDB backend
- `distributed-lock`: Meta-crate re-exporting all backends

Each backend implements the same trait interfaces, allowing you to swap backends without changing application code, as sketched below.
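As an illustration of that backend-agnostic style, here is a sketch of a function written against the core traits. The trait name `LockProvider` is an assumption (check the distributed-lock-core docs for the exact names exported by the prelude); the point is that the caller can pass in any backend:

```rust
use distributed_lock_core::prelude::*;
use std::time::Duration;

// Sketch only: `LockProvider` is an assumed trait name from the
// distributed-lock-core prelude; the real name may differ. Callers
// depend on the core traits alone, so the file, PostgreSQL, MySQL,
// Redis, or MongoDB provider can all be passed in unchanged.
async fn run_exclusively<P: LockProvider>(provider: &P) -> Result<(), LockError> {
    let lock = provider.create_lock("my-resource");
    let handle = lock.acquire(Some(Duration::from_secs(5))).await?;
    // ... critical section ...
    handle.release().await?;
    Ok(())
}
```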
PostgreSQL locks build on `pg_advisory_lock`; MySQL reader-writer locks use the `distributed_locks` table.

Licensed under the MIT License (LICENSE or http://opensource.org/licenses/MIT).
Contributions are welcome! Please feel free to submit a Pull Request.
This library is inspired by the DistributedLock C# library, ported to Rust with idiomatic patterns and async/await support.