Crates.io | luckdb |
lib.rs | luckdb |
version | 0.1.2 |
created_at | 2025-09-06 15:14:23.974503+00 |
updated_at | 2025-09-11 16:26:58.720529+00 |
description | A Lightweight JSON Document Database in Rust |
homepage | http://dnrops.gitlink.net |
repository | https://gitcode.net/dnrops/luckdb |
max_upload_size | |
id | 1827171 |
size | 232,207 |
LuckDB is a lightweight, in-memory JSON document database written in Rust, inspired by MongoDB. It provides a simple yet powerful API for storing, querying, and manipulating JSON documents with support for indexing, aggregation, and persistence.
Add LuckDB to your Cargo.toml:

[dependencies]
luckdb = "0.1.2"
use luckdb::{Client, DocId, Query, UpdateDocument};
use serde_json::json;

fn main() -> luckdb::Result<()> {
    // Set up a client, then grab a database and a collection from it.
    let mut client = Client::new();
    let db = client.db("mydb");
    let collection = db.collection("users");

    // Store a single JSON document.
    let alice = json!({
        "name": "Alice",
        "age": 30,
        "city": "New York",
        "interests": ["reading", "hiking"]
    });
    let new_id = collection.insert(alice)?;
    println!("Inserted document with ID: {}", new_id);

    // Look the document up again by name.
    let by_name = Query::new().eq("name", "Alice".into());
    for (id, doc) in collection.find(by_name, None)? {
        println!("Found document {}: {}", id, doc);
    }

    Ok(())
}
The Client
is the entry point to LuckDB. It manages multiple databases.
let mut client = Client::new();
// With storage path for persistence
let mut client = Client::with_storage_path("mongodb://localhost", "./data");
db(&mut self, name: &str) -> &mut Database: Get or create a database
list_database_names(&self) -> Vec<String>: List all databases
drop_database(&mut self, name: &str) -> Result<()>: Drop a database
save(&self) -> Result<()>: Save all data to disk
load(&mut self) -> Result<()>: Load data from disk

A Database contains multiple collections.
let db = client.db("mydb");
collection(&mut self, name: &str) -> &mut Collection: Get or create a collection
list_collection_names(&self) -> Vec<String>: List all collections
create_collection(&mut self, name: &str, options: Option<CreateCollectionOptions>) -> Result<()>: Create a collection with options
drop_collection(&mut self, name: &str) -> Result<()>: Drop a collection
stats(&self) -> Result<DatabaseStats>: Get database statistics
run_command(&mut self, command: &Document) -> Result<Document>: Run a database command

A Collection stores JSON documents.
let collection = db.collection("users");
insert(&mut self, doc: Document) -> Result<DocId>: Insert a document
insert_many(&mut self, docs: Vec<Document>) -> Result<Vec<DocId>>: Insert multiple documents
find(&self, query: Query, options: Option<FindOptions>) -> Result<Vec<(DocId, Document)>>: Find documents matching a query
find_one(&self, query: Query, options: Option<FindOptions>) -> Result<(DocId, Document)>: Find a single document
update_one(&mut self, query: Query, update: UpdateDocument, upsert: bool) -> Result<usize>: Update the first matching document
update_many(&mut self, query: Query, update: UpdateDocument) -> Result<usize>: Update all matching documents
replace_one(&mut self, query: Query, replacement: Document, upsert: bool) -> Result<usize>: Replace the first matching document
delete_one(&mut self, query: Query) -> Result<usize>: Delete the first matching document
delete_many(&mut self, query: Query) -> Result<usize>: Delete all matching documents
count_documents(&self, query: Query) -> Result<usize>: Count documents matching a query
create_index(&mut self, index: Index) -> Result<()>: Create an index
drop_index(&mut self, name: &str) -> Result<()>: Drop an index
list_indexes(&self) -> Result<Vec<Index>>: List all indexes
aggregate(&self, pipeline: Vec<AggregationStage>) -> Result<Vec<Document>>: Run an aggregation pipeline
distinct(&self, field: &str, query: Option<Query>) -> Result<Vec<Value>>: Get distinct values for a field
bulk_write(&mut self, operations: Vec<BulkWriteOperation>, options: Option<BulkWriteOptions>) -> Result<BulkWriteResult>: Execute bulk write operations

Documents are JSON values represented by serde_json::Value.
use serde_json::json;
let doc = json!({
"name": "Alice",
"age": 30,
"city": "New York",
"interests": ["reading", "hiking"]
});
Each document automatically gets an _id
field of type DocId
when inserted.
Queries are built using the Query
struct and its methods.
use luckdb::{Query, Value};
use serde_json::json;

// Simple equality query
let query = Query::new().eq("name", "Alice".into());

// Several conditions chained on one Query
let query = Query::new()
    .eq("city", "New York".into())
    .gt("age", json!(25))
    .in_("interests", vec!["reading".into(), "hiking".into()]);

// Combining whole queries with a logical operator
let in_new_york = Query::new().eq("city", "New York".into());
let in_san_francisco = Query::new().eq("city", "San Francisco".into());
let query = Query::new().or(vec![in_new_york, in_san_francisco]);
eq(key, value): Field equals value
ne(key, value): Field not equal to value
gt(key, value): Field greater than value
gte(key, value): Field greater than or equal to value
lt(key, value): Field less than value
lte(key, value): Field less than or equal to value
in_(key, values): Field in array of values
nin(key, values): Field not in array of values
exists(key, exists): Field exists (or not)
regex(key, pattern): Field matches regex pattern
and(queries): Logical AND of queries
or(queries): Logical OR of queries
nor(queries): Logical NOR of queries
not(query): Logical NOT of query
all(key, values): Array contains all values
elem_match(key, query): Array element matches query
size(key, size): Array has specified size
near(key, point, max_distance): Geospatial near query
within(key, shape): Geospatial within query
intersects(key, shape): Geospatial intersects query

Update operations are built using the UpdateDocument struct.
use luckdb::UpdateDocument;
use serde_json::json;

// Build an update step by step: set a field, increment a counter,
// and append to an array field.
let update = UpdateDocument::new();
let update = update.set("status", "active".into());
let update = update.inc("login_count", json!(1));
let update = update.push("tags", "premium".into());
set(key, value): Set field to value
unset(key): Remove field
inc(key, value): Increment field by value
mul(key, value): Multiply field by value
rename(old_key, new_key): Rename field
set_on_insert(key, value): Set field on insert
min(key, value): Set field to minimum of current and value
max(key, value): Set field to maximum of current and value
current_date(key, type_spec): Set field to current date
push(key, value): Push value to array field
push_all(key, values): Push all values to array field
add_to_set(key, value): Add value to array if not present
pop(key, pos): Remove first or last element of array
pull(key, condition): Remove elements matching condition
pull_all(key, values): Remove all specified values from array
bit(key, operation): Bitwise operation

Indexes improve query performance.
use luckdb::{Index, IndexType};

// Single-field index on "name".
let name_index = Index::new(
    "name_index".to_string(),
    vec![("name".to_string(), IndexType::Ascending)],
);
collection.create_index(name_index)?;

// Compound index: city ascending, then age descending.
let compound = Index::new(
    "compound_index".to_string(),
    vec![
        ("city".to_string(), IndexType::Ascending),
        ("age".to_string(), IndexType::Descending),
    ],
);
collection.create_index(compound)?;

// Unique index on "email".
let email_index = Index::new(
    "email_index".to_string(),
    vec![("email".to_string(), IndexType::Ascending)],
)
.unique(true);
collection.create_index(email_index)?;
Aggregation pipelines transform and analyze data.
// NOTE: the original snippet used Query and GroupSpecification without
// importing them, so it would not compile; both are added to the import list.
use luckdb::{AggregationStage, GroupId, GroupOperation, GroupSpecification, Query, SortOrder};
use serde_json::json;

// Pipeline: filter -> group by city -> sort by count -> keep top 10.
let pipeline = vec![
    // Match documents
    AggregationStage::Match(Query::new().eq("status", "active".into())),
    // Group by city and count
    AggregationStage::Group(GroupSpecification {
        id: GroupId::Field("city".to_string()),
        operations: {
            let mut ops = std::collections::HashMap::new();
            ops.insert("count".to_string(), GroupOperation::Sum(json!(1)));
            ops.insert("avg_age".to_string(), GroupOperation::Avg("$age".into()));
            ops
        },
    }),
    // Sort by count
    AggregationStage::Sort(vec![("count".to_string(), SortOrder::Descending)]),
    // Limit results
    AggregationStage::Limit(10),
];
let results = collection.aggregate(pipeline)?;
use luckdb::{Client, Query, UpdateDocument};
use serde_json::json;

fn main() -> luckdb::Result<()> {
    let mut client = Client::new();
    let db = client.db("test");
    let collection = db.collection("users");

    // Create: insert one user document.
    let alice = json!({
        "name": "Alice",
        "age": 30,
        "city": "New York",
        "interests": ["reading", "hiking"]
    });
    let id = collection.insert(alice)?;
    println!("Created document with ID: {}", id);

    // Read: fetch it back by name.
    for (id, doc) in collection.find(Query::new().eq("name", "Alice".into()), None)? {
        println!("Found document {}: {}", id, doc);
    }

    // Update: bump the age and record a new interest.
    let changes = UpdateDocument::new()
        .set("age", json!(31))
        .push("interests", "travel".into());
    let count = collection.update_one(Query::new().eq("name", "Alice".into()), changes, false)?;
    println!("Updated {} documents", count);

    // Delete: remove the document again.
    let count = collection.delete_one(Query::new().eq("name", "Alice".into()))?;
    println!("Deleted {} documents", count);

    Ok(())
}
// NOTE: the original example imported only {Query, Value} but also uses
// Client, so it would not compile; Client is added to the import list.
use luckdb::{Client, Query, Value};
use serde_json::json;

fn main() -> luckdb::Result<()> {
    let mut client = Client::new();
    let db = client.db("test");
    let collection = db.collection("users");
    // Insert some test data
    collection.insert(json!({"name": "Alice", "age": 30, "city": "New York", "active": true}))?;
    collection.insert(json!({"name": "Bob", "age": 25, "city": "San Francisco", "active": false}))?;
    collection.insert(json!({"name": "Charlie", "age": 35, "city": "New York", "active": true}))?;
    collection.insert(json!({"name": "David", "age": 40, "city": "Chicago", "active": true}))?;
    // Find active users in New York (chained conditions on one Query —
    // presumably combined with AND; confirm against the Query docs).
    let query = Query::new()
        .eq("city", "New York".into())
        .eq("active", Value::Bool(true));
    let results = collection.find(query, None)?;
    println!("Active users in New York:");
    for (id, doc) in results {
        println!(" {}: {}", id, doc);
    }
    // Find users older than 30 or inactive users
    let query1 = Query::new().gt("age", json!(30));
    let query2 = Query::new().eq("active", Value::Bool(false));
    let query = Query::new().or(vec![query1, query2]);
    let results = collection.find(query, None)?;
    println!("Users older than 30 or inactive:");
    for (id, doc) in results {
        println!(" {}: {}", id, doc);
    }
    // Find users with specific interests
    collection.insert(json!({
        "name": "Eve",
        "age": 28,
        "city": "Boston",
        "interests": ["reading", "coding", "hiking"]
    }))?;
    // all(): array field must contain every listed value.
    let query = Query::new().all("interests", vec!["reading".into(), "hiking".into()]);
    let results = collection.find(query, None)?;
    println!("Users with both reading and hiking interests:");
    for (id, doc) in results {
        println!(" {}: {}", id, doc);
    }
    Ok(())
}
use luckdb::{Client, Index, IndexType, Query};
use serde_json::json;

fn main() -> luckdb::Result<()> {
    let mut client = Client::new();
    let db = client.db("test");
    let collection = db.collection("users");

    // Seed 1000 synthetic users spread across four cities.
    let cities = ["New York", "San Francisco", "Chicago", "Boston"];
    for i in 0..1000 {
        let user = json!({
            "name": format!("User {}", i),
            "age": i % 50 + 20,
            "city": cities[i % 4],
            "active": i % 3 != 0
        });
        collection.insert(user)?;
    }

    // Single-field index on "name".
    collection.create_index(Index::new(
        "name_index".to_string(),
        vec![("name".to_string(), IndexType::Ascending)],
    ))?;

    // Compound index: city ascending, age descending.
    collection.create_index(Index::new(
        "city_age_index".to_string(),
        vec![
            ("city".to_string(), IndexType::Ascending),
            ("age".to_string(), IndexType::Descending),
        ],
    ))?;

    // This query touches both indexed fields.
    let query = Query::new().eq("city", "New York".into()).gt("age", json!(30));
    let matches = collection.find(query, None)?;
    println!("Found {} users in New York older than 30", matches.len());

    // Show every index defined on the collection.
    let indexes = collection.list_indexes()?;
    println!("Indexes:");
    for index in indexes {
        println!(" {}: {:?}", index.name, index.key);
    }
    Ok(())
}
use luckdb::{Client, AggregationStage, GroupOperation, GroupId, SortOrder, Query};
use serde_json::json;
fn main() -> luckdb::Result<()> {
let mut client = Client::new();
let db = client.db("test");
let collection = db.collection("sales");
// Insert test data
collection.insert(json!({
"product": "Laptop",
"category": "Electronics",
"price": 1200,
"quantity": 1,
"date": "2023-01-15",
"customer": "Alice"
}))?;
collection.insert(json!({
"product": "Phone",
"category": "Electronics",
"price": 800,
"quantity": 2,
"date": "2023-01-16",
"customer": "Bob"
}))?;
collection.insert(json!({
"product": "Desk Chair",
"category": "Furniture",
"price": 200,
"quantity": 1,
"date": "2023-01-17",
"customer": "Alice"
}))?;
collection.insert(json!({
"product": "Monitor",
"category": "Electronics",
"price": 300,
"quantity": 1,
"date": "2023-01-18",
"customer": "Charlie"
}))?;
// Calculate total sales by category
let pipeline = vec![
AggregationStage::Group(GroupSpecification {
id: GroupId::Field("category".to_string()),
operations: {
let mut ops = std::collections::HashMap::new();
ops.insert("total_sales".to_string(),
GroupOperation::Sum(json!({ "$multiply": ["$price", "$quantity"] })));
ops.insert("count".to_string(), GroupOperation::Sum(json!(1)));
ops
},
}),
AggregationStage::Sort(vec![("total_sales".to_string(), SortOrder::Descending)])
];
let results = collection.aggregate(pipeline)?;
println!("Sales by category:");
for doc in results {
println!(" {}", doc);
}
// Find top customers
let pipeline = vec![
AggregationStage::Group(GroupSpecification {
id: GroupId::Field("customer".to_string()),
operations: {
let mut ops = std::collections::HashMap::new();
ops.insert("total_spent".to_string(),
GroupOperation::Sum(json!({ "$multiply": ["$price", "$quantity"] })));
ops.insert("purchase_count".to_string(), GroupOperation::Sum(json!(1)));
ops
},
}),
AggregationStage::Sort(vec![("total_spent".to_string(), SortOrder::Descending)]),
AggregationStage::Limit(3)
];
let results = collection.aggregate(pipeline)?;
println!("Top customers:");
for doc in results {
println!(" {}", doc);
}
Ok(())
}
use luckdb::Client;
use serde_json::json;

fn main() -> luckdb::Result<()> {
    // A client created with a storage path can persist data to disk.
    let mut client = Client::with_storage_path("mongodb://localhost", "./data");
    // Restore any previously saved state.
    client.load()?;

    let db = client.db("myapp");
    let collection = db.collection("users");

    let user = json!({
        "name": "Alice",
        "email": "alice@example.com",
        "created_at": "2023-01-01T00:00:00Z"
    });
    let id = collection.insert(user)?;
    println!("Inserted document with ID: {}", id);

    // Flush everything to disk.
    client.save()?;
    println!("Data saved to disk");

    // A fresh client pointed at the same path sees the saved data.
    let mut client2 = Client::with_storage_path("mongodb://localhost", "./data");
    client2.load()?;
    let db2 = client2.db("myapp");
    let collection2 = db2.collection("users");

    let results = collection2.find(luckdb::Query::new(), None)?;
    println!("Loaded {} documents", results.len());
    for (id, doc) in results {
        println!(" {}: {}", id, doc);
    }
    Ok(())
}
// NOTE: the original example called connection.close()? and then sent the
// EXIT command on the already-closed connection, which would fail at runtime.
// EXIT is now sent first, and the connection is closed afterwards.
use luckdb::{Client, Server};
use std::net::SocketAddr;
use std::path::PathBuf;

fn main() -> luckdb::Result<()> {
    // Start the server in a separate thread
    let server_thread = std::thread::spawn(|| {
        let addr: SocketAddr = "127.0.0.1:27017".parse().unwrap();
        let storage_path = Some(PathBuf::from("./data"));
        let mut server = Server::new(addr, storage_path);
        server.start().unwrap();
    });
    // Give the server time to start
    std::thread::sleep(std::time::Duration::from_millis(100));
    // Connect to the server
    let remote_client = luckdb::RemoteClient::new("127.0.0.1:27017".parse().unwrap());
    let mut connection = remote_client.connect()?;
    // Send commands
    let response = connection.send_command("INSERT mydb users {\"name\":\"Alice\",\"age\":30}")?;
    println!("Server response: {}", response);
    let response = connection.send_command("FIND mydb users {\"name\":\"Alice\"}")?;
    println!("Server response: {}", response);
    let response = connection.send_command("SAVE")?;
    println!("Server response: {}", response);
    // Stop the server while the connection is still open...
    let response = connection.send_command("EXIT")?;
    println!("Server response: {}", response);
    // ...then close the connection.
    connection.close()?;
    server_thread.join().unwrap();
    Ok(())
}
Note: This is just a basic example to give you an idea of how to use LuckDB. In a real-world application, you may want to add more error handling and security features.
// NOTE: the original example left connection.close() commented out (likely
// because closing before EXIT fails, as in the previous example). The close
// is reinstated after the EXIT command so the connection is shut down cleanly.
#![allow(warnings)]
use luckdb::{Client, Server};
use std::net::SocketAddr;
use std::path::PathBuf;

fn main() -> luckdb::Result<()> {
    // Start the server in a separate thread
    let server_thread = std::thread::spawn(|| {
        // Create server with authentication
        let addr: SocketAddr = "127.0.0.1:27017".parse().unwrap();
        let storage_path = Some(PathBuf::from("./data"));
        let mut server = Server::new(addr, storage_path)
            .with_auth("admin".to_string(), "password123".to_string());
        server.start().unwrap();
    });
    // Give the server time to start
    std::thread::sleep(std::time::Duration::from_millis(100));
    // Connect to the server and authenticate before any other command.
    let remote_client = luckdb::RemoteClient::new("127.0.0.1:27017".parse().unwrap());
    let mut connection = remote_client.connect()?;
    let response = connection.send_command("AUTH admin password123")?;
    println!("Authentication response: {}", response);
    // Send commands
    let response = connection.send_command("INSERT mydb users {\"name\":\"Alice\",\"age\":30}")?;
    println!("Server response: {}", response);
    let response = connection.send_command("FIND mydb users {\"name\":\"Alice\"}")?;
    println!("Server response: {}", response);
    let response = connection.send_command("SAVE")?;
    println!("Server response: {}", response);
    // Stop the server while the connection is still open...
    let response = connection.send_command("EXIT")?;
    println!("Server response: {}", response);
    // ...then close the connection.
    connection.close()?;
    server_thread.join().unwrap();
    Ok(())
}
LuckDB is licensed under the MIT License. See LICENSE for details.