| Crates.io | luckdb |
| lib.rs | luckdb |
| version | 0.1.4 |
| created_at | 2025-09-06 15:14:23.974503+00 |
| updated_at | 2025-11-23 12:52:11.621953+00 |
| description | A Lightweight JSON Document Database in Rust |
| homepage | http://dnrops.gitlink.net |
| repository | https://gitcode.net/dnrops/luckdb |
| max_upload_size | |
| id | 1827171 |
| size | 1,649,016 |
LuckDB is a lightweight, in-memory JSON document database written in Rust with built-in AES-256 encryption and configuration management. It provides a simple yet powerful API for storing, querying, and manipulating JSON documents with support for indexing, aggregation, persistence, and secure client-server mode.
Add LuckDB to your Cargo.toml:
[dependencies]
luckdb = "0.1.3"
The LuckDB CLI server tool is included as part of the project. Build it from source:
# Clone the repository
git clone https://gitcode.net/dnrops/luckdb
cd luckdb
# Build the CLI tool
cargo build --release --bin luckdb-server-cli
# Or install from crates.io
cargo install luckdb
# After cargo build, the CLI binary will be available at:
# Linux/macOS: target/release/luckdb-server-cli
# Windows: target/release/luckdb-server-cli.exe
Alternatively, use the convenience scripts:
# Linux/macOS
chmod +x run-server.sh
./run-server.sh --help
# Windows
run-server.bat --help
use luckdb::{Client, config::DatabaseConfig, Query};
use serde_json::json;
fn main() -> luckdb::Result<()> {
// Create a client with encryption and storage
let mut config = DatabaseConfig::with_storage_path("./data");
config.encryption_enabled = true;
config.encryption_password = Some("my_secure_password".to_string());
let mut client = Client::with_config(config)?;
// Get a database
let db = client.db("mydb");
// Get a collection
let collection = db.collection("users");
// Insert a document
let doc = json!({
"name": "Alice",
"age": 30,
"city": "New York",
"interests": ["reading", "hiking"]
});
let id = collection.insert(doc)?;
println!("Inserted document with ID: {}", id);
// Query the collection
let query = Query::new().eq("name", "Alice".into());
let results = collection.find(query, None)?;
for (id, doc) in results {
println!("Found document {}: {}", id, doc);
}
// Save with encryption
client.save()?;
Ok(())
}
use luckdb::{Client, config::DatabaseConfig};
// Create configuration with encryption
let config = DatabaseConfig::with_storage_path("./secure_data")
.with_encryption("my_password");
let mut client = Client::with_config(config)?;
// All data will be encrypted when saved to disk
client.save()?;
use luckdb::{Server, config::DatabaseConfig};
// Configure server with authentication and encryption
let server_config = DatabaseConfig::with_storage_path("./server_data")
.with_auth("admin", "secure_password")
.with_encryption("server_encryption_key");
let mut server = Server::with_config("127.0.0.1:27017".parse().unwrap(), server_config)?;
server.start()?;
Create a config.toml file to configure LuckDB:
[luckdb]
# Storage path for database files (optional)
storage_path = "./data"
# Enable AES-256 encryption (default: false)
encryption_enabled = true
# Encryption password for data protection
encryption_password = "your_secure_password_here"
# Authentication for client-server mode (optional)
auth_username = "admin"
auth_password = "your_auth_password_here"
# Server address for client-server mode (optional)
server_address = "127.0.0.1:27017"
# Auto-save interval in seconds (optional, None = manual only)
auto_save_interval_seconds = 300
# Maximum number of backup files to keep (default: 5)
max_backup_files = 10
Then load the configuration:
use luckdb::{Client, config::DatabaseConfig};
// Load configuration from file
let config = DatabaseConfig::load_from_file("config.toml")?;
let mut client = Client::with_config(config)?;
// Or use the convenience method
let mut client = Client::with_config_file("config.toml")?;
# Quick development server (in-memory, debug logging)
luckdb-server-cli --dev
# Development server with file persistence
luckdb-server-cli --storage ./dev_data --log-level debug
# Development server with encryption
luckdb-server-cli --dev --encrypt --password "dev_password"
# Create production configuration
luckdb-server-cli config --output prod.toml --config-type prod
# Edit prod.toml to set real encryption and authentication passwords
# Start server with configuration
luckdb-server-cli --config prod.toml
# Quick server with authentication
luckdb-server-cli --auth --username admin --auth-password "secure_password" --storage ./data
# Server with encryption and authentication
luckdb-server-cli \
--encrypt \
--password "encryption_password" \
--auth \
--username admin \
--auth-password "auth_password" \
--storage ./secure_data
# View help
luckdb-server-cli --help
# Generate configuration files
luckdb-server-cli config --output config.toml --config-type prod
# Check version
luckdb-server-cli version
The Client is the entry point to LuckDB. It manages multiple databases with configurable storage and encryption.
// Basic in-memory client
let mut client = Client::new();
// With configuration
let config = DatabaseConfig::with_storage_path("./data")
.with_encryption("password");
let mut client = Client::with_config(config)?;
// From configuration file
let mut client = Client::with_config_file("config.toml")?;
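For example (a minimal sketch of a few of the management methods listed below, assuming the client created above):
// Get or create a database, list database names, then drop it and persist
client.db("scratch");
println!("Databases: {:?}", client.list_database_names());
client.drop_database("scratch")?;
client.save()?;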
- new() -> Client: Create a basic in-memory client
- with_config(config: DatabaseConfig) -> Result<Client>: Create client with configuration
- with_config_file(path: P) -> Result<Client>: Create client from TOML configuration file
- with_encryption(password: S) -> Result<Self>: Enable encryption on existing client
- db(&mut self, name: &str) -> &mut Database: Get or create a database
- list_database_names(&self) -> Vec<String>: List all databases
- drop_database(&mut self, name: &str) -> Result<()>: Drop a database
- save(&self) -> Result<()>: Save all data to disk (encrypted if configured)
- load(&mut self) -> Result<()>: Load data from disk (decrypt if needed)
- config(&self) -> &DatabaseConfig: Get current configuration
- update_config(&mut self, config: DatabaseConfig) -> Result<()>: Update configuration

A Database contains multiple collections.
let db = client.db("mydb");
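A short, hedged sketch of the collection-management methods listed below, assuming the db handle from the line above:
// Create a collection explicitly, inspect the database, then drop it again
db.create_collection("logs", None)?;
println!("Collections: {:?}", db.list_collection_names());
let _stats = db.stats()?; // DatabaseStats; available fields depend on the crate version
db.drop_collection("logs")?;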
- collection(&mut self, name: &str) -> &mut Collection: Get or create a collection
- list_collection_names(&self) -> Vec<String>: List all collections
- create_collection(&mut self, name: &str, options: Option<CreateCollectionOptions>) -> Result<()>: Create a collection with options
- drop_collection(&mut self, name: &str) -> Result<()>: Drop a collection
- stats(&self) -> Result<DatabaseStats>: Get database statistics
- run_command(&mut self, command: &Document) -> Result<Document>: Run a database command

A Collection stores JSON documents.
let collection = db.collection("users");
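A hedged sketch of a few of the read and write helpers listed below, assuming the collection handle above:
use luckdb::Query;
use serde_json::json;
// Insert several documents at once, then count them and list distinct values
let ids = collection.insert_many(vec![
json!({"name": "Bob", "city": "Boston"}),
json!({"name": "Carol", "city": "Boston"}),
])?;
println!("Inserted {} documents", ids.len());
let total = collection.count_documents(Query::new())?; // an empty query matches all documents
let cities = collection.distinct("city", None)?;
println!("{} documents across cities {:?}", total, cities);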
- insert(&mut self, doc: Document) -> Result<DocId>: Insert a document
- insert_many(&mut self, docs: Vec<Document>) -> Result<Vec<DocId>>: Insert multiple documents
- find(&self, query: Query, options: Option<FindOptions>) -> Result<Vec<(DocId, Document)>>: Find documents matching a query
- find_one(&self, query: Query, options: Option<FindOptions>) -> Result<(DocId, Document)>: Find a single document
- update_one(&mut self, query: Query, update: UpdateDocument, upsert: bool) -> Result<usize>: Update the first matching document
- update_many(&mut self, query: Query, update: UpdateDocument) -> Result<usize>: Update all matching documents
- replace_one(&mut self, query: Query, replacement: Document, upsert: bool) -> Result<usize>: Replace the first matching document
- delete_one(&mut self, query: Query) -> Result<usize>: Delete the first matching document
- delete_many(&mut self, query: Query) -> Result<usize>: Delete all matching documents
- count_documents(&self, query: Query) -> Result<usize>: Count documents matching a query
- create_index(&mut self, index: Index) -> Result<()>: Create an index
- drop_index(&mut self, name: &str) -> Result<()>: Drop an index
- list_indexes(&self) -> Result<Vec<Index>>: List all indexes
- aggregate(&self, pipeline: Vec<AggregationStage>) -> Result<Vec<Document>>: Run an aggregation pipeline
- distinct(&self, field: &str, query: Option<Query>) -> Result<Vec<Value>>: Get distinct values for a field
- bulk_write(&mut self, operations: Vec<BulkWriteOperation>, options: Option<BulkWriteOptions>) -> Result<BulkWriteResult>: Execute bulk write operations

The DatabaseConfig struct provides comprehensive configuration options for LuckDB.
use luckdb::config::DatabaseConfig;
// Default configuration
let config = DatabaseConfig::new();
// With storage path
let config = DatabaseConfig::with_storage_path("./data");
// With encryption
let config = DatabaseConfig::with_encryption("password");
// With authentication
let config = DatabaseConfig::new()
.with_auth("username", "password")
.with_storage_path("./data");
// Save/load configuration
config.save_to_file("config.toml")?;
let loaded_config = DatabaseConfig::load_from_file("config.toml")?;
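The validation helpers listed below can be used to sanity-check a configuration before handing it to a client; a minimal sketch:
// Validate the settings and check whether encryption and auth are fully configured
let config = DatabaseConfig::with_storage_path("./data")
.with_encryption("password");
config.validate()?;
println!("Encryption configured: {}", config.is_encryption_configured());
println!("Auth configured: {}", config.is_auth_configured());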
Fields:
- storage_path: Option<PathBuf>: Path for database file storage (can be directory or specific file)
- encryption_enabled: bool: Enable/disable AES-256 encryption (default: false)
- encryption_password: Option<String>: Password for encryption key derivation
- server_address: Option<String>: Server address for client-server mode
- auth_username: Option<String>: Authentication username
- auth_password: Option<String>: Authentication password
- auto_save_interval_seconds: Option<u64>: Auto-save interval in seconds (None = manual only)
- max_backup_files: usize: Maximum number of backup files to keep (default: 5)

Methods:
- new() -> DatabaseConfig: Create default configuration
- with_storage_path(path: P) -> DatabaseConfig: Set storage path
- with_encryption(password: S) -> DatabaseConfig: Enable encryption with password
- with_auth<U: Into<String>, P: Into<String>>(username, password) -> DatabaseConfig: Set authentication
- with_server_mode<S: Into<String>>(address: S) -> DatabaseConfig: Set server address
- save_to_file<P: AsRef<Path>>(&self, path: P) -> Result<()>: Save config to TOML file
- load_from_file<P: AsRef<Path>>(path: P) -> Result<Self>: Load config from TOML file
- validate(&self) -> Result<()>: Validate configuration settings
- is_encryption_configured(&self) -> bool: Check if encryption is properly configured
- is_auth_configured(&self) -> bool: Check if authentication is configured
- get_or_create_storage_path(&self) -> Result<PathBuf>: Get storage path, create parent dirs if needed

LuckDB provides built-in AES-256 encryption for secure data persistence.
use luckdb::encryption::{AesEncryption, EncryptionKey};
// Create encryption key from password
let key = EncryptionKey::from_password("my_password", b"salt");
// Encrypt data
let encrypted = AesEncryption::encrypt_string("sensitive data", &key)?;
// Decrypt data
let decrypted = AesEncryption::decrypt_string(&encrypted, &key)?;
The Server provides networked access to LuckDB with authentication and encryption.
use luckdb::{Server, config::DatabaseConfig};
// Create server with configuration
let config = DatabaseConfig::with_storage_path("./server_data")
.with_auth("admin", "password")
.with_encryption("server_key");
let mut server = Server::with_config("127.0.0.1:27017".parse().unwrap(), config)?;
// Start server
server.start()?;
Documents are JSON values represented by serde_json::Value.
use serde_json::json;
let doc = json!({
"name": "Alice",
"age": 30,
"city": "New York",
"interests": ["reading", "hiking"]
});
Each document automatically gets an _id field of type DocId when inserted.
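A small sketch illustrating this, assuming a collection handle like the ones above: the DocId returned by insert is the same identifier returned alongside the stored document.
use luckdb::Query;
let id = collection.insert(json!({"name": "Dana"}))?;
let (found_id, stored) = collection.find_one(Query::new().eq("name", "Dana".into()), None)?;
// found_id matches the id returned by insert; stored carries the generated _id field
println!("{} / {}: {}", id, found_id, stored);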
Queries are built using the Query struct and its methods.
use luckdb::{Query, Value};
use serde_json::json;
// Simple equality query
let query = Query::new().eq("name", "Alice".into());
// Complex query with multiple conditions
let query = Query::new()
.eq("city", "New York".into())
.gt("age", json!(25))
.in_("interests", vec!["reading".into(), "hiking".into()]);
// Using logical operators
let query1 = Query::new().eq("city", "New York".into());
let query2 = Query::new().eq("city", "San Francisco".into());
let query = Query::new().or(vec![query1, query2]);
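A few more operators from the reference list that follows, as a hedged sketch (the exact value types accepted may vary):
// Field presence, negated membership, and regex matching
let query = Query::new()
.exists("email", false)
.nin("city", vec!["Chicago".into(), "Boston".into()])
.regex("name", "^A.*");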
- eq(key, value): Field equals value
- ne(key, value): Field not equal to value
- gt(key, value): Field greater than value
- gte(key, value): Field greater than or equal to value
- lt(key, value): Field less than value
- lte(key, value): Field less than or equal to value
- in_(key, values): Field in array of values
- nin(key, values): Field not in array of values
- exists(key, exists): Field exists (or not)
- regex(key, pattern): Field matches regex pattern
- and(queries): Logical AND of queries
- or(queries): Logical OR of queries
- nor(queries): Logical NOR of queries
- not(query): Logical NOT of query
- all(key, values): Array contains all values
- elem_match(key, query): Array element matches query
- size(key, size): Array has specified size
- near(key, point, max_distance): Geospatial near query
- within(key, shape): Geospatial within query
- intersects(key, shape): Geospatial intersects query

Update operations are built using the UpdateDocument struct.
use luckdb::UpdateDocument;
use serde_json::json;
let update = UpdateDocument::new()
.set("status", "active".into())
.inc("login_count", json!(1))
.push("tags", "premium".into());
- set(key, value): Set field to value
- unset(key): Remove field
- inc(key, value): Increment field by value
- mul(key, value): Multiply field by value
- rename(old_key, new_key): Rename field
- set_on_insert(key, value): Set field on insert
- min(key, value): Set field to minimum of current and value
- max(key, value): Set field to maximum of current and value
- current_date(key, type_spec): Set field to current date
- push(key, value): Push value to array field
- push_all(key, values): Push all values to array field
- add_to_set(key, value): Add value to array if not present
- pop(key, pos): Remove first or last element of array
- pull(key, condition): Remove elements matching condition
- pull_all(key, values): Remove all specified values from array
- bit(key, operation): Bitwise operation

Indexes improve query performance.
use luckdb::{Index, IndexType};
// Create a simple index
let index = Index::new("name_index".to_string(), vec![("name".to_string(), IndexType::Ascending)]);
collection.create_index(index)?;
// Create a compound index
let index = Index::new("compound_index".to_string(), vec![
("city".to_string(), IndexType::Ascending),
("age".to_string(), IndexType::Descending)
]);
collection.create_index(index)?;
// Create a unique index
let index = Index::new("email_index".to_string(), vec![("email".to_string(), IndexType::Ascending)])
.unique(true);
collection.create_index(index)?;
Aggregation pipelines transform and analyze data.
use luckdb::{AggregationStage, GroupSpecification, GroupOperation, GroupId, SortOrder, Query};
use serde_json::json;
let pipeline = vec![
// Match documents
AggregationStage::Match(Query::new().eq("status", "active".into())),
// Group by city and count
AggregationStage::Group(GroupSpecification {
id: GroupId::Field("city".to_string()),
operations: {
let mut ops = std::collections::HashMap::new();
ops.insert("count".to_string(), GroupOperation::Sum(json!(1)));
ops.insert("avg_age".to_string(), GroupOperation::Avg("$age".into()));
ops
},
}),
// Sort by count
AggregationStage::Sort(vec![("count".to_string(), SortOrder::Descending)]),
// Limit results
AggregationStage::Limit(10),
];
let results = collection.aggregate(pipeline)?;
use luckdb::{Client, Query, UpdateDocument};
use serde_json::json;
fn main() -> luckdb::Result<()> {
let mut client = Client::new();
let db = client.db("test");
let collection = db.collection("users");
// Create
let doc = json!({
"name": "Alice",
"age": 30,
"city": "New York",
"interests": ["reading", "hiking"]
});
let id = collection.insert(doc)?;
println!("Created document with ID: {}", id);
// Read
let query = Query::new().eq("name", "Alice".into());
let results = collection.find(query, None)?;
for (id, doc) in results {
println!("Found document {}: {}", id, doc);
}
// Update
let update = UpdateDocument::new()
.set("age", json!(31))
.push("interests", "travel".into());
let count = collection.update_one(Query::new().eq("name", "Alice".into()), update, false)?;
println!("Updated {} documents", count);
// Delete
let count = collection.delete_one(Query::new().eq("name", "Alice".into()))?;
println!("Deleted {} documents", count);
Ok(())
}
use luckdb::{Client, Query, Value};
use serde_json::json;
fn main() -> luckdb::Result<()> {
let mut client = Client::new();
let db = client.db("test");
let collection = db.collection("users");
// Insert some test data
collection.insert(json!({"name": "Alice", "age": 30, "city": "New York", "active": true}))?;
collection.insert(json!({"name": "Bob", "age": 25, "city": "San Francisco", "active": false}))?;
collection.insert(json!({"name": "Charlie", "age": 35, "city": "New York", "active": true}))?;
collection.insert(json!({"name": "David", "age": 40, "city": "Chicago", "active": true}))?;
// Find active users in New York
let query = Query::new()
.eq("city", "New York".into())
.eq("active", Value::Bool(true));
let results = collection.find(query, None)?;
println!("Active users in New York:");
for (id, doc) in results {
println!(" {}: {}", id, doc);
}
// Find users older than 30 or inactive users
let query1 = Query::new().gt("age", json!(30));
let query2 = Query::new().eq("active", Value::Bool(false));
let query = Query::new().or(vec![query1, query2]);
let results = collection.find(query, None)?;
println!("Users older than 30 or inactive:");
for (id, doc) in results {
println!(" {}: {}", id, doc);
}
// Find users with specific interests
collection.insert(json!({
"name": "Eve",
"age": 28,
"city": "Boston",
"interests": ["reading", "coding", "hiking"]
}))?;
let query = Query::new().all("interests", vec!["reading".into(), "hiking".into()]);
let results = collection.find(query, None)?;
println!("Users with both reading and hiking interests:");
for (id, doc) in results {
println!(" {}: {}", id, doc);
}
Ok(())
}
use luckdb::{Client, Index, IndexType, Query};
use serde_json::json;
fn main() -> luckdb::Result<()> {
let mut client = Client::new();
let db = client.db("test");
let collection = db.collection("users");
// Insert test data
for i in 0..1000 {
collection.insert(json!({
"name": format!("User {}", i),
"age": i % 50 + 20,
"city": ["New York", "San Francisco", "Chicago", "Boston"][i % 4],
"active": i % 3 != 0
}))?;
}
// Create indexes
let name_index = Index::new("name_index".to_string(), vec![("name".to_string(), IndexType::Ascending)]);
collection.create_index(name_index)?;
let city_age_index = Index::new("city_age_index".to_string(), vec![
("city".to_string(), IndexType::Ascending),
("age".to_string(), IndexType::Descending)
]);
collection.create_index(city_age_index)?;
// Query using indexes
let query = Query::new().eq("city", "New York".into()).gt("age", json!(30));
let results = collection.find(query, None)?;
println!("Found {} users in New York older than 30", results.len());
// List indexes
let indexes = collection.list_indexes()?;
println!("Indexes:");
for index in indexes {
println!(" {}: {:?}", index.name, index.key);
}
Ok(())
}
use luckdb::{Client, AggregationStage, GroupSpecification, GroupOperation, GroupId, SortOrder, Query};
use serde_json::json;
fn main() -> luckdb::Result<()> {
let mut client = Client::new();
let db = client.db("test");
let collection = db.collection("sales");
// Insert test data
collection.insert(json!({
"product": "Laptop",
"category": "Electronics",
"price": 1200,
"quantity": 1,
"date": "2023-01-15",
"customer": "Alice"
}))?;
collection.insert(json!({
"product": "Phone",
"category": "Electronics",
"price": 800,
"quantity": 2,
"date": "2023-01-16",
"customer": "Bob"
}))?;
collection.insert(json!({
"product": "Desk Chair",
"category": "Furniture",
"price": 200,
"quantity": 1,
"date": "2023-01-17",
"customer": "Alice"
}))?;
collection.insert(json!({
"product": "Monitor",
"category": "Electronics",
"price": 300,
"quantity": 1,
"date": "2023-01-18",
"customer": "Charlie"
}))?;
// Calculate total sales by category
let pipeline = vec![
AggregationStage::Group(GroupSpecification {
id: GroupId::Field("category".to_string()),
operations: {
let mut ops = std::collections::HashMap::new();
ops.insert("total_sales".to_string(),
GroupOperation::Sum(json!({ "$multiply": ["$price", "$quantity"] })));
ops.insert("count".to_string(), GroupOperation::Sum(json!(1)));
ops
},
}),
AggregationStage::Sort(vec![("total_sales".to_string(), SortOrder::Descending)])
];
let results = collection.aggregate(pipeline)?;
println!("Sales by category:");
for doc in results {
println!(" {}", doc);
}
// Find top customers
let pipeline = vec![
AggregationStage::Group(GroupSpecification {
id: GroupId::Field("customer".to_string()),
operations: {
let mut ops = std::collections::HashMap::new();
ops.insert("total_spent".to_string(),
GroupOperation::Sum(json!({ "$multiply": ["$price", "$quantity"] })));
ops.insert("purchase_count".to_string(), GroupOperation::Sum(json!(1)));
ops
},
}),
AggregationStage::Sort(vec![("total_spent".to_string(), SortOrder::Descending)]),
AggregationStage::Limit(3)
];
let results = collection.aggregate(pipeline)?;
println!("Top customers:");
for doc in results {
println!(" {}", doc);
}
Ok(())
}
use luckdb::{Client, config::DatabaseConfig};
use serde_json::json;
fn main() -> luckdb::Result<()> {
// Create a client with encrypted storage
let config = DatabaseConfig::with_storage_path("./data")
.with_encryption("secure_password");
let mut client = Client::with_config(config)?;
// Load existing data if any (will decrypt automatically)
client.load()?;
// Get or create database and collection
let db = client.db("myapp");
let collection = db.collection("users");
// Insert a document
let doc = json!({
"name": "Alice",
"email": "alice@example.com",
"created_at": "2023-01-01T00:00:00Z",
"sensitive_data": "encrypted information"
});
let id = collection.insert(doc)?;
println!("Inserted document with ID: {}", id);
// Save data to disk (will be encrypted automatically)
client.save()?;
println!("Data saved to disk with encryption");
// Later, load the data again
let config2 = DatabaseConfig::with_storage_path("./data")
.with_encryption("secure_password");
let mut client2 = Client::with_config(config2)?;
client2.load()?;
let db2 = client2.db("myapp");
let collection2 = db2.collection("users");
// Query the loaded data (decrypted automatically)
let results = collection2.find(luckdb::Query::new(), None)?;
println!("Loaded {} documents", results.len());
for (id, doc) in results {
println!(" {}: {}", id, doc);
}
Ok(())
}
use luckdb::{Client, config::DatabaseConfig};
fn main() -> luckdb::Result<()> {
// Create configuration
let mut config = DatabaseConfig::new();
config.storage_path = Some("./app_data".into());
config.encryption_enabled = true;
config.encryption_password = Some("app_secret_key".to_string());
config.auth_username = Some("admin".to_string());
config.auth_password = Some("admin_password".to_string());
config.max_backup_files = 10;
// Save configuration to file
config.save_to_file("luckdb_config.toml")?;
println!("Configuration saved to luckdb_config.toml");
// Load configuration from file
let loaded_config = DatabaseConfig::load_from_file("luckdb_config.toml")?;
let mut client = Client::with_config(loaded_config)?;
// Use client with loaded configuration
let db = client.db("production");
println!("Database client ready with configuration");
Ok(())
}
LuckDB includes a comprehensive command-line interface for running production-ready servers with advanced configuration options, logging, and signal handling.
Build the CLI tool as part of the LuckDB project:
cargo build --release
# The CLI binary will be at:
# Linux/macOS: target/release/luckdb-server-cli
# Windows: target/release/luckdb-server-cli.exe
# Start server with default settings
luckdb-server-cli
# Start server with encryption and authentication
luckdb-server-cli --encrypt --password "secure_password" --auth --username admin --auth-password "auth_pass"
# Start server with configuration file
luckdb-server-cli --config config.toml
# Development mode (in-memory, debug logging)
luckdb-server-cli --dev --log-level debug
Generate configuration templates for different environments:
# Create production configuration
luckdb-server-cli config --output prod.toml --config-type prod
# Create development configuration
luckdb-server-cli config --output dev.toml --config-type dev
# Create test configuration
luckdb-server-cli config --output test.toml --config-type test
| Option | Description | Example |
|---|---|---|
| --config | Path to TOML configuration file | --config config.toml |
| --address | Server bind address | --address 127.0.0.1 |
| --port | Server port | --port 27017 |
| --storage | Storage path for database files | --storage ./data |
| --encrypt | Enable AES-256 encryption | --encrypt |
| --password | Encryption password | --password "secure_pass" |
| --auth | Enable authentication | --auth |
| --username | Authentication username | --username admin |
| --auth-password | Authentication password | --auth-password "auth_pass" |
| --log-level | Log level (error, warn, info, debug, trace) | --log-level info |
| --log-file | Log file path | --log-file server.log |
| --dev | Development mode | --dev |
| --auto-save | Auto-save interval in seconds | --auto-save 300 |
For production deployments, use a configuration file:
# 1. Create production configuration
luckdb-server-cli config --output /etc/luckdb/config.toml --config-type prod
# 2. Edit configuration with secure passwords
vim /etc/luckdb/config.toml
# 3. Set appropriate file permissions
chmod 600 /etc/luckdb/config.toml
# 4. Start server
luckdb-server-cli --config /etc/luckdb/config.toml
[luckdb]
# Production storage path
storage_path = "/var/lib/luckdb/data"
# Always enable encryption in production
encryption_enabled = true
encryption_password = "CHANGE_ME_IN_PRODUCTION"
# Authentication
auth_username = "admin"
auth_password = "CHANGE_ME_IN_PRODUCTION"
# Server address
server_address = "127.0.0.1:27017"
# Auto-save every 5 minutes
auto_save_interval_seconds = 300
# Maximum backup files
max_backup_files = 30
# Logging
log_level = "info"
log_file = "/var/log/luckdb/server.log"
All configuration options can be overridden with environment variables:
export LUCKDB_ENCRYPTION_PASSWORD="secure_password"
export LUCKDB_LOG_LEVEL="debug"
luckdb-server-cli
The CLI tool supports graceful shutdown through its built-in signal handling (for example, on Ctrl+C).
For detailed documentation, see CLI_README.md.
use luckdb::encryption::{AesEncryption, EncryptionKey};
fn main() -> luckdb::Result<()> {
// Create encryption key from password
let key = EncryptionKey::from_password("my_secure_password", b"unique_salt_2024");
// Encrypt sensitive data
let sensitive_data = r#"{
"api_key": "sk-1234567890abcdef",
"database_password": "db_secret_2024",
"private_key": "-----BEGIN RSA PRIVATE KEY-----\n...\n-----END RSA PRIVATE KEY-----"
}"#;
let encrypted_data = AesEncryption::encrypt_string(sensitive_data, &key)?;
println!("Encrypted data: {}", encrypted_data);
// Decrypt data
let decrypted_data = AesEncryption::decrypt_string(&encrypted_data, &key)?;
println!("Decrypted data: {}", decrypted_data);
// Verify integrity
assert_eq!(sensitive_data, decrypted_data);
println!("Encryption/decryption successful and integrity verified");
Ok(())
}
use luckdb::{Server, config::DatabaseConfig};
use std::net::SocketAddr;
fn main() -> luckdb::Result<()> {
// Start the server in a separate thread with configuration
let server_thread = std::thread::spawn(|| {
let server_config = DatabaseConfig::with_storage_path("./server_data")
.with_auth("admin", "password123")
.with_encryption("server_secret");
let addr: SocketAddr = "127.0.0.1:27017".parse().unwrap();
let mut server = Server::with_config(addr, server_config).unwrap();
if let Err(e) = server.start() {
eprintln!("Server error: {}", e);
}
});
// Give the server time to start
std::thread::sleep(std::time::Duration::from_millis(500));
// Connecting to the server over TCP would go here
// Note: see the RemoteClient example below for full client-server functionality
println!("Server started with authentication and encryption");
println!("Connect to luckdb://127.0.0.1:27017");
println!("Username: admin, Password: password123");
// For demonstration, we'll use a local client instead
use luckdb::Client;
let local_config = DatabaseConfig::with_storage_path("./local_data")
.with_encryption("local_secret");
let mut local_client = Client::with_config(local_config)?;
let db = local_client.db("testdb");
let collection = db.collection("local_users");
use serde_json::json;
let doc = json!({"name": "Local User", "type": "client"});
let id = collection.insert(doc)?;
local_client.save()?;
println!("Local client inserted document: {}", id);
server_thread.join().unwrap();
Ok(())
}
Note: This is just a basic example to give you an idea of how to use LuckDB. In a real-world application, you may want to add more error handling and security features.
#![allow(warnings)]
use luckdb::{Client, Server};
use std::net::SocketAddr;
use std::path::PathBuf;
fn main() -> luckdb::Result<()> {
// Start the server in a separate thread
let server_thread = std::thread::spawn(|| {
// Create server with authentication
let addr: SocketAddr = "127.0.0.1:27017".parse().unwrap();
let storage_path = Some(PathBuf::from("./data"));
let mut server = Server::new(addr, storage_path)
.with_auth("admin".to_string(), "password123".to_string());
server.start().unwrap();
});
// Give the server time to start
std::thread::sleep(std::time::Duration::from_millis(100));
// Connect to the server
let remote_client = luckdb::RemoteClient::new("127.0.0.1:27017".parse().unwrap());
let mut connection = remote_client.connect()?;
let response = connection.send_command("AUTH admin password123")?;
println!("Authentication response: {}", response);
// Send commands
let response = connection.send_command("INSERT mydb users {\"name\":\"Alice\",\"age\":30}")?;
println!("Server response: {}", response);
let response = connection.send_command("FIND mydb users {\"name\":\"Alice\"}")?;
println!("Server response: {}", response);
let response = connection.send_command("SAVE")?;
println!("Server response: {}", response);
// Close the connection
// connection.close()?;
// Stop the server
let response = connection.send_command("EXIT")?;
println!("Server response: {}", response);
server_thread.join().unwrap();
Ok(())
}
LuckDB is licensed under the MIT License. See LICENSE for details.