| Crates.io | prax-duckdb |
| lib.rs | prax-duckdb |
| version | 0.5.0 |
| created_at | 2025-12-29 01:37:09.110398+00 |
| updated_at | 2026-01-07 18:42:46.651606+00 |
| description | DuckDB database driver for Prax ORM - optimized for analytical workloads |
| homepage | |
| repository | https://github.com/pegasusheavy/prax-orm |
| max_upload_size | |
| id | 2009790 |
| size | 174,413 |
DuckDB database driver for the Prax ORM, optimized for analytical workloads (OLAP).
DuckDB excels at analytical (OLAP) workloads: aggregations, window functions, and direct querying of Parquet, CSV, and JSON files.
For OLTP workloads (many small transactions), consider PostgreSQL or SQLite instead.
Add to your Cargo.toml:
[dependencies]
prax-duckdb = "0.5"
use prax_duckdb::{DuckDbPool, DuckDbConfig, DuckDbEngine};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// In-memory database
let pool = DuckDbPool::new(DuckDbConfig::in_memory()).await?;
let engine = DuckDbEngine::new(pool);
// Create a table
engine.raw_sql_batch(r#"
CREATE TABLE sales (
date DATE,
region VARCHAR,
revenue DECIMAL(10,2)
);
INSERT INTO sales VALUES
('2024-01-01', 'North', 1000.00),
('2024-01-01', 'South', 1500.00),
('2024-01-02', 'North', 1200.00);
"#).await?;
// Query with aggregation
let results = engine.execute_raw(
"SELECT region, SUM(revenue) as total FROM sales GROUP BY region",
&[]
).await?;
for result in results {
println!("{}", result.json());
}
Ok(())
}
let config = DuckDbConfig::in_memory();
let config = DuckDbConfig::from_path("./analytics.duckdb")?;
let config = DuckDbConfig::from_url(
"duckdb:///path/to/db.duckdb?threads=4&memory_limit=4GB"
)?;
let config = DuckDbConfig::builder()
.path("./analytics.duckdb")
.threads(8)
.memory_limit("8GB")
.read_only()
.build();
let sql = r#"
SELECT
date,
revenue,
SUM(revenue) OVER (
PARTITION BY region
ORDER BY date
ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
) as cumulative_revenue,
AVG(revenue) OVER (
PARTITION BY region
ORDER BY date
ROWS BETWEEN 6 PRECEDING AND CURRENT ROW
) as rolling_avg
FROM sales
"#;
let results = engine.execute_raw(sql, &[]).await?;
// Query Parquet files directly
let results = engine.query_parquet("./data/*.parquet").await?;
// Export to Parquet
engine.copy_to_parquet(
"SELECT * FROM sales WHERE date >= '2024-01-01'",
"./export.parquet"
).await?;
// Query CSV files
let results = engine.query_csv("./data.csv", true).await?; // true = has header
// Export to CSV
engine.copy_to_csv(
"SELECT * FROM sales",
"./export.csv",
true // include header
).await?;
// Query JSON files
let results = engine.query_json("./data.json").await?;
let pool = DuckDbPool::builder()
.in_memory()
.max_connections(10)
.min_connections(2)
.build()
.await?;
// Get a connection
let conn = pool.get().await?;
// Connection is automatically returned to pool when dropped
let conn = pool.get().await?;
// Manual transaction management
conn.execute_batch("BEGIN TRANSACTION").await?;
conn.execute("INSERT INTO table VALUES (?)", &[value]).await?;
conn.execute_batch("COMMIT").await?;
// Or use savepoints
conn.execute_batch("SAVEPOINT sp1").await?;
// ... operations ...
conn.execute_batch("RELEASE SAVEPOINT sp1").await?;
use prax_duckdb::{DuckDbError, DuckDbResult};
fn handle_error(result: DuckDbResult<()>) {
match result {
Ok(_) => println!("Success"),
Err(DuckDbError::Query(msg)) => println!("Query error: {}", msg),
Err(DuckDbError::Connection(msg)) => println!("Connection error: {}", msg),
Err(DuckDbError::Parquet(msg)) => println!("Parquet error: {}", msg),
Err(e) => println!("Other error: {}", e),
}
}
| Feature | Description |
|---|---|
| bundled | Bundle DuckDB library (default) |
| json | JSON extension support |
| parquet | Parquet file support |
| chrono | Chrono date/time support |
| serde_json | Serde JSON support |
| uuid | UUID support |
| extensions-full | All extensions (json, parquet, etc.) |
Set `memory_limit` to prevent OOM.

License: MIT OR Apache-2.0