| Crates.io | diesel-timescaledb |
| lib.rs | diesel-timescaledb |
| version | 0.1.2 |
| created_at | 2025-09-03 20:54:06.74839+00 |
| updated_at | 2025-09-03 20:54:06.74839+00 |
| description | Diesel extensions for TimescaleDB functionality |
| homepage | |
| repository | https://github.com/hermes-capital-io/hermes-platform |
| max_upload_size | |
| id | 1823196 |
| size | 81,970 |
A Diesel extension crate that brings TimescaleDB's time-series functionality to Rust applications. It integrates TimescaleDB's hypertables, continuous aggregates, and time-series SQL functions with Diesel's type-safe query builder.
Add this to your Cargo.toml:
[dependencies]
diesel = { version = "2.1", features = ["postgres", "chrono"] }
diesel-timescaledb = "0.1.0"
This crate currently has no feature flags. All functionality is included by default and requires:
diesel with postgres and chrono features
use diesel::prelude::*;
use diesel_timescaledb::prelude::*;

// Define your table schema
table! {
    metrics (id) {
        id -> Int4,
        timestamp -> Timestamptz,
        value -> Float8,
        device_id -> Text,
    }
}

// Make it a hypertable
hypertable!(metrics, timestamp);

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Establish a connection
    let mut conn = TimescaleDbConnection::establish("postgresql://user:pass@localhost/tsdb")?;

    // Create the hypertable
    metrics::table::create_hypertable(&mut conn)?;

    // Use time_bucket for hourly aggregation
    let hourly_averages = metrics::table
        .select((
            time_bucket(Interval::from_hours(1), metrics::timestamp),
            diesel::dsl::avg(metrics::value),
        ))
        .group_by(time_bucket(Interval::from_hours(1), metrics::timestamp))
        .load::<(TimestampTz, Option<f64>)>(&mut conn)?;

    Ok(())
}
connection
Provides the TimescaleDbConnection wrapper that extends Diesel's PgConnection with TimescaleDB-specific capabilities.
use diesel_timescaledb::connection::TimescaleDbConnection;
let mut conn = TimescaleDbConnection::establish("postgresql://...")?;
types
TimescaleDB-specific type mappings for seamless integration with Rust's type system.
use diesel_timescaledb::types::{TimestampTz, TimeDimension};
// Work with timezone-aware timestamps
let now = TimestampTz::now();
// Define time dimensions for continuous aggregates
let dimension = TimeDimension::new("timestamp", "1 hour");
functions
Complete set of TimescaleDB SQL functions, available through Diesel's expression system.
use diesel_timescaledb::functions::*;
// Time bucketing
time_bucket(interval, timestamp_column)
time_bucket_with_origin(interval, timestamp_column, origin)
// Gapfilling
time_bucket_gapfill(interval, timestamp_column)
locf(value_column) // Last observation carried forward
interpolate(value_column)
// Statistics
first(value, timestamp) // First value in time range
last(value, timestamp) // Last value in time range
histogram(value, min, max, buckets)
// Approximation functions
approx_percentile(value, percentile)
percentile_agg(value)
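As a concrete illustration, first and last can be combined with time bucketing in a grouped query. This is a sketch based on the signatures listed above; the exact return types are assumptions:
// Earliest and latest reading per hour (sketch; Option<f64> return types assumed).
let extremes = metrics::table
    .select((
        time_bucket(Interval::from_hours(1), metrics::timestamp),
        first(metrics::value, metrics::timestamp),
        last(metrics::value, metrics::timestamp),
    ))
    .group_by(time_bucket(Interval::from_hours(1), metrics::timestamp))
    .load::<(TimestampTz, Option<f64>, Option<f64>)>(&mut conn)?;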
dsl
Query DSL extensions that make time-series queries more ergonomic.
use diesel_timescaledb::dsl::*;
// Use the TimeSeriesAggregation pattern
let aggregation = TimeSeriesAggregation::new(
    "metrics",
    "timestamp",
    "value",
    "1 hour",
)?;

// Chain time-series specific operations
let result = metrics::table
    .time_bucket(metrics::timestamp, "1 hour")
    .filter(metrics::timestamp.gt(now - 1.week()))
    .load(&mut conn)?;
schema
Utilities for managing TimescaleDB schema objects such as hypertables and continuous aggregates.
use diesel_timescaledb::schema::*;
// Configure continuous aggregates
let config = ContinuousAggregateConfig {
    name: "hourly_metrics",
    view_query: "SELECT time_bucket('1 hour', timestamp), avg(value) FROM metrics",
    materialized_only: false,
    refresh_interval: Some("1 hour"),
};
// Create the continuous aggregate
create_continuous_aggregate(&mut conn, config)?;
hypertable!
Macro for declaring a table as a hypertable and generating helper methods.
// Declare a hypertable
hypertable!(metrics, timestamp);
// This generates:
// - metrics::table::create_hypertable(&mut conn)
// - metrics::table::drop_hypertable(&mut conn)
// - metrics::table::add_compression_policy(&mut conn, "7 days")
// - metrics::table::add_retention_policy(&mut conn, "1 year")
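Putting the generated helpers together, a typical setup might look like this (a sketch using the methods listed above):
// Sketch: apply the generated helpers after declaring the hypertable.
metrics::table::create_hypertable(&mut conn)?;
metrics::table::add_compression_policy(&mut conn, "7 days")?;
metrics::table::add_retention_policy(&mut conn, "1 year")?;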
use diesel_timescaledb::prelude::*;
// Define a continuous aggregate for hourly statistics
let hourly_stats = ContinuousAggregateConfig {
    name: "metrics_hourly",
    view_query: r#"
        SELECT
            time_bucket('1 hour', timestamp) AS hour,
            device_id,
            avg(value) AS avg_value,
            max(value) AS max_value,
            min(value) AS min_value,
            count(*) AS sample_count
        FROM metrics
        GROUP BY hour, device_id
    "#,
    materialized_only: false,
    refresh_interval: Some("30 minutes"),
};
// Create the aggregate
create_continuous_aggregate(&mut conn, hourly_stats)?;
// Query the aggregate
table! {
    metrics_hourly (hour, device_id) {
        hour -> Timestamptz,
        device_id -> Text,
        avg_value -> Float8,
        max_value -> Float8,
        min_value -> Float8,
        sample_count -> Int8,
    }
}

let recent_stats = metrics_hourly::table
    .filter(metrics_hourly::hour.gt(now - 1.day()))
    .load(&mut conn)?;
// Add compression policy to compress chunks older than 7 days
metrics::table::add_compression_policy(&mut conn, "7 days")?;
// Manually compress specific chunks
compress_chunk(&mut conn, "metrics", older_than = "30 days")?;
// Automatically drop data older than 1 year
metrics::table::add_retention_policy(&mut conn, "1 year")?;
// Configure cascading policies
let retention_config = RetentionPolicy {
    table: "metrics",
    drop_after: "1 year",
    cascade_to_continuous_aggregates: true,
};
apply_retention_policy(&mut conn, retention_config)?;
use diesel_timescaledb::functions::*;
// Fill gaps in time-series data
let filled_data = metrics::table
    .select((
        time_bucket_gapfill(
            Interval::from_hours(1),
            metrics::timestamp,
            now - 1.day(),
            now,
        ),
        locf(diesel::dsl::avg(metrics::value)), // last observation carried forward
    ))
    .group_by(time_bucket_gapfill(
        Interval::from_hours(1),
        metrics::timestamp,
        now - 1.day(),
        now,
    ))
    .load::<(TimestampTz, Option<f64>)>(&mut conn)?;
use diesel_timescaledb::types::TimestampTz;
use chrono::{DateTime, Utc, TimeZone};
use chrono_tz::US::Pacific;
// Convert between time zones
let utc_time = TimestampTz::now();
let pacific_time = Pacific.from_utc_datetime(&utc_time.naive_utc());
// Query with timezone-aware timestamps
let results = metrics::table
    .filter(metrics::timestamp.between(
        TimestampTz::from_utc(start_time),
        TimestampTz::from_utc(end_time),
    ))
    .load(&mut conn)?;
// Configure chunk time interval for optimal performance
alter_chunk_time_interval(&mut conn, "metrics", "6 hours")?;
// Create optimized indexes for time-series queries
diesel::sql_query(
    "CREATE INDEX ON metrics (device_id, timestamp DESC) WHERE timestamp > NOW() - INTERVAL '7 days'"
).execute(&mut conn)?;
TimescaleDB must be installed and enabled in the target database:
-- Enable the TimescaleDB extension
CREATE EXTENSION IF NOT EXISTS timescaledb;
-- Verify installation
SELECT extversion FROM pg_extension WHERE extname = 'timescaledb';
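The same setup can also be driven from Rust, for example inside a migration. This is a sketch using only Diesel's plain sql_query API, not an API of this crate:
// Enable the extension idempotently before creating any hypertables.
diesel::sql_query("CREATE EXTENSION IF NOT EXISTS timescaledb")
    .execute(&mut conn)?;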
All operations return Result types with detailed error information:
use diesel_timescaledb::error::TimescaleDbError;
match metrics::table::create_hypertable(&mut conn) {
    Ok(_) => println!("Hypertable created successfully"),
    Err(TimescaleDbError::AlreadyExists) => println!("Hypertable already exists"),
    Err(e) => eprintln!("Error creating hypertable: {}", e),
}
Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
# Clone the repository
git clone https://github.com/hermes-capital-io/hermes-platform.git
cd hermes-platform/Crates/diesel-timescaledb
# Run tests
cargo test
# Run tests with all features
cargo test --all-features
# Build documentation
cargo doc --open
# Run linting
cargo clippy --all-targets --all-features
Tests require a PostgreSQL database with TimescaleDB installed. Set the DATABASE_URL environment variable:
export DATABASE_URL=postgresql://user:password@localhost/test_db
cargo test
Check out the examples directory for more comprehensive examples:
basic_usage.rs - Getting started with hypertables and time bucketing
continuous_aggregates.rs - Working with continuous aggregates
compression.rs - Implementing compression strategies
gapfilling.rs - Handling missing data points
Run examples with:
cargo run --example basic_usage
This project is dual-licensed under either the Apache License, Version 2.0 or the MIT License, at your option.
For issues and questions, please open an issue on the GitHub repository.
Part of the Hermes Platform - High-performance financial infrastructure built with Rust.