| Crates.io | chatdelta |
| lib.rs | chatdelta |
| version | 0.8.0 |
| created_at | 2025-07-14 02:08:00.607506+00 |
| updated_at | 2025-09-22 17:33:41.557916+00 |
| description | A unified Rust library for connecting to multiple AI APIs with streaming, conversations, and parallel execution |
| homepage | https://github.com/ChatDelta/chatdelta-rs |
| repository | https://github.com/ChatDelta/chatdelta-rs |
| max_upload_size | |
| id | 1750985 |
| size | 267,190 |
A unified Rust library for connecting to multiple AI APIs (OpenAI, Google Gemini, Anthropic Claude) with a common interface. Supports parallel execution, conversations, streaming, retry logic, and extensive configuration options.
A single trait, AiClient, covers all AI providers, and clients are configured through ClientConfig::builder().

Add this to your Cargo.toml:
[dependencies]
chatdelta = "0.8"
tokio = { version = "1", features = ["full"] }
use chatdelta::{AiClient, ClientConfig, create_client};
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = ClientConfig::builder()
.timeout(Duration::from_secs(30))
.retries(3)
.temperature(0.7)
.max_tokens(1024)
.build();
let client = create_client("openai", "your-api-key", "gpt-4o", config)?;
let response = client.send_prompt("Hello, world!").await?;
println!("{}", response);
Ok(())
}
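In practice you will usually read the API key from the environment rather than hard-coding it. Below is a minimal sketch of the same quick start, assuming the key is exported in an OPENAI_API_KEY environment variable (the variable name is illustrative, not required by the library):

use chatdelta::{AiClient, ClientConfig, create_client};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read the key from the environment instead of embedding it in source control
    let api_key = std::env::var("OPENAI_API_KEY")?;
    let config = ClientConfig::builder()
        .timeout(Duration::from_secs(30))
        .retries(3)
        .build();
    let client = create_client("openai", api_key.as_str(), "gpt-4o", config)?;
    let response = client.send_prompt("Hello, world!").await?;
    println!("{}", response);
    Ok(())
}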
use chatdelta::{AiClient, ClientConfig, Conversation, create_client};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = ClientConfig::builder()
.system_message("You are a helpful assistant")
.temperature(0.7)
.build();
let client = create_client("anthropic", "your-api-key", "claude-3-5-sonnet-20241022", config)?;
let mut conversation = Conversation::new();
conversation.add_user("What's the capital of France?");
conversation.add_assistant("The capital of France is Paris.");
conversation.add_user("What's its population?");
let response = client.send_conversation(&conversation).await?;
println!("{}", response);
Ok(())
}
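Because a Conversation is an ordered transcript, you can continue the dialogue by appending the model's reply and the next user turn before sending again. A minimal multi-turn sketch reusing only the calls shown above; it assumes send_conversation returns the reply as a String, as the println! in the previous example suggests:

use chatdelta::{AiClient, ClientConfig, Conversation, create_client};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfig::builder()
        .system_message("You are a helpful assistant")
        .build();
    let client = create_client("anthropic", "your-api-key", "claude-3-5-sonnet-20241022", config)?;
    let mut conversation = Conversation::new();
    conversation.add_user("What's the capital of France?");
    let first_reply = client.send_conversation(&conversation).await?;
    // Record the assistant's reply so the next turn is answered with full context
    conversation.add_assistant(first_reply.as_str());
    conversation.add_user("What's its population?");
    let second_reply = client.send_conversation(&conversation).await?;
    println!("{}", second_reply);
    Ok(())
}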
use chatdelta::{create_client, execute_parallel, ClientConfig};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = ClientConfig::builder()
.retries(2)
.temperature(0.7)
.build();
let clients = vec![
create_client("openai", "openai-key", "gpt-4o", config.clone())?,
create_client("anthropic", "claude-key", "claude-3-5-sonnet-20241022", config.clone())?,
create_client("google", "gemini-key", "gemini-1.5-pro", config)?,
];
let results = execute_parallel(clients, "Explain quantum computing").await;
for (name, result) in results {
match result {
Ok(response) => println!("{}: {}", name, response),
Err(e) => eprintln!("{} failed: {}", name, e),
}
}
Ok(())
}
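execute_parallel returns one (name, result) pair per client, so you can also reduce the vector instead of looping over it, for example keeping only the first successful answer. A minimal sketch assuming the same pair shape used in the loop above:

use chatdelta::{ClientConfig, create_client, execute_parallel};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfig::builder().retries(2).build();
    let clients = vec![
        create_client("openai", "openai-key", "gpt-4o", config.clone())?,
        create_client("google", "gemini-key", "gemini-1.5-pro", config)?,
    ];
    let results = execute_parallel(clients, "Explain quantum computing").await;
    // Take the first entry whose result is Ok, discarding the failures
    let first_success = results
        .into_iter()
        .find_map(|(name, result)| result.ok().map(|response| (name, response)));
    match first_success {
        Some((name, response)) => println!("{}: {}", name, response),
        None => eprintln!("Every provider failed"),
    }
    Ok(())
}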
Two methods are available for streaming responses: a Stream-based API (stream_prompt) and a channel-based API (send_prompt_streaming):
use chatdelta::{AiClient, ClientConfig, create_client};
use tokio_stream::StreamExt;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = ClientConfig::builder()
.temperature(0.8)
.build();
let client = create_client("openai", "your-api-key", "gpt-4o", config)?;
if client.supports_streaming() {
let mut stream = client.stream_prompt("Tell me a story").await?;
while let Some(chunk) = stream.next().await {
match chunk {
Ok(chunk) => {
print!("{}", chunk.content);
if chunk.finished {
println!("\n[Stream finished]");
break;
}
}
Err(e) => eprintln!("Stream error: {}", e),
}
}
} else {
println!("Client doesn't support streaming");
}
Ok(())
}
use chatdelta::{AiClient, ClientConfig, create_client, StreamChunk};
use tokio::sync::mpsc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let config = ClientConfig::builder()
.temperature(0.8)
.build();
let client = create_client("openai", "your-api-key", "gpt-4o", config)?;
// Create a channel for receiving stream chunks
let (tx, mut rx) = mpsc::unbounded_channel::<StreamChunk>();
// Start streaming in a task
let client_clone = client.clone();
tokio::spawn(async move {
if let Err(e) = client_clone.send_prompt_streaming("Tell me a story", tx).await {
eprintln!("Streaming error: {}", e);
}
});
// Receive and print chunks
while let Some(chunk) = rx.recv().await {
print!("{}", chunk.content);
if chunk.finished {
println!("\n[Stream finished]");
break;
}
}
Ok(())
}
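The channel-based variant also makes it easy to keep a copy of the complete response while still printing chunks as they arrive. A small sketch built on the same StreamChunk fields (content and finished) used above:

use chatdelta::{AiClient, ClientConfig, StreamChunk, create_client};
use tokio::sync::mpsc;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = ClientConfig::builder()
        .temperature(0.8)
        .build();
    let client = create_client("openai", "your-api-key", "gpt-4o", config)?;
    let (tx, mut rx) = mpsc::unbounded_channel::<StreamChunk>();
    let client_clone = client.clone();
    tokio::spawn(async move {
        if let Err(e) = client_clone.send_prompt_streaming("Tell me a story", tx).await {
            eprintln!("Streaming error: {}", e);
        }
    });
    // Print each chunk as it arrives and accumulate the full response
    let mut full_response = String::new();
    while let Some(chunk) = rx.recv().await {
        print!("{}", chunk.content);
        full_response.push_str(&chunk.content);
        if chunk.finished {
            break;
        }
    }
    println!("\n[{} characters received]", full_response.len());
    Ok(())
}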
"openai", "gpt", or "chatgpt""gpt-4", "gpt-3.5-turbo", etc."google" or "gemini""gemini-1.5-pro", "gemini-1.5-flash", etc."anthropic" or "claude""claude-3-5-sonnet-20241022", "claude-3-haiku-20240307", etc.Optional features can be enabled in your Cargo.toml:
[dependencies]
chatdelta = { version = "0.8", features = ["experimental"] }
# Or specific features:
# chatdelta = { version = "0.8", features = ["orchestration", "metrics-export"] }
Available features:
- orchestration: Multi-model orchestration and consensus
- prompt-optimization: Advanced prompt engineering
- experimental: Enables all experimental features
- metrics-export: Prometheus and OpenTelemetry metrics export

The ClientConfig supports extensive configuration through a builder pattern:
use chatdelta::ClientConfig;
use std::time::Duration;
let config = ClientConfig::builder()
.timeout(Duration::from_secs(60)) // Request timeout
.retries(3) // Number of retry attempts
.temperature(0.8) // Response creativity (0.0-2.0)
.max_tokens(2048) // Maximum response length
.top_p(0.9) // Top-p sampling (0.0-1.0)
.frequency_penalty(0.1) // Frequency penalty (-2.0 to 2.0)
.presence_penalty(0.1) // Presence penalty (-2.0 to 2.0)
.system_message("You are a helpful assistant") // System message for conversation context
.build();
| Parameter | Description | Default | Supported By |
|---|---|---|---|
| timeout | HTTP request timeout | 30 seconds | All |
| retries | Number of retry attempts | 0 | All |
| temperature | Response creativity (0.0-2.0) | None | All |
| max_tokens | Maximum response length | 1024 | All |
| top_p | Top-p sampling (0.0-1.0) | None | OpenAI |
| frequency_penalty | Frequency penalty (-2.0 to 2.0) | None | OpenAI |
| presence_penalty | Presence penalty (-2.0 to 2.0) | None | OpenAI |
| system_message | System message for conversations | None | All |
The library provides comprehensive error handling through the ClientError enum with detailed error types:
use chatdelta::{ClientError, ApiErrorType, NetworkErrorType};
// result is the Result returned by a client call such as client.send_prompt(...)
match result {
Err(ClientError::Network(net_err)) => {
match net_err.error_type {
NetworkErrorType::Timeout => println!("Request timed out"),
NetworkErrorType::ConnectionFailed => println!("Connection failed"),
_ => println!("Network error: {}", net_err.message),
}
}
Err(ClientError::Api(api_err)) => {
match api_err.error_type {
ApiErrorType::RateLimit => println!("Rate limit exceeded"),
ApiErrorType::QuotaExceeded => println!("API quota exceeded"),
ApiErrorType::InvalidModel => println!("Invalid model specified"),
_ => println!("API error: {}", api_err.message),
}
}
Err(ClientError::Authentication(auth_err)) => {
println!("Authentication failed: {}", auth_err.message);
}
Err(ClientError::Configuration(config_err)) => {
println!("Configuration error: {}", config_err.message);
}
Err(ClientError::Parse(parse_err)) => {
println!("Parse error: {}", parse_err.message);
}
Err(ClientError::Stream(stream_err)) => {
println!("Stream error: {}", stream_err.message);
}
Ok(response) => println!("Success: {}", response),
}
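These categories make simple recovery policies easy to express. Below is a minimal sketch of a retry-on-rate-limit helper; it assumes send_prompt returns Result<String, ClientError>, that AiClient is usable as a trait object, and it picks an arbitrary delay and attempt limit:

use std::time::Duration;
use chatdelta::{AiClient, ApiErrorType, ClientError};

/// Retry a prompt a few times when the provider reports a rate limit.
async fn send_with_backoff(client: &dyn AiClient, prompt: &str) -> Result<String, ClientError> {
    let mut attempts = 0;
    loop {
        match client.send_prompt(prompt).await {
            Ok(response) => return Ok(response),
            Err(ClientError::Api(api_err))
                if matches!(api_err.error_type, ApiErrorType::RateLimit) && attempts < 3 =>
            {
                attempts += 1;
                // Wait before retrying; a real policy might use exponential backoff
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
            Err(other) => return Err(other),
        }
    }
}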
This project is licensed under the MIT License - see the LICENSE file for details.
We welcome contributions! To get started, clone the repository and install the Rust toolchain. Before opening a pull request, run the following commands:
# Check formatting
cargo fmt -- --check
# Run the linter
cargo clippy -- -D warnings
# Execute tests
cargo test
This project uses GitHub Actions to run the same checks automatically on every pull request.