| Crates.io | anthropic_rust |
| lib.rs | anthropic_rust |
| version | 0.1.3 |
| created_at | 2025-08-25 16:43:32.935626+00 |
| updated_at | 2025-09-04 06:53:49.511983+00 |
| description | A modern, type-safe Rust SDK for the Anthropic API, providing async-first access to Claude models with comprehensive error handling and streaming support. |
| homepage | https://github.com/anthropics/anthropic-sdk-rust |
| repository | https://github.com/anthropics/anthropic-sdk-rust |
| max_upload_size | |
| id | 1809799 |
| size | 621,759 |
A modern, idiomatic Rust SDK for the Anthropic API, providing type-safe, async-first access to Claude models.
Add this to your Cargo.toml:
[dependencies]
anthropic_rust = "0.1.3"
tokio = { version = "1.0", features = ["full"] }
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a client; the API key is read from the ANTHROPIC_API_KEY env var.
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    // Assemble and send a single-turn chat request.
    let request = client
        .chat_builder()
        .user_message(ContentBlock::text("Hello, Claude!"))
        .build();
    let response = client.execute_chat(request).await?;

    // Print every text block Claude returned, skipping non-text content.
    for block in response.content {
        if let ContentBlock::Text { text, .. } = block {
            println!("Claude: {}", text);
        }
    }
    Ok(())
}
Set your API key as an environment variable:
export ANTHROPIC_API_KEY="your-api-key-here"
Or configure it explicitly:
let client = Client::builder()
.api_key("your-api-key")
.model(Model::Claude35Sonnet20241022)
.build()?;
// NOTE: `Role` and `MessageParam` were previously imported here but never
// used — the chat builder constructs the message history directly.
use anthropic_rust::{Client, Model, ContentBlock};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    // Build a multi-turn conversation: a system prompt plus prior
    // user/assistant turns, ending on the user message to answer.
    let request = client
        .chat_builder()
        .system("You are a helpful assistant.")
        .user_message(ContentBlock::text("What's the capital of France?"))
        .assistant_message(ContentBlock::text("The capital of France is Paris."))
        .user_message(ContentBlock::text("What's its population?"))
        .build();

    let response = client.execute_chat(request).await?;

    // Print the first content block if it is text.
    if let Some(ContentBlock::Text { text, .. }) = response.content.first() {
        println!("Claude: {}", text);
    }
    Ok(())
}
use anthropic_rust::{Client, Model, ContentBlock, StreamEvent};
use futures::StreamExt;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    let request = client
        .chat_builder()
        .user_message(ContentBlock::text("Write a short poem about Rust"))
        .build();

    // Consume the response incrementally as streaming events arrive.
    let mut stream = client.stream_chat(request).await?;
    print!("Claude: ");
    while let Some(event) = stream.next().await {
        match event? {
            // Text deltas carry the incremental output; print them as they come.
            StreamEvent::ContentBlockDelta { delta, .. } => {
                if let anthropic_rust::ContentDelta::TextDelta { text } = delta {
                    print!("{}", text);
                }
            }
            // The stream signals completion with MessageStop.
            StreamEvent::MessageStop => break,
            // Ignore other bookkeeping events.
            _ => {}
        }
    }
    println!();
    Ok(())
}
use anthropic_rust::{Client, Model, ContentBlock, Tool};
use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    // Describe the tool's input contract with a JSON Schema.
    let schema = json!({
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "City name"
            }
        },
        "required": ["location"]
    });

    // Define a tool Claude may call.
    let weather_tool = Tool::new("get_weather")
        .description("Get current weather for a location")
        .schema_value(schema)
        .build();

    let request = client
        .chat_builder()
        .user_message(ContentBlock::text("What's the weather in San Francisco?"))
        .tool(weather_tool)
        .build();

    let response = client.execute_chat(request).await?;

    // The reply may interleave plain text with tool-use requests.
    for block in response.content {
        match block {
            ContentBlock::Text { text, .. } => {
                println!("Claude: {}", text);
            }
            ContentBlock::ToolUse { name, input, .. } => {
                println!("Claude wants to use tool: {} with input: {}", name, input);
                // Implement your tool logic here.
            }
            _ => {}
        }
    }
    Ok(())
}
use anthropic_rust::{Client, Model, ContentBlock, ImageMediaType};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Client::new(Model::Claude35Sonnet20241022)?;
// Load and encode an image
let image_data = std::fs::read("path/to/image.jpg")?;
let base64_image = base64::encode(&image_data);
let request = client.chat_builder()
.user_message(ContentBlock::image_base64(
ImageMediaType::Jpeg,
base64_image
))
.user_message(ContentBlock::text("What do you see in this image?"))
.build();
let response = client.execute_chat(request).await?;
if let Some(ContentBlock::Text { text, .. }) = response.content.first() {
println!("Claude: {}", text);
}
Ok(())
}
use anthropic_rust::{Client, Model, RetryConfig};
use std::time::Duration;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Explicit client configuration: API key, default model, token cap,
    // per-request timeout, and retry behavior.
    let client = Client::builder()
        .api_key("your-api-key")
        .model(Model::Claude35Sonnet20241022)
        .max_tokens(2000)
        .timeout(Duration::from_secs(30))
        .retry_config(RetryConfig {
            max_retries: 5,
            // First retry waits 1s; subsequent delays grow by the
            // backoff multiplier up to max_delay.
            initial_delay: Duration::from_millis(1000),
            max_delay: Duration::from_secs(60),
            backoff_multiplier: 2.0,
        })
        .build()?;

    // Use the configured client...
    Ok(())
}
use anthropic_rust::{Client, Model, ContentBlock, types::{CountTokensRequest, MessageParam, Role}};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = Client::new(Model::Claude35Sonnet20241022)?;

    // Count the input tokens of a message without running a chat request.
    let message = MessageParam {
        role: Role::User,
        content: vec![ContentBlock::text("How many tokens is this message?")],
    };
    let request = CountTokensRequest {
        messages: vec![message],
        system: None,
        tools: None,
    };

    let token_count = client.count_tokens(request).await?;
    println!("This message uses {} input tokens", token_count.input_tokens);
    Ok(())
}
The SDK provides comprehensive error handling with detailed error types and user-friendly messages:
use anthropic_rust::{Client, Model, Error};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Client::new(Model::Claude35Sonnet20241022)?;
// ... create request ...
match client.execute_chat(request).await {
Ok(response) => {
println!("Success: {:?}", response);
}
Err(Error::Authentication(msg)) => {
eprintln!("Authentication failed: {}", msg);
// This is a configuration error - check your API key
}
Err(Error::RateLimit { retry_after, .. }) => {
eprintln!("Rate limited. Retry after: {:?}", retry_after);
// This is an integration test error - reduce request frequency
}
Err(Error::Network(err)) => {
eprintln!("Network error: {}", err);
// This is an integration test error - check connectivity
}
Err(err) => {
// Get user-friendly error message with context
eprintln!("Error: {}", err.user_message());
// Get debugging information for development
if cfg!(debug_assertions) {
eprintln!("{}", err.debug_info());
}
}
}
Ok(())
}
Errors are categorized to help with debugging and error handling:
Choose the right model for your use case:
| Model | Best For | Speed | Cost |
|---|---|---|---|
| `Claude3Haiku20240307` | Simple tasks, fast responses | Fastest | Lowest |
| `Claude35Sonnet20241022` | Balanced performance | Medium | Medium |
| `Claude3Opus20240229` | Complex reasoning, analysis | Slower | Higher |
// Use different models for different tasks
let client = Client::new(Model::Claude35Sonnet20241022)?;
// Use Haiku for simple, fast responses
let simple_response = client.execute_chat_with_model(
Model::Claude3Haiku20240307,
simple_request
).await?;
// Use Opus for complex analysis
let complex_response = client.execute_chat_with_model(
Model::Claude3Opus20240229,
complex_request
).await?;
The SDK uses a comprehensive testing strategy with both unit tests and integration tests:
# Run all tests
cargo test
# Run only unit tests (Miri-compatible)
cargo test --lib
# Run integration tests (network-dependent)
cargo test --test integration_tests
# Run memory safety tests with Miri
cargo miri test --lib
See TESTING.md for detailed testing guidelines and patterns.
Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
After cloning the repository, set up your development environment:
# Install git hooks for automatic code quality checks
./register-hooks.sh
This will install pre-commit hooks that run formatting, linting, and tests automatically before each commit.
When contributing:
This project is licensed under the MIT License - see the LICENSE file for details.