| Crates.io | ferrous-llm-openai |
| lib.rs | ferrous-llm-openai |
| version | 0.6.1 |
| created_at | 2025-07-12 06:24:19.683318+00 |
| updated_at | 2025-08-31 10:11:11.709991+00 |
| description | OpenAI provider for the LLM library |
| homepage | https://www.eurora-labs.com |
| repository | https://github.com/eurora-labs/ferrous-llm.git |
| max_upload_size | |
| id | 1749026 |
| size | 155,495 |
OpenAI provider for the ferrous-llm ecosystem. This crate implements OpenAI's API, including chat completions, text completions, embeddings, streaming, tool calling, and image generation.
Add this to your Cargo.toml:
[dependencies]
ferrous-llm-openai = "0.6.1"
Or use the main ferrous-llm crate with the OpenAI feature:
[dependencies]
ferrous-llm = { version = "0.6.1", features = ["openai"] }
use ferrous_llm_openai::{OpenAIConfig, OpenAIProvider};
use ferrous_llm_core::{ChatProvider, ChatRequest, Message, MessageContent, Role};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load configuration from environment
    let config = OpenAIConfig::from_env()?;
    let provider = OpenAIProvider::new(config)?;

    // Create a chat request
    let request = ChatRequest {
        messages: vec![
            Message {
                role: Role::User,
                content: MessageContent::Text("Explain quantum computing".to_string()),
                name: None,
                tool_calls: None,
                tool_call_id: None,
                created_at: chrono::Utc::now(),
            }
        ],
        parameters: Default::default(),
        metadata: Default::default(),
    };

    // Send the request
    let response = provider.chat(request).await?;
    println!("Response: {}", response.content());
    Ok(())
}
use ferrous_llm_openai::{OpenAIConfig, OpenAIProvider};
use ferrous_llm_core::{StreamingProvider, ChatRequest};
use futures::StreamExt;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let config = OpenAIConfig::from_env()?;
    let provider = OpenAIProvider::new(config)?;

    let request = ChatRequest {
        // ... request setup
        ..Default::default()
    };

    let mut stream = provider.chat_stream(request).await?;
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(data) => print!("{}", data.content()),
            Err(e) => eprintln!("Stream error: {}", e),
        }
    }
    Ok(())
}
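When the full text is needed after the stream ends, the same loop can accumulate the deltas instead of printing them. A minimal sketch, assuming data.content() yields a displayable text fragment as in the loop above:

let mut stream = provider.chat_stream(request).await?;
let mut full_response = String::new();
while let Some(chunk) = stream.next().await {
    match chunk {
        // Append each streamed delta to the running transcript.
        Ok(data) => full_response.push_str(&data.content().to_string()),
        Err(e) => eprintln!("Stream error: {}", e),
    }
}
println!("Full response: {}", full_response);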
Set these environment variables for automatic configuration:
export OPENAI_API_KEY="sk-your-api-key-here"
export OPENAI_MODEL="gpt-4" # Optional, defaults to gpt-3.5-turbo
export OPENAI_BASE_URL="https://api.openai.com/v1" # Optional
export OPENAI_ORGANIZATION="org-your-org-id" # Optional
export OPENAI_PROJECT="proj-your-project-id" # Optional
use ferrous_llm_openai::OpenAIConfig;
use std::time::Duration;
// Simple configuration
let config = OpenAIConfig::new("sk-your-api-key", "gpt-4");

// Using the builder pattern
let config = OpenAIConfig::builder()
    .api_key("sk-your-api-key")
    .model("gpt-4")
    .organization("org-your-org-id")
    .timeout(Duration::from_secs(60))
    .max_retries(3)
    .header("Custom-Header", "value")
    .build();

// From environment with validation
let config = OpenAIConfig::from_env()?;
For OpenAI-compatible APIs (like Azure OpenAI):
let config = OpenAIConfig::builder()
    .api_key("your-api-key")
    .model("gpt-4")
    .base_url("https://your-custom-endpoint.com/v1")?
    .build();
- gpt-4 - Most capable model
- gpt-4-turbo - Latest GPT-4 with improved performance
- gpt-3.5-turbo - Fast and efficient for most tasks
- gpt-3.5-turbo-16k - Extended context length
- text-embedding-ada-002 - Most capable embedding model
- text-embedding-3-small - Smaller, faster embedding model
- text-embedding-3-large - Larger, more capable embedding model
- dall-e-3 - Latest image generation model
- dall-e-2 - Previous generation image model
- whisper-1 - Speech-to-text transcription
- tts-1 - Text-to-speech synthesis
- tts-1-hd - High-definition text-to-speech

use ferrous_llm_openai::{OpenAIConfig, OpenAIProvider};
use ferrous_llm_core::{ToolProvider, ChatRequest, Tool, ToolFunction};
let provider = OpenAIProvider::new(config)?;

let tools = vec![
    Tool {
        r#type: "function".to_string(),
        function: ToolFunction {
            name: "get_weather".to_string(),
            description: Some("Get current weather".to_string()),
            parameters: serde_json::json!({
                "type": "object",
                "properties": {
                    "location": {"type": "string"}
                }
            }),
        },
    }
];

let response = provider.chat_with_tools(request, &tools).await?;
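The parameters field is a plain JSON Schema object, so richer schemas are just larger serde_json::json! literals. A sketch of a schema with an enum and a required field, independent of any provider API; the field names here are illustrative:

use serde_json::json;

// JSON Schema for a weather tool: one required string, one constrained enum.
let parameters = json!({
    "type": "object",
    "properties": {
        "location": {
            "type": "string",
            "description": "City and state, e.g. San Francisco, CA"
        },
        "unit": {
            "type": "string",
            "enum": ["celsius", "fahrenheit"]
        }
    },
    "required": ["location"]
});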
use ferrous_llm_openai::{OpenAIConfig, OpenAIProvider};
use ferrous_llm_core::EmbeddingProvider;
let provider = OpenAIProvider::new(config)?;

let texts = vec![
    "The quick brown fox".to_string(),
    "jumps over the lazy dog".to_string(),
];

let embeddings = provider.embed(&texts).await?;
for embedding in &embeddings {
    println!("Embedding dimension: {}", embedding.vector.len());
}
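A common next step is comparing embeddings. A minimal sketch of cosine similarity over the vectors returned above, assuming embedding.vector is a Vec<f32> (the snippet above only shows its len()):

/// Cosine similarity of two equal-length vectors: dot(a, b) / (|a| * |b|).
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    dot / (norm_a * norm_b)
}

let score = cosine_similarity(&embeddings[0].vector, &embeddings[1].vector);
println!("Similarity: {:.4}", score);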
use ferrous_llm_openai::{OpenAIConfig, OpenAIProvider};
use ferrous_llm_core::{ImageProvider, ImageRequest};
let provider = OpenAIProvider::new(config)?;

let request = ImageRequest {
    prompt: "A futuristic city at sunset".to_string(),
    n: Some(1),
    size: Some("1024x1024".to_string()),
    quality: Some("standard".to_string()),
    style: Some("vivid".to_string()),
};

let response = provider.generate_image(request).await?;
for image in response.images() {
    println!("Generated image URL: {}", image.url);
}
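The response carries URLs rather than image bytes, so persisting an image takes one extra HTTP fetch. A hedged sketch using reqwest and tokio::fs; neither is part of this crate, so both are assumed as direct dependencies in your Cargo.toml:

// Fetch the generated image and write it to disk.
// Assumes `reqwest` (with default TLS) and `tokio` (with the "fs" feature).
let bytes = reqwest::get(&image.url).await?.bytes().await?;
tokio::fs::write("generated.png", &bytes).await?;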
The crate provides comprehensive error handling:
use ferrous_llm_openai::{OpenAIError, OpenAIProvider};
use ferrous_llm_core::ErrorKind;
match provider.chat(request).await {
    Ok(response) => println!("Success: {}", response.content()),
    Err(e) => match e.kind() {
        ErrorKind::Authentication => eprintln!("Invalid API key"),
        ErrorKind::RateLimited => eprintln!("Rate limit exceeded"),
        ErrorKind::InvalidRequest => eprintln!("Invalid request: {}", e),
        ErrorKind::ServerError => eprintln!("OpenAI server error: {}", e),
        ErrorKind::NetworkError => eprintln!("Network error: {}", e),
        ErrorKind::Timeout => eprintln!("Request timeout"),
        _ => eprintln!("Unknown error: {}", e),
    },
}
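The provider already retries internally (see the retry section below), but the same ErrorKind values can drive application-level fallback. A sketch of a manual retry on ErrorKind::RateLimited with a fixed delay for simplicity; it assumes the request type is Clone and that the surrounding function returns Result<(), Box<dyn std::error::Error>>:

use std::time::Duration;

let mut attempts = 0;
let response = loop {
    // `request.clone()` assumes ChatRequest implements Clone.
    match provider.chat(request.clone()).await {
        Ok(response) => break response,
        Err(e) if matches!(e.kind(), ErrorKind::RateLimited) && attempts < 3 => {
            attempts += 1;
            // Fixed delay for brevity; real backoff would grow per attempt.
            tokio::time::sleep(Duration::from_secs(2)).await;
        }
        Err(e) => return Err(e.into()),
    }
};
println!("Success: {}", response.content());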
Run the test suite:
# Unit tests
cargo test
# Integration tests (requires API key)
OPENAI_API_KEY=sk-your-key cargo test --test integration_tests
# End-to-end tests
OPENAI_API_KEY=sk-your-key cargo test --test e2e_tests
See the examples directory for complete working examples:
- openai_chat.rs - Basic chat example
- openai_chat_streaming.rs - Streaming chat example

Run examples:
export OPENAI_API_KEY="sk-your-key"
cargo run --example openai_chat --features openai
The provider includes automatic retry logic with exponential backoff for rate-limited requests. Configure retry behavior:
let config = OpenAIConfig::builder()
    .api_key("sk-your-key")
    .max_retries(5)                    // Maximum retry attempts
    .timeout(Duration::from_secs(30))  // Request timeout
    .build();
This crate is compatible with the OpenAI API and OpenAI-compatible endpoints such as Azure OpenAI (see the custom endpoint configuration above).
This crate is part of the ferrous-llm workspace. See the main repository for contribution guidelines.
Licensed under the Apache License 2.0. See LICENSE for details.