| Crates.io | openai-sdk-rs |
| lib.rs | openai-sdk-rs |
| version | 0.1.1 |
| created_at | 2025-08-11 02:25:59.717611+00 |
| updated_at | 2025-08-11 02:36:56.456322+00 |
| description | Unofficial, minimal async OpenAI API client for Rust with support for Chat Completions, Embeddings, Responses API, and tool calling. |
| homepage | https://github.com/neeboo/openai-sdk-rs |
| repository | https://github.com/neeboo/openai-sdk-rs |
| max_upload_size | |
| id | 1789485 |
| size | 118,618 |
Unofficial, minimal, async OpenAI API client for Rust.
⚠️ Disclaimer: This is an unofficial implementation and is not affiliated with OpenAI. Use at your own discretion.
What it covers:
- POST /v1/chat/completions
- POST /v1/embeddings

Not a full mirror of the API yet — intentionally small and focused.
Check out the examples/ directory for comprehensive usage examples:
- cargo run --example chat - Basic chat completion
- cargo run --example chat_stream - Streaming chat
- cargo run --example responses - Basic responses API
- cargo run --example responses_stream - Streaming responses
- cargo run --example responses_tool_call - Tool calling with functions
- cargo run --example responses_advanced - Advanced parameters and configurations
- cargo run --example images - Image generation

Add to your Cargo.toml:
[dependencies]
openai-sdk-rs = { path = "." }
Or once published:
openai-sdk-rs = "0.1"
Copy .env.example to .env and fill your values, or export env vars directly.
export OPENAI_API_KEY=sk-...
# optional scoping
export OPENAI_ORG_ID=org_...
export OPENAI_PROJECT_ID=proj_...
To use a proxy-compatible base URL:
export OPENAI_BASE_URL=https://api.openai.com/v1
Add this to your Cargo.toml:
[dependencies]
openai-sdk-rs = "0.1.0"
tokio = { version = "1.0", features = ["full"] }
Or install via cargo:
cargo add openai-sdk-rs
You'll need an OpenAI API key. Set it as an environment variable:
export OPENAI_API_KEY="your-api-key-here"
Chat:
use openai_sdk_rs::{OpenAI, types::chat::{ChatMessage, ChatCompletionRequest}};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let req = ChatCompletionRequest {
model: "gpt-4o-mini".to_string(),
messages: vec![
ChatMessage::system("You are a helpful assistant."),
ChatMessage::user("Write a haiku about Rust."),
],
..Default::default()
};
let resp = client.chat_completion(req).await?;
println!("{}", resp.first_choice_text().unwrap_or("<no text>"));
Ok(())
}
Embeddings:
use openai_sdk_rs::{OpenAI, types::embeddings::{EmbeddingsRequest, EmbeddingInput}};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let req = EmbeddingsRequest {
    model: "text-embedding-3-small".into(),
    input: EmbeddingInput::from("hello world"),
    user: None,
};
let resp = client.embeddings(req).await?;
println!("{} vectors", resp.data.len());
Ok(())
}
Streaming chat:
use openai_sdk_rs::{OpenAI, types::chat::{ChatMessage, ChatCompletionRequest}};
use futures_util::TryStreamExt;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let req = ChatCompletionRequest {
    model: "gpt-4o-mini".into(),
    messages: vec![ChatMessage::user("Stream a short line.")],
    ..Default::default()
};
let mut stream = client.chat_completion_stream(req).await?;
while let Some(chunk) = stream.try_next().await? {
if let Some(text) = chunk.choices.get(0).and_then(|c| c.delta.content.as_deref()) {
print!("{}", text);
}
}
println!();
Ok(())
}
Responses API:
use openai_sdk_rs::{OpenAI, types::responses::ResponsesRequest};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let resp = client.responses(ResponsesRequest::text("gpt-4o-mini", "One sentence about Rust.")).await?;
println!("{}", resp.output_text().unwrap_or("<no text>".into()));
Ok(())
}
Streaming Responses:
use openai_sdk_rs::{OpenAI, types::responses::{ResponsesRequest, StreamOptions}};
use futures_util::TryStreamExt;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let mut req = ResponsesRequest::text("gpt-4o-mini", "Stream a short fact about whales.");
req.stream_options = Some(StreamOptions { include_usage: Some(true) });
let mut stream = client.responses_stream(req).await?;
while let Some(event) = stream.try_next().await? {
if let Some(text) = event.output_text {
print!("{}", text);
}
}
println!();
Ok(())
}
Aggregated streaming helpers:
use openai_sdk_rs::{OpenAI, types::chat::{ChatMessage, ChatCompletionRequest}, types::responses::ResponsesRequest};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
// Chat text via streaming aggregation
let chat_text = client.chat_completion_stream_text(ChatCompletionRequest {
model: "gpt-4o-mini".into(),
messages: vec![ChatMessage::user("Say something short.")],
..Default::default()
}).await?;
println!("chat: {}", chat_text);
// Responses text via streaming aggregation
let resp_text = client.responses_stream_text(ResponsesRequest::text("gpt-4o-mini", "Stream one line.")).await?;
println!("responses: {}", resp_text);
Ok(())
}
SSE helper utilities:
use openai_sdk_rs::sse::{extract_data_lines_from_bytes, extract_json_values_from_bytes, extract_data_lines_from_str, try_extract_json_values_from_str};
let raw = b"data: {\"a\":1}\n\n:data comment\n\ndata: [DONE]\n";
let lines = extract_data_lines_from_bytes(raw);
assert_eq!(lines, vec!["{\"a\":1}".to_string()]);
let jsons = extract_json_values_from_bytes(raw);
assert_eq!(jsons[0]["a"], 1);
// String-based variants and Result-returning versions
let text = "data: {\"a\":1}\n\n";
let lines_str = extract_data_lines_from_str(text);
assert_eq!(lines_str, vec!["{\"a\":1}"]);
let jsons_str = try_extract_json_values_from_str(text).unwrap();
assert_eq!(jsons_str[0]["a"], 1);
Image generation:
use openai_sdk_rs::{OpenAI, types::images::{ImageGenerationRequest, ImageResponseFormat}};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let req = ImageGenerationRequest {
model: "dall-e-3".into(),
prompt: "A tiny Rust crab".into(),
n: Some(1),
size: Some("1024x1024".into()),
response_format: Some(ImageResponseFormat::B64Json)
};
let resp = client.images_generate(req).await?;
println!("variants: {}", resp.data.len());
Ok(())
}
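If you requested B64Json as above, decoding the first image could look like the snippet below, dropped into the example's main right after the images_generate call. This is only a sketch: the b64_json field name and its Option type are assumptions (they mirror the underlying API field), and the base64 crate is an extra dependency.
use base64::Engine; // add `base64 = "0.22"` to Cargo.toml
if let Some(img) = resp.data.first() {
    // b64_json is assumed to be an Option<String> holding the base64-encoded image
    if let Some(b64) = img.b64_json.as_deref() {
        let bytes = base64::engine::general_purpose::STANDARD.decode(b64)?;
        std::fs::write("crab.png", &bytes)?;
        println!("wrote {} bytes to crab.png", bytes.len());
    }
}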
Files (list, download, delete):
use openai_sdk_rs::OpenAI;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let client = OpenAI::from_env()?;
let files = client.files_list().await?;
println!("{} files", files.data.len());
// Download and delete the first file, if any
if let Some(f) = files.data.first() {
let bytes = client.files_download(&f.id).await?;
println!("downloaded {} bytes from {}", bytes.len(), f.filename);
let del = client.files_delete(&f.id).await?;
println!("deleted {}: {}", del.id, del.deleted);
}
Ok(())
}
Supply tools via ResponsesRequest.tools with JSON Schema:
use openai_sdk_rs::types::responses::{ResponsesRequest, ToolSpec};
use serde_json::json;
let req = ResponsesRequest {
model: "gpt-4o-mini".into(),
input: Some(json!("What's the weather in SF?")),
tools: Some(vec![ToolSpec {
type_: "function".to_string(),
name: "get_weather".to_string(),
description: Some("Get weather by city".to_string()),
parameters: Some(json!({
"type": "object",
"properties": {"city": {"type": "string"}},
"required": ["city"],
})),
}]),
..Default::default()
};
// After calling `client.responses(req).await?`:
// let resp: ResponsesResponse = ...;
// for call in resp.function_calls() {
// println!("tool: {} args: {}", call.name, call.arguments);
// }
// For streaming events:
// let mut stream = client.responses_stream(req).await?;
// while let Some(ev) = stream.try_next().await? {
// for call in ev.function_calls() {
// println!("stream tool: {} args: {}", call.name, call.arguments);
// }
// }
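Putting the pieces together, a runnable sketch of the non-streaming round trip, using only the ToolSpec fields and the function_calls() accessor shown above:
use openai_sdk_rs::{OpenAI, types::responses::{ResponsesRequest, ToolSpec}};
use serde_json::json;
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let client = OpenAI::from_env()?;
    let req = ResponsesRequest {
        model: "gpt-4o-mini".into(),
        input: Some(json!("What's the weather in SF?")),
        tools: Some(vec![ToolSpec {
            type_: "function".to_string(),
            name: "get_weather".to_string(),
            description: Some("Get weather by city".to_string()),
            parameters: Some(json!({
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"]
            })),
        }]),
        ..Default::default()
    };
    let resp = client.responses(req).await?;
    // Print any tool calls the model decided to make.
    for call in resp.function_calls() {
        println!("tool: {} args: {}", call.name, call.arguments);
    }
    Ok(())
}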
The following options are available on OpenAI::builder() (a usage sketch follows the custom-client examples below):
- timeout(Duration) - set request timeout
- max_retries(u32) and retry_base_delay(Duration) - configure retries
- proxy(url) - set an HTTP(S) proxy for all requests

If you need full control over HTTP behavior (proxies, pools, TLS, UA), inject your own reqwest::Client:
use std::time::Duration;
use openai_sdk_rs::{OpenAI, types::chat::{ChatMessage, ChatCompletionRequest}};
#[tokio::main]
async fn main() -> anyhow::Result<()> {
let http = reqwest::Client::builder()
.timeout(Duration::from_secs(10))
.user_agent("my-app/0.1")
.build()?;
let api_key = std::env::var("OPENAI_API_KEY")?;
let oai = OpenAI::with_http_client(http, api_key)?;
let resp = oai.chat_completion(ChatCompletionRequest {
model: "gpt-4o-mini".into(),
messages: vec![ChatMessage::user("Hello from custom client!")],
..Default::default()
}).await?;
println!("{}", resp.first_choice_text().unwrap_or("<no text>"));
Ok(())
}
Or via the builder:
let http = reqwest::Client::builder().build()?;
let oai = OpenAI::builder()
.http_client(http)
.api_key(std::env::var("OPENAI_API_KEY")?)
.build()?;
Note: when injecting a client, builder options like timeout, proxy, and user_agent are not applied; configure them on your reqwest::Client.
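For completeness, here is a minimal sketch that sticks to the built-in builder options listed above instead of injecting a client. The option names come from that list; the exact argument types (for example, that proxy takes a URL string) are assumptions:
use std::time::Duration;
use openai_sdk_rs::OpenAI;
fn main() -> anyhow::Result<()> {
    let _oai = OpenAI::builder()
        .api_key(std::env::var("OPENAI_API_KEY")?)
        .timeout(Duration::from_secs(30))             // request timeout
        .max_retries(2)                               // retry transient failures up to twice
        .retry_base_delay(Duration::from_millis(500)) // base delay between retries
        .proxy("http://localhost:3128")               // assumed to accept a proxy URL string
        .build()?;
    // Use `_oai` exactly like the clients in the examples above.
    Ok(())
}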
MIT or Apache-2.0, at your option.