| Crates.io | ai-sdk-core |
| lib.rs | ai-sdk-core |
| version | 0.3.0 |
| created_at | 2025-11-13 04:55:20.526915+00 |
| updated_at | 2025-11-23 06:16:21.028984+00 |
| description | High-level APIs for AI SDK - text generation, embeddings, and tool execution |
| homepage | |
| repository | https://github.com/khongtrunght/ai-sdk-rust |
| max_upload_size | |
| id | 1930404 |
| size | 318,872 |
High-level, ergonomic APIs for working with AI models in Rust. This crate provides simple, composable functions for text generation, streaming, embeddings, and tool calling.
Features: a `generate_text()` API with a builder pattern, `stream_text()` for streaming responses, `embed()` and `embed_many()` for embeddings, and tool calling. Add this to your Cargo.toml:
[dependencies]
ai-sdk-core = "0.3"
ai-sdk-openai = "0.3" # Or any other provider
ai-sdk-provider = "0.3"
use ai_sdk_core::generate_text;
use ai_sdk_openai::openai;
/// Minimal `generate_text` example: send one prompt to GPT-4 and print
/// the response text plus token-usage stats.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Read credentials from the environment; `?` fails fast if unset.
    let key = std::env::var("OPENAI_API_KEY")?;
    let model = openai("gpt-4").api_key(key);

    let result = generate_text()
        .model(model)
        .prompt("Explain quantum computing in simple terms")
        .execute()
        .await?;

    println!("Response: {}", result.text());
    println!("Usage: {:?}", result.usage());
    Ok(())
}
use ai_sdk_core::stream_text;
use ai_sdk_openai::openai;
use futures::StreamExt;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let api_key = std::env::var("OPENAI_API_KEY")?;
let mut stream = stream_text()
.model(openai("gpt-4").api_key(api_key))
.prompt("Write a haiku about Rust")
.execute()
.await?;
while let Some(chunk) = stream.next().await {
match chunk? {
ai_sdk_core::StreamPart::TextDelta(text) => {
print!("{}", text);
}
ai_sdk_core::StreamPart::FinishReason(reason) => {
println!("\n\nFinished: {:?}", reason);
}
_ => {}
}
}
Ok(())
}
use ai_sdk_core::{generate_text, Tool, ToolContext, ToolError};
use ai_sdk_openai::openai;
use async_trait::async_trait;
use std::sync::Arc;
/// A toy tool that reports hard-coded weather data for any location.
struct WeatherTool;

#[async_trait]
impl Tool for WeatherTool {
    fn name(&self) -> &str {
        "get_weather"
    }

    fn description(&self) -> &str {
        "Get the current weather for a location"
    }

    /// JSON Schema describing the tool's expected input: a single
    /// required string field, `location`.
    fn input_schema(&self) -> serde_json::Value {
        serde_json::json!({
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "The city and state, e.g. San Francisco, CA"
                }
            },
            "required": ["location"]
        })
    }

    async fn execute(
        &self,
        input: serde_json::Value,
        _ctx: &ToolContext,
    ) -> Result<serde_json::Value, ToolError> {
        // Tolerate a missing/non-string field by falling back to "Unknown"
        // (the schema marks it required, but the model may still omit it).
        let location = input
            .get("location")
            .and_then(serde_json::Value::as_str)
            .unwrap_or("Unknown");

        Ok(serde_json::json!({
            "location": location,
            "temperature": 72,
            "condition": "Sunny"
        }))
    }
}
/// Tool-calling example: give the model access to `WeatherTool` and let
/// it decide when to invoke it.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let key = std::env::var("OPENAI_API_KEY")?;

    let result = generate_text()
        .model(openai("gpt-4").api_key(key))
        .prompt("What's the weather in San Francisco?")
        .tools(vec![Arc::new(WeatherTool)])
        // Cap the agent loop so a confused model can't call tools forever.
        .max_tool_calls(5)
        .execute()
        .await?;

    println!("Response: {}", result.text());
    println!("Tool calls made: {}", result.tool_calls().len());
    Ok(())
}
use ai_sdk_core::embed;
use ai_sdk_openai::openai_embedding;
/// Single-value embedding example: embed one string and print the
/// dimensionality of the resulting vector.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let key = std::env::var("OPENAI_API_KEY")?;
    let embedder = openai_embedding("text-embedding-3-small").api_key(key);

    let result = embed()
        .model(embedder)
        .value("Hello, world!")
        .execute()
        .await?;

    println!("Embedding dimension: {}", result.embedding().len());
    Ok(())
}
use ai_sdk_core::embed_many;
use ai_sdk_openai::openai_embedding;
/// Batch embedding example: embed several documents in a single request.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let key = std::env::var("OPENAI_API_KEY")?;
    let docs = vec!["First document", "Second document", "Third document"];

    let result = embed_many()
        .model(openai_embedding("text-embedding-3-small").api_key(key))
        .values(docs)
        .execute()
        .await?;

    // One embedding per input document, in the same order.
    for (i, embedding) in result.embeddings().iter().enumerate() {
        println!("Document {}: {} dimensions", i, embedding.len());
    }
    Ok(())
}
Control when text generation should stop:
use ai_sdk_core::{generate_text, StopCondition};
use ai_sdk_openai::openai;
// Stop generation once the model has produced 50 tokens, even if the
// prompt asks for more output. (`api_key` is assumed to be in scope.)
let result = generate_text()
.model(openai("gpt-4").api_key(api_key))
.prompt("Count from 1 to 100")
.stop_condition(StopCondition::MaxTokens(50))
.execute()
.await?;
Configure automatic retries with exponential backoff:
use ai_sdk_core::{generate_text, RetryConfig};
use ai_sdk_openai::openai;
use std::time::Duration;
// Retry failed requests up to 3 times with exponential backoff:
// the delay starts at 1 second and is capped at 10 seconds.
// (`api_key` is assumed to be in scope.)
let result = generate_text()
.model(openai("gpt-4").api_key(api_key))
.prompt("Hello")
.retry(RetryConfig {
max_retries: 3,
initial_delay: Duration::from_secs(1),
max_delay: Duration::from_secs(10),
})
.execute()
.await?;
This crate is part of the AI SDK for Rust workspace.
We welcome contributions! Please see the Contributing Guide for guidelines.
Licensed under either of the Apache License, Version 2.0, or the MIT License, at your option.