| | |
|---|---|
| Crates.io | ai-types |
| lib.rs | ai-types |
| version | 0.2.0 |
| created_at | 2025-06-29 15:24:11.682356+00 |
| updated_at | 2025-07-02 17:14:30.472416+00 |
| description | Providing unified trait abstractions for AI models |
| homepage | |
| repository | https://github.com/lexoliu/ai-types |
| max_upload_size | |
| id | 1730832 |
| size | 168,496 |
Write AI applications that work with any provider.
Unified trait abstractions for AI models in Rust. Switch between OpenAI, Anthropic, local models, and more without changing your application logic.
```text
┌─────────────────┐     ┌───────────────────┐     ┌─────────────────┐
│    Your App     │────▶│     ai-types      │◀────│    Providers    │
│                 │     │   (this crate)    │     │                 │
│ - Chat bots     │     │                   │     │ - openai        │
│ - Search        │     │ - LanguageModel   │     │ - anthropic     │
│ - Content gen   │     │ - EmbeddingModel  │     │ - llama.cpp     │
│ - Voice apps    │     │ - ImageGenerator  │     │ - whisper       │
└─────────────────┘     └───────────────────┘     └─────────────────┘
```
Built on `async`/`await` and streaming.

| Capability | Trait | Description |
|---|---|---|
| Language Models | `LanguageModel` | Text generation, conversations, streaming |
| Text Streaming | `TextStream` | Unified interface for streaming text responses |
| Embeddings | `EmbeddingModel` | Convert text to vectors for semantic search |
| Image Generation | `ImageGenerator` | Create images with progressive quality |
| Text-to-Speech | `AudioGenerator` | Generate speech audio from text |
| Speech-to-Text | `AudioTranscriber` | Transcribe audio to text |
| Content Moderation | `Moderation` | Detect policy violations |
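
Because each capability is a separate trait, an application can declare exactly what it needs via generic bounds and accept any provider that satisfies them. A minimal sketch (the `Assistant` struct below is illustrative, not part of the crate):

```rust
use ai_types::{EmbeddingModel, LanguageModel};

// Hypothetical application type, generic over the capabilities it uses.
// Any provider implementing these traits can be plugged in unchanged.
struct Assistant<L, E> {
    llm: L,
    embedder: E,
}

impl<L: LanguageModel, E: EmbeddingModel> Assistant<L, E> {
    fn new(llm: L, embedder: E) -> Self {
        Self { llm, embedder }
    }
}
```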
```toml
[dependencies]
ai-types = "0.2"
```
```rust
use ai_types::{LanguageModel, llm::{Message, Request}};
use futures_lite::StreamExt;

async fn chat_example(model: impl LanguageModel) -> ai_types::Result {
    let messages = [
        Message::system("You are a helpful assistant"),
        Message::user("What's the capital of France?"),
    ];

    let request = Request::new(messages);
    let mut response = model.respond(request);

    let mut full_response = String::new();
    while let Some(chunk) = response.next().await {
        full_response.push_str(&chunk?);
    }

    Ok(full_response)
}
```
The `TextStream` trait provides a unified interface for streaming text responses from language models. It implements both `Stream` for chunk-by-chunk processing and `IntoFuture` for collecting the complete response.
```rust
use ai_types::{TextStream, LanguageModel, llm::{Request, Message}};
use futures_lite::StreamExt;

// Process text as it streams in.
async fn process_streaming_response(model: impl LanguageModel) -> ai_types::Result {
    let request = Request::new([Message::user("Write a poem about Rust")]);
    let mut stream = model.respond(request);

    let mut full_poem = String::new();
    while let Some(chunk) = stream.next().await {
        let text = chunk?;
        print!("{}", text); // Display each chunk as it arrives
        full_poem.push_str(&text);
    }

    Ok(full_poem)
}

// Collect the complete response using IntoFuture.
async fn get_complete_response(model: impl LanguageModel) -> ai_types::Result {
    let request = Request::new([Message::user("Explain quantum computing")]);
    let stream = model.respond(request);

    // TextStream implements IntoFuture, so you can await it directly.
    let complete_explanation = stream.await?;
    Ok(complete_explanation)
}

// Generic function that works with any TextStream.
async fn stream_to_completion<S: TextStream>(mut stream: S) -> Result<String, S::Error> {
    // Either collect manually...
    let mut result = String::new();
    while let Some(chunk) = stream.next().await {
        result.push_str(&chunk?);
    }
    Ok(result)

    // ...or use the built-in IntoFuture implementation:
    // stream.await
}
```
```rust
use ai_types::{LanguageModel, llm::{Message, Request, Tool}};
use serde::{Deserialize, Serialize};
use schemars::JsonSchema;

#[derive(JsonSchema, Deserialize, Serialize)]
struct WeatherQuery {
    location: String,
    units: Option<String>,
}

struct WeatherTool;

impl Tool for WeatherTool {
    const NAME: &str = "get_weather";
    const DESCRIPTION: &str = "Get current weather for a location";
    type Arguments = WeatherQuery;

    async fn call(&mut self, args: Self::Arguments) -> ai_types::Result {
        Ok(format!("Weather in {}: 22°C, sunny", args.location))
    }
}

async fn weather_bot(model: impl LanguageModel) -> ai_types::Result {
    let request = Request::new([
        Message::user("What's the weather like in Tokyo?")
    ]).with_tool(WeatherTool);

    let response: String = model.generate(request).await?;
    Ok(response)
}
```
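
Tool arguments derive `JsonSchema`, which is what exposes each tool's parameter shape to the model. To see the schema a type generates, schemars can print it directly (a standalone snippet using `schemars` and `serde_json`, independent of ai-types):

```rust
use schemars::{schema_for, JsonSchema};

#[derive(JsonSchema)]
struct WeatherQuery {
    location: String,
    units: Option<String>,
}

fn main() {
    // Render the JSON Schema describing this tool's arguments.
    let schema = schema_for!(WeatherQuery);
    println!("{}", serde_json::to_string_pretty(&schema).unwrap());
}
```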
```rust
use ai_types::EmbeddingModel;

async fn find_similar_docs(
    model: impl EmbeddingModel,
    query: &str,
) -> ai_types::Result<Vec<f32>> {
    let query_embedding = model.embed(query).await?;
    println!("Embedding dimension: {}", query_embedding.len());
    Ok(query_embedding)
}
```
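
Because embeddings come back as plain `Vec<f32>`, semantic search on top of `embed` is ordinary vector math. A sketch of cosine-similarity ranking (the `rank_docs` helper is illustrative; only `embed` comes from the crate):

```rust
use ai_types::EmbeddingModel;

/// Cosine similarity between two equal-length vectors.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    dot / (norm_a * norm_b)
}

// Rank documents by similarity to the query, best match first.
async fn rank_docs(
    model: impl EmbeddingModel,
    query: &str,
    docs: &[&str],
) -> ai_types::Result<Vec<(usize, f32)>> {
    let query_vec = model.embed(query).await?;

    let mut scored = Vec::new();
    for (i, doc) in docs.iter().enumerate() {
        let doc_vec = model.embed(doc).await?;
        scored.push((i, cosine_similarity(&query_vec, &doc_vec)));
    }

    scored.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
    Ok(scored)
}
```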
```rust
use ai_types::{ImageGenerator, image::{Prompt, Size}};
use futures_lite::StreamExt;

async fn generate_image(generator: impl ImageGenerator) -> ai_types::Result<Vec<u8>> {
    let prompt = Prompt::new("A beautiful sunset over mountains");
    let size = Size::square(1024);

    let mut image_stream = generator.create(prompt, size);
    let mut final_image = Vec::new();

    while let Some(image_result) = image_stream.next().await {
        final_image = image_result?;
        println!("Received image update, {} bytes", final_image.len());
    }

    Ok(final_image)
}
```
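
Each stream item supersedes the previous one with a higher-quality render, so the loop above keeps only the final image. Persisting it is then a single write (a sketch; the file name and PNG encoding are assumptions, not specified by the crate):

```rust
use std::fs;

fn save_final_image(bytes: &[u8]) -> std::io::Result<()> {
    // Assumes the generator yields fully encoded image bytes (e.g. PNG).
    fs::write("sunset.png", bytes)
}
```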
MIT License - see LICENSE for details.