| Crates.io | gemini-rust |
| lib.rs | gemini-rust |
| version | 1.4.0 |
| created_at | 2025-03-25 01:10:15.892531+00 |
| updated_at | 2025-09-13 09:15:29.469871+00 |
| description | Rust client for Google Gemini API |
| homepage | |
| repository | https://github.com/flachesis/gemini-rust |
| max_upload_size | |
| id | 1604614 |
| size | 2,052,070 |
A comprehensive Rust client library for Google's Gemini 2.5 API.
Features include serde support and tokio for high-performance async operations.
Add this to your Cargo.toml:
[dependencies]
gemini-rust = "1.4.0"
use gemini_rust::Gemini;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let api_key = std::env::var("GEMINI_API_KEY")?;
let client = Gemini::new(api_key)?;
let response = client
.generate_content()
.with_system_prompt("You are a helpful assistant.")
.with_user_message("Hello, how are you?")
.execute()
.await?;
println!("Response: {}", response.text());
Ok(())
}
use gemini_rust::Gemini;
use futures::TryStreamExt;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
let mut stream = client
.generate_content()
.with_user_message("Tell me a story about programming")
.execute_stream()
.await?;
while let Some(chunk) = stream.try_next().await? {
print!("{}", chunk.text());
}
Ok(())
}
use gemini_rust::{Gemini, FunctionDeclaration, FunctionParameters, PropertyDetails};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
// Define a custom function
let weather_function = FunctionDeclaration::new(
"get_weather",
"Get the current weather for a location",
FunctionParameters::object()
.with_property(
"location",
PropertyDetails::string("The city and state, e.g., San Francisco, CA"),
true,
)
);
let response = client
.generate_content()
.with_user_message("What's the weather like in Tokyo?")
.with_function(weather_function)
.execute()
.await?;
// Handle function calls
if let Some(function_call) = response.function_calls().first() {
println!("Function: {}", function_call.name);
println!("Args: {}", function_call.args);
}
Ok(())
}
use gemini_rust::Gemini;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
use std::fs;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::with_model(
std::env::var("GEMINI_API_KEY")?,
"models/gemini-2.5-flash-image-preview".to_string()
)?;
let response = client
.generate_content()
.with_user_message(
"Create a photorealistic image of a cute robot sitting in a garden, \
surrounded by colorful flowers. The robot should have a friendly \
expression and be made of polished metal."
)
.execute()
.await?;
// Save generated images
for candidate in response.candidates.iter() {
if let Some(parts) = &candidate.content.parts {
for part in parts.iter() {
if let gemini_rust::Part::InlineData { inline_data } = part {
let image_bytes = BASE64.decode(&inline_data.data)?;
fs::write("generated_image.png", image_bytes)?;
println!("Image saved as generated_image.png");
}
}
}
}
Ok(())
}
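Note that the loop above writes every decoded image to the same generated_image.png path, so a response containing several inline images would overwrite earlier files. A minimal sketch of a helper (hypothetical, standard library only) that numbers the output files instead:
use std::fs;
/// Save each decoded image to its own numbered PNG file.
fn save_images(images: &[Vec<u8>]) -> std::io::Result<()> {
    for (i, bytes) in images.iter().enumerate() {
        let path = format!("generated_image_{}.png", i);
        fs::write(&path, bytes)?;
        println!("Image saved as {}", path);
    }
    Ok(())
}
Collecting the decoded byte vectors in the loop and passing them to this helper keeps each candidate image separate.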
use gemini_rust::{Gemini, GenerationConfig, SpeechConfig, VoiceConfig, PrebuiltVoiceConfig};
use base64::{Engine as _, engine::general_purpose};
use std::fs::File;
use std::io::Write;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::with_model(
std::env::var("GEMINI_API_KEY")?,
"models/gemini-2.5-flash-preview-tts".to_string()
)?;
let generation_config = GenerationConfig {
response_modalities: Some(vec!["AUDIO".to_string()]),
speech_config: Some(SpeechConfig {
voice_config: Some(VoiceConfig {
prebuilt_voice_config: Some(PrebuiltVoiceConfig {
voice_name: "Puck".to_string(),
}),
}),
multi_speaker_voice_config: None,
}),
..Default::default()
};
let response = client
.generate_content()
.with_user_message("Hello! Welcome to Gemini text-to-speech.")
.with_generation_config(generation_config)
.execute()
.await?;
// Save generated audio as PCM format
for candidate in response.candidates.iter() {
if let Some(parts) = &candidate.content.parts {
for part in parts.iter() {
if let gemini_rust::Part::InlineData { inline_data } = part {
if inline_data.mime_type.starts_with("audio/") {
let audio_bytes = general_purpose::STANDARD.decode(&inline_data.data)?;
let mut file = File::create("speech_output.pcm")?;
file.write_all(&audio_bytes)?;
println!("Audio saved as speech_output.pcm");
println!("Convert to WAV using: ffmpeg -f s16le -ar 24000 -ac 1 -i speech_output.pcm speech_output.wav");
}
}
}
}
}
Ok(())
}
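If ffmpeg is not available, the raw PCM bytes can also be wrapped in a WAV header directly in Rust. This is only a sketch, assuming the format printed above (16-bit little-endian samples, 24 kHz, mono), and uses nothing beyond the standard library:
use std::fs::File;
use std::io::Write;
/// Wrap raw 16-bit mono PCM at 24 kHz in a minimal WAV container.
fn write_wav(path: &str, pcm: &[u8]) -> std::io::Result<()> {
    let sample_rate: u32 = 24_000;
    let channels: u16 = 1;
    let bits_per_sample: u16 = 16;
    let byte_rate = sample_rate * u32::from(channels) * u32::from(bits_per_sample / 8);
    let block_align = channels * (bits_per_sample / 8);
    let data_len = pcm.len() as u32;
    let mut f = File::create(path)?;
    f.write_all(b"RIFF")?;
    f.write_all(&(36 + data_len).to_le_bytes())?;
    f.write_all(b"WAVE")?;
    f.write_all(b"fmt ")?;
    f.write_all(&16u32.to_le_bytes())?; // fmt chunk size
    f.write_all(&1u16.to_le_bytes())?; // audio format: PCM
    f.write_all(&channels.to_le_bytes())?;
    f.write_all(&sample_rate.to_le_bytes())?;
    f.write_all(&byte_rate.to_le_bytes())?;
    f.write_all(&block_align.to_le_bytes())?;
    f.write_all(&bits_per_sample.to_le_bytes())?;
    f.write_all(b"data")?;
    f.write_all(&data_len.to_le_bytes())?;
    f.write_all(pcm)?;
    Ok(())
}
Calling write_wav("speech_output.wav", &audio_bytes) in place of the File::create step above produces a file most audio players can open directly.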
use gemini_rust::{Gemini, Tool};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
let response = client
.generate_content()
.with_user_message("What's the latest news about Rust programming language?")
.with_tool(Tool::google_search())
.execute()
.await?;
println!("Response: {}", response.text());
Ok(())
}
use gemini_rust::Gemini;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Use Gemini 2.5 Pro for advanced thinking capabilities
let client = Gemini::pro(std::env::var("GEMINI_API_KEY")?)?;
let response = client
.generate_content()
.with_user_message("Explain quantum computing in simple terms")
.with_dynamic_thinking() // Let model decide thinking budget
.with_thoughts_included(true) // Include thinking process
.execute()
.await?;
// Access thinking summaries
for thought in response.thoughts() {
println!("Thought: {}", thought);
}
println!("Response: {}", response.text());
Ok(())
}
use gemini_rust::{Gemini, Model, TaskType};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::with_model(
std::env::var("GEMINI_API_KEY")?,
Model::TextEmbedding004
)?;
let response = client
.embed_content()
.with_text("Hello, this is my text to embed")
.with_task_type(TaskType::RetrievalDocument)
.execute()
.await?;
println!("Embedding dimensions: {}", response.embedding.values.len());
Ok(())
}
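Embeddings are usually compared rather than printed. A minimal cosine-similarity helper in plain Rust (no extra crates, assuming the embedding values are f32; adjust the type if the crate exposes f64):
/// Cosine similarity between two embedding vectors of equal length.
fn cosine_similarity(a: &[f32], b: &[f32]) -> f32 {
    let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
    let norm_a: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let norm_b: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    if norm_a == 0.0 || norm_b == 0.0 {
        0.0
    } else {
        dot / (norm_a * norm_b)
    }
}
For retrieval, you would embed the query and each document and rank the documents by this score.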
Cache large context (system instructions, conversation history) to reduce costs and improve performance for repeated API calls:
use gemini_rust::Gemini;
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
// Create cached content with system instruction and conversation history
let cache = client
.create_cache()
.with_display_name("My Programming Assistant")?
.with_system_instruction("You are a helpful programming assistant.")
.with_user_message("Hello! I'm learning Rust.")
.with_model_message("Great! I'm here to help you learn Rust programming.")
.with_ttl(Duration::from_secs(3600)) // Cache for 1 hour
.execute()
.await?;
// Use the cached content for subsequent requests
let response = client
.generate_content()
.with_cached_content(&cache)
.with_user_message("Can you explain ownership?")
.execute()
.await?;
println!("Response: {}", response.text());
// Clean up when done
cache.delete().await.map_err(|(_, e)| e)?;
Ok(())
}
The library supports batching multiple content generation requests into a single operation. You can execute the batch directly for a small number of requests. For larger jobs, you should use the execute_as_file() method, which serializes the requests to a JSONL file, uploads it, and initiates the batch job.
use gemini_rust::{Gemini, Message};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
// Create multiple requests
let request1 = client
.generate_content()
.with_user_message("What is the meaning of life?")
.build();
let request2 = client
.generate_content()
.with_user_message("What is the best programming language?")
.build();
// For smaller jobs, execute directly:
let batch = client
.batch_generate_content()
.with_request(request1)
.with_request(request2)
.execute()
.await?;
// For a large number of requests, use execute_as_file():
// let batch = client
// .batch_generate_content()
// .with_requests(many_requests)
// .execute_as_file()
// .await?;
println!("Batch Name: {}", batch.name());
// The `execute()` method polls for completion.
// You can then immediately check the final status.
match batch.status().await? {
gemini_rust::BatchStatus::Succeeded { results } => {
for item in results {
match item.response {
Ok(response) => {
println!("Result for key {}: {}", item.meta.key, response.text());
}
Err(e) => {
println!("Error for key {}: {:?}", item.meta.key, e);
}
}
}
}
status => println!("Batch finished with status: {:?}", status),
}
Ok(())
}
use gemini_rust::Gemini;
use base64::{engine::general_purpose::STANDARD as BASE64, Engine};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
// Load the image and encode it as base64
let image_data = std::fs::read("path/to/image.jpg")?;
let base64_image = BASE64.encode(&image_data);
let response = client
.generate_content()
.with_user_message("What's in this image?")
.with_inline_data("image/jpeg", base64_image)
.execute()
.await?;
println!("Response: {}", response.text());
Ok(())
}
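The MIME type is hard-coded above; when the input path varies, a tiny helper (hypothetical, illustration only) can pick it from the file extension, or a crate such as mime_guess can do the same more robustly:
/// Hypothetical helper: map a file extension to a MIME type for with_inline_data.
fn mime_for(path: &str) -> &'static str {
    match path.rsplit('.').next() {
        Some("png") => "image/png",
        Some("jpg") | Some("jpeg") => "image/jpeg",
        Some("webp") => "image/webp",
        Some("gif") => "image/gif",
        _ => "application/octet-stream",
    }
}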
use gemini_rust::{Gemini, GenerationConfig};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let client = Gemini::new(std::env::var("GEMINI_API_KEY")?)?;
let response = client
.generate_content()
.with_user_message("Write a creative story")
.with_generation_config(GenerationConfig {
temperature: Some(0.9),
max_output_tokens: Some(1000),
top_p: Some(0.8),
top_k: Some(40),
stop_sequences: Some(vec!["END".to_string()]),
..Default::default()
})
.execute()
.await?;
println!("Response: {}", response.text());
Ok(())
}
use gemini_rust::Gemini;
// Use Gemini 2.5 Flash (default)
let client = Gemini::new(api_key)?;
// Use Gemini 2.5 Pro for advanced tasks
let client = Gemini::pro(api_key)?;
// Use specific model
let client = Gemini::with_model(api_key, "models/gemini-1.5-pro".to_string())?;
use gemini_rust::Gemini;
// Custom endpoint
let client = Gemini::with_base_url(
api_key,
"https://custom-api.example.com/v1/".parse()?
)?;
// Custom model and endpoint
let client = Gemini::with_model_and_base_url(
api_key,
"models/gemini-pro".to_string(),
"https://custom-api.example.com/v1/".parse()?
)?;
The repository includes comprehensive examples:
| Example | Description |
|---|---|
| simple.rs | Basic text generation and function calling |
| advanced.rs | Advanced content generation with all parameters |
| streaming.rs | Real-time streaming responses |
| tools.rs | Custom function declarations |
| google_search.rs | Google Search integration |
| google_search_with_functions.rs | Google Search with custom functions |
| thinking_basic.rs | Gemini 2.5 thinking mode (basic) |
| thinking_advanced.rs | Gemini 2.5 thinking mode (advanced) |
| batch_generate.rs | Batch content generation |
| batch_cancel.rs | Batch operation cancellation |
| batch_delete.rs | Batch operation deletion |
| batch_list.rs | Batch operation listing with streaming |
| batch_embedding.rs | Batch text embedding generation |
| cache_basic.rs | Cached content creation and usage |
| embedding.rs | Text embedding generation |
| error_handling.rs | Error handling examples |
| blob.rs | Image and binary data processing |
| files_delete_all.rs | Delete all files |
| files_lifecycle.rs | Full file lifecycle |
| simple_image_generation.rs | Basic text-to-image generation |
| image_generation.rs | Advanced image generation examples |
| image_editing.rs | Image editing with text prompts |
| simple_speech_generation.rs | Basic text-to-speech generation |
| multi_speaker_tts.rs | Multi-speaker text-to-speech dialogue |
| structured_response.rs | Structured JSON output |
| generation_config.rs | Custom generation parameters |
| custom_base_url.rs | Using a custom API endpoint |
| curl_equivalent.rs | Equivalent cURL commands for API calls |
Run an example:
GEMINI_API_KEY="your-api-key" cargo run --example simple
Get your API key from Google AI Studio and set it as an environment variable:
export GEMINI_API_KEY="your-api-key-here"
Available model constants: Model::Gemini25Flash, Model::Gemini25FlashLite, Model::Gemini25Pro, Model::TextEmbedding004, and Model::Custom(String) or string literals for other models.
Contributions are welcome! Please feel free to submit a Pull Request. For major changes, please open an issue first to discuss what you would like to change.
This project is licensed under the MIT License - see the LICENSE file for details.