| Crates.io | ollm_sdk |
| lib.rs | ollm_sdk |
| version | 0.1.5 |
| created_at | 2025-12-12 07:29:19.529627+00 |
| updated_at | 2025-12-19 10:56:21.656679+00 |
| description | A type-safe Rust SDK for the OLLM API with compile-time error checking |
| homepage | |
| repository | https://github.com/max-de-bug/OLLM-sdk |
| max_upload_size | |
| id | 1981135 |
| size | 87,322 |
A production-ready, type-safe Rust SDK for the OLLM API with compile-time error checking and zero-cost abstractions.
Add this to your `Cargo.toml`:
```toml
[dependencies]
ollm_sdk = "0.1.5"
tokio = { version = "1", features = ["full"] }
```
```rust
use ollm_sdk::{ChatMessage, OllmClient, Model};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = OllmClient::new(
        "https://api.ollm.com/v1",
        "your-api-key",
    );

    let response = client
        .chat(
            vec![ChatMessage {
                role: "user".to_string(),
                content: "Why is the sky blue?".to_string(),
            }],
            Model::NearGLM46.as_str(),
        )
        .await?;

    println!("Response: {}", response.first_content()?);
    Ok(())
}
```
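In real deployments you will typically load the API key from the environment rather than hardcoding it. A minimal sketch, assuming the key lives in an `OLLM_API_KEY` variable (the name is this example's choice, not something the SDK mandates):

```rust
use ollm_sdk::OllmClient;

// Read the API key from the environment instead of embedding it in source.
// OLLM_API_KEY is an arbitrary name chosen for this example.
fn client_from_env() -> Result<OllmClient, std::env::VarError> {
    let api_key = std::env::var("OLLM_API_KEY")?;
    Ok(OllmClient::new("https://api.ollm.com/v1", api_key))
}
```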
Send a single message:

```rust
use ollm_sdk::{ChatMessage, OllmClient, Model};

let client = OllmClient::new("https://api.ollm.com/v1", "your-api-key");

let response = client
    .chat(
        vec![ChatMessage {
            role: "user".to_string(),
            content: "Hello!".to_string(),
        }],
        Model::NearGLM46.as_str(),
    )
    .await?;

println!("{}", response.first_content()?);
```
Include a system prompt to steer the assistant:

```rust
let messages = vec![
    ChatMessage {
        role: "system".to_string(),
        content: "You are a helpful assistant.".to_string(),
    },
    ChatMessage {
        role: "user".to_string(),
        content: "What is Rust?".to_string(),
    },
];

let response = client.chat(messages, Model::NearGLM46.as_str()).await?;
```
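The `chat` call is stateless and takes the full message history, so multi-turn conversations work by appending the assistant's reply before the next request. A sketch of that pattern; the `to_messages` helper and the follow-up prompt are this example's own, and the transcript is kept as plain `(role, content)` pairs because `chat` consumes its `Vec<ChatMessage>`:

```rust
use ollm_sdk::{ChatMessage, Model};

// `chat` takes the message vector by value, so keep the transcript in
// owned (role, content) pairs and rebuild the request each turn.
fn to_messages(turns: &[(String, String)]) -> Vec<ChatMessage> {
    turns
        .iter()
        .map(|(role, content)| ChatMessage {
            role: role.clone(),
            content: content.clone(),
        })
        .collect()
}

let mut turns = vec![("user".to_string(), "What is Rust?".to_string())];

let first = client.chat(to_messages(&turns), Model::NearGLM46.as_str()).await?;
turns.push(("assistant".to_string(), first.first_content()?.to_string()));
turns.push(("user".to_string(), "Show me a hello world.".to_string()));

let second = client.chat(to_messages(&turns), Model::NearGLM46.as_str()).await?;
println!("{}", second.first_content()?);
```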
Match on the error variants when you need finer-grained handling:

```rust
match client.chat(messages, Model::NearGLM46.as_str()).await {
    Ok(response) => {
        println!("Success: {}", response.first_content()?);
    }
    Err(ollm_sdk::OllmError::ApiError { status, message }) => {
        eprintln!("API Error {}: {}", status, message);
    }
    Err(e) => {
        eprintln!("Request failed: {}", e);
    }
}
```
Work with the full response when you need more than the first choice:

```rust
let response = client.chat(messages, model).await?;

// Iterate over all choices
for choice in &response.choices {
    println!("Role: {}", choice.message.role);
    println!("Content: {}", choice.message.content);
}

// Access usage metadata
if let Some(usage) = &response.usage {
    println!("Tokens used: {}", usage.total_tokens.unwrap_or(0));
}
```
```rust
use ollm_sdk::Model;

// NEAR Provider Models
Model::NearDeepSeekV31.as_str()              // "near/DeepSeek-V3.1"
Model::NearGLM46.as_str()                    // "near/GLM-4.6"
Model::NearGptOss120b.as_str()               // "near/gpt-oss-120b"
Model::NearQwen330BA3BInstruct2507.as_str()  // "near/Qwen3-30B-A3B-Instruct-2507"

// Phala Provider Models
Model::PhalaDeepSeekChatV30324.as_str()      // "phala/deepseek-chat-v3-0324"
Model::PhalaDeepSeekChatV31.as_str()         // "phala/deepseek-chat-v3.1"
Model::PhalaDeepSeekR10528.as_str()          // "phala/deepseek-r1-0528"
Model::PhalaGemma327bIt.as_str()             // "phala/gemma-3-27b-it"
Model::PhalaGLM46.as_str()                   // "phala/glm-4.6"
Model::PhalaGptOss120b.as_str()              // "phala/gpt-oss-120b"
```
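If the model is chosen at runtime (say, from configuration), a small mapping from provider name to a default model keeps call sites tidy. A hedged sketch; `default_model_for` and the provider strings are this example's convention, not part of the SDK:

```rust
use ollm_sdk::Model;

// Hypothetical helper: map a provider name from configuration to a
// default model. The provider strings are this example's convention.
fn default_model_for(provider: &str) -> Option<Model> {
    match provider {
        "near" => Some(Model::NearGLM46),
        "phala" => Some(Model::PhalaGLM46),
        _ => None,
    }
}

let model = default_model_for("near").expect("unknown provider");
let response = client.chat(messages, model.as_str()).await?;
```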
`OllmClient` is the main client for making API requests.

```rust
impl OllmClient {
    pub fn new(base_url: impl Into<String>, api_key: impl Into<String>) -> Self;
}
```
`chat()` sends a chat completion request.

```rust
pub async fn chat(
    &self,
    messages: Vec<ChatMessage>,
    model: &str,
) -> Result<ChatResponse, OllmError>
```
`ChatResponse` is the response to a chat completion request.

```rust
pub struct ChatResponse {
    pub id: Option<String>,
    pub object: Option<String>,
    pub created: Option<u64>,
    pub model: String,
    pub choices: Vec<ChatChoice>,
    pub usage: Option<Usage>,
}

impl ChatResponse {
    pub fn first_content(&self) -> Result<&str, OllmError>;
}
```
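`first_content()` is a convenience over `choices`. Roughly, it is equivalent to the following hand-written lookup (a sketch based on the fields and error variants documented here, not the SDK's actual source):

```rust
// Approximately what first_content() does, written out by hand:
let content: &str = response
    .choices
    .first()
    .map(|choice| choice.message.content.as_str())
    .ok_or(ollm_sdk::OllmError::MissingChoice)?;
```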
The SDK uses structured error types:
```rust
pub enum OllmError {
    Request(reqwest::Error),                   // HTTP request failed
    ApiError { status: u16, message: String }, // API returned an error status
    InvalidResponse,                           // Response format was invalid
    MissingChoice,                             // No choices in the response
}
```
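Transport failures surface as `OllmError::Request` and are often transient, so callers may want a simple retry. A hedged sketch, not part of the SDK, assuming `ChatResponse` is exported at the crate root as shown above; the attempt limit and backoff are arbitrary choices for this example:

```rust
use std::time::Duration;
use ollm_sdk::{ChatMessage, ChatResponse, OllmClient, OllmError};

// Retry only transport-level failures; surface API errors immediately.
// A closure rebuilds the messages per attempt, since `chat` consumes them.
async fn chat_with_retry<F>(
    client: &OllmClient,
    make_messages: F,
    model: &str,
) -> Result<ChatResponse, OllmError>
where
    F: Fn() -> Vec<ChatMessage>,
{
    let mut attempts: u64 = 0;
    loop {
        match client.chat(make_messages(), model).await {
            Err(OllmError::Request(e)) if attempts < 3 => {
                attempts += 1;
                eprintln!("transient failure (attempt {}): {}", attempts, e);
                tokio::time::sleep(Duration::from_millis(500 * attempts)).await;
            }
            other => return other,
        }
    }
}
```

Call it as `chat_with_retry(&client, || build_messages(), model).await?`, where `build_messages` reconstructs the history for each attempt.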
Without the SDK (30+ lines of manual plumbing):

```rust
let response = client
    .post("https://api.ollm.com/v1/chat/completions")
    .header("Authorization", "Bearer your-api-key")
    .header("Content-Type", "application/json")
    .json(&json!({...}))
    .send()
    .await?;
// ... manual error handling, JSON parsing, etc.
```

With the SDK (two lines):

```rust
let response = client.chat(messages, model).await?;
println!("{}", response.first_content()?);
```
Licensed under either of:

- Apache License, Version 2.0
- MIT License

at your option.
Contributions are welcome! Please feel free to submit a Pull Request.