| Crates.io | blackman-client |
| lib.rs | blackman-client |
| version | 0.0.10 |
| created_at | 2025-10-18 00:57:11.7509+00 |
| updated_at | 2025-12-10 04:13:08.210165+00 |
| description | Official Rust client for Blackman AI - The AI API proxy that optimizes token usage |
| homepage | https://www.useblackman.ai |
| repository | https://github.com/blackman-ai/rust-sdk |
| max_upload_size | |
| id | 1888641 |
| size | 168,390 |
Official Rust client for Blackman AI - The AI API proxy that optimizes token usage to reduce costs.
Add to your Cargo.toml:
[dependencies]
blackman-client = "0.0.10"
tokio = { version = "1.0", features = ["full"] }
use blackman_client::{apis, models};

// Quickstart: send a single completion request and print the reply.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Point the client at the Blackman AI proxy and authenticate with a
    // bearer token; every other field keeps its Default value.
    let config = apis::configuration::Configuration {
        base_path: "https://app.useblackman.ai".to_string(),
        bearer_access_token: Some("sk_your_blackman_api_key".to_string()),
        ..Default::default()
    };

    // One user message, routed through the OpenAI provider to gpt-4o.
    let user_message = models::Message {
        role: "user".to_string(),
        content: "Explain quantum computing in simple terms".to_string(),
    };
    let request = models::CompletionRequest {
        provider: "OpenAI".to_string(),
        model: "gpt-4o".to_string(),
        messages: vec![user_message],
        ..Default::default()
    };

    // Send the request, then print the answer and the token accounting.
    let response = apis::completions_api::completions(&config, request).await?;
    println!("{}", response.choices[0].message.content);
    println!("Tokens used: {}", response.usage.total_tokens);
    Ok(())
}
Get your API key from the Blackman AI Dashboard.
// Minimal client configuration: override the API endpoint and supply the
// bearer token; all remaining fields come from Default.
let config = apis::configuration::Configuration {
    base_path: "https://app.useblackman.ai".to_string(),
    bearer_access_token: Some("sk_your_blackman_api_key".to_string()),
    ..Default::default()
};
use axum::{
extract::Json,
response::IntoResponse,
routing::post,
Router,
};
use blackman_client::{apis, models};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
// Shared application state handed to every axum handler; the configuration
// is wrapped in an Arc so cloning the state per request is cheap.
#[derive(Clone)]
struct AppState {
    blackman_config: Arc<apis::configuration::Configuration>,
}
// Incoming JSON body for POST /chat.
#[derive(Deserialize)]
struct ChatRequest {
    message: String,
}
// JSON reply returned to the caller: the model's text plus the total token
// count reported by the API.
#[derive(Serialize)]
struct ChatResponse {
    response: String,
    tokens: i32,
}
// POST /chat handler: forwards the user's message to Blackman AI and returns
// the model's reply along with the token count; any failure maps to 500.
async fn chat_handler(
    axum::extract::State(state): axum::extract::State<AppState>,
    Json(payload): Json<ChatRequest>,
) -> impl IntoResponse {
    let request = models::CompletionRequest {
        provider: "OpenAI".to_string(),
        model: "gpt-4o".to_string(),
        messages: vec![
            models::Message {
                role: "user".to_string(),
                content: payload.message,
            }
        ],
        ..Default::default()
    };
    match apis::completions_api::completions(&state.blackman_config, request).await {
        Ok(response) => {
            // Guard with .first() instead of indexing `choices[0]`, which
            // would panic the handler if the API returned no choices.
            match response.choices.first() {
                Some(choice) => {
                    let chat_response = ChatResponse {
                        response: choice.message.content.clone(),
                        tokens: response.usage.total_tokens,
                    };
                    Json(chat_response).into_response()
                }
                None => {
                    eprintln!("Error: completion response contained no choices");
                    axum::http::StatusCode::INTERNAL_SERVER_ERROR.into_response()
                }
            }
        }
        Err(e) => {
            eprintln!("Error: {:?}", e);
            axum::http::StatusCode::INTERNAL_SERVER_ERROR.into_response()
        }
    }
}
// Entry point: builds the shared Blackman configuration from the environment
// and serves the /chat route on port 8080.
#[tokio::main]
async fn main() {
    // Fail fast at startup (expect) if BLACKMAN_API_KEY is unset, rather
    // than serving traffic with no credentials.
    let config = Arc::new(apis::configuration::Configuration {
        base_path: "https://app.useblackman.ai".to_string(),
        bearer_access_token: Some(
            std::env::var("BLACKMAN_API_KEY").expect("BLACKMAN_API_KEY not set")
        ),
        ..Default::default()
    });
    let state = AppState {
        blackman_config: config,
    };
    // Single route: POST /chat handled by chat_handler with shared state.
    let app = Router::new()
        .route("/chat", post(chat_handler))
        .with_state(state);
    let listener = tokio::net::TcpListener::bind("0.0.0.0:8080").await.unwrap();
    axum::serve(listener, app).await.unwrap();
}
use actix_web::{post, web, App, HttpResponse, HttpServer, Responder};
use blackman_client::{apis, models};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
// Shared state injected into actix handlers via web::Data; the configuration
// sits behind an Arc so it can be shared across worker threads.
struct AppState {
    blackman_config: Arc<apis::configuration::Configuration>,
}
// Incoming JSON body for POST /chat.
#[derive(Deserialize)]
struct ChatRequest {
    message: String,
}
// JSON reply returned to the caller: just the model's text in this example.
#[derive(Serialize)]
struct ChatResponse {
    response: String,
}
// POST /chat: forwards the user's message to Blackman AI and returns the
// model's reply as JSON; any failure maps to a 500 response.
#[post("/chat")]
async fn chat(
    data: web::Data<AppState>,
    req: web::Json<ChatRequest>,
) -> impl Responder {
    let request = models::CompletionRequest {
        provider: "OpenAI".to_string(),
        model: "gpt-4o".to_string(),
        messages: vec![
            models::Message {
                role: "user".to_string(),
                content: req.message.clone(),
            }
        ],
        ..Default::default()
    };
    match apis::completions_api::completions(&data.blackman_config, request).await {
        Ok(response) => {
            // Guard with .first() instead of indexing `choices[0]`, which
            // would panic the worker if the API returned no choices.
            match response.choices.first() {
                Some(choice) => HttpResponse::Ok().json(ChatResponse {
                    response: choice.message.content.clone(),
                }),
                None => {
                    eprintln!("Error: completion response contained no choices");
                    HttpResponse::InternalServerError().finish()
                }
            }
        }
        Err(e) => {
            eprintln!("Error: {:?}", e);
            HttpResponse::InternalServerError().finish()
        }
    }
}
// Entry point: reads the API key from the environment, shares the client
// configuration across workers, and serves POST /chat on port 8080.
#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Fail fast at startup (expect) if BLACKMAN_API_KEY is unset.
    let config = Arc::new(apis::configuration::Configuration {
        base_path: "https://app.useblackman.ai".to_string(),
        bearer_access_token: Some(
            std::env::var("BLACKMAN_API_KEY").expect("BLACKMAN_API_KEY not set")
        ),
        ..Default::default()
    });
    let state = web::Data::new(AppState {
        blackman_config: config,
    });
    // The factory closure runs once per worker; `move` lets each worker
    // clone the shared Data handle.
    HttpServer::new(move || {
        App::new()
            .app_data(state.clone())
            .service(chat)
    })
    .bind("0.0.0.0:8080")?
    .run()
    .await
}
use reqwest::Client;
use std::time::Duration;
// Build a reqwest client with a 60-second request timeout and plug it into
// the Configuration so all SDK calls go through it.
let client = Client::builder()
    .timeout(Duration::from_secs(60))
    .build()?;
let config = apis::configuration::Configuration {
    base_path: "https://app.useblackman.ai".to_string(),
    bearer_access_token: Some("sk_your_blackman_api_key".to_string()),
    client,
    ..Default::default()
};
// Error handling: inspect the SDK's error enum to distinguish network,
// (de)serialization, and I/O failures.
match apis::completions_api::completions(&config, request).await {
    Ok(response) => {
        println!("{}", response.choices[0].message.content);
    }
    Err(e) => {
        eprintln!("API Error: {:?}", e);
        // Handle specific error types
        match e {
            apis::Error::Reqwest(e) => eprintln!("Network error: {}", e),
            apis::Error::Serde(e) => eprintln!("Serialization error: {}", e),
            apis::Error::Io(e) => eprintln!("IO error: {}", e),
            _ => eprintln!("Unknown error"),
        }
    }
}
use tokio::time::{sleep, Duration};
/// Calls the completions endpoint, retrying failures with exponential
/// backoff (100ms, 200ms, 400ms, ...).
///
/// Returns the first successful response, or the last error once
/// `max_retries` retry attempts have been exhausted.
async fn completions_with_retry(
    config: &apis::configuration::Configuration,
    request: models::CompletionRequest,
    max_retries: u32,
) -> Result<models::CompletionResponse, apis::Error> {
    let mut retries = 0;
    let mut delay = Duration::from_millis(100);
    loop {
        // The request is cloned because each attempt consumes it.
        match apis::completions_api::completions(config, request.clone()).await {
            Ok(response) => return Ok(response),
            Err(e) if retries < max_retries => {
                // Include the error in the log — the original dropped it,
                // which also left `e` as an unused-variable warning.
                eprintln!(
                    "Request failed ({:?}), retrying in {:?}... (attempt {})",
                    e,
                    delay,
                    retries + 1
                );
                sleep(delay).await;
                retries += 1;
                delay *= 2; // Exponential backoff
            }
            Err(e) => return Err(e),
        }
    }
}
use futures::future::join_all;
// Fan-out: spawn one Tokio task per message so the completion requests run
// concurrently, then await them all with join_all.
// NOTE(review): assumes `messages` (an iterable of Strings) and a cloneable
// `config` are in scope — confirm against the surrounding example.
let tasks: Vec<_> = messages.iter().map(|msg| {
    // Each spawned task owns its own copies of the config and request,
    // satisfying the 'static bound on tokio::spawn.
    let config = config.clone();
    let request = models::CompletionRequest {
        provider: "OpenAI".to_string(),
        model: "gpt-4o".to_string(),
        messages: vec![
            models::Message {
                role: "user".to_string(),
                content: msg.clone(),
            }
        ],
        ..Default::default()
    };
    tokio::spawn(async move {
        apis::completions_api::completions(&config, request).await
    })
}).collect();
// Each element is nested: the outer Result is the task join outcome, the
// inner one the API call's Result.
let results = join_all(tasks).await;
MIT © Blackman AI