| Crates.io | mecha10-ai-llm |
| lib.rs | mecha10-ai-llm |
| version | 0.1.25 |
| created_at | 2025-11-24 18:43:19.985468+00 |
| updated_at | 2026-01-01 02:05:06.791193+00 |
| description | Large Language Model integration for Mecha10 - Claude, GPT-4, Gemini, and local models |
| homepage | |
| repository | https://github.com/mecha10/mecha10 |
| max_upload_size | |
| id | 1948384 |
| size | 109,973 |
Large Language Model (LLM) integration for Mecha10, providing behavior nodes that can interact with various LLM providers including Claude, OpenAI, and local models.
Add to your Cargo.toml:
[dependencies]
mecha10-ai-llm = "0.1"
use mecha10_ai_llm::prelude::*;
/// Minimal single-shot example: build a Claude-backed `LlmNode`, tick it
/// once, then print the response along with token usage and estimated cost.
async fn example(ctx: &Context) -> anyhow::Result<()> {
    // Assemble the node; the API key comes from the environment rather
    // than being hardcoded.
    let mut node = LlmNode::builder()
        .provider("claude")
        .api_key(std::env::var("ANTHROPIC_API_KEY")?)
        .model("claude-3-5-sonnet-20241022")
        .system_prompt("You are a helpful robotics assistant.")
        .temperature(0.7)
        .max_tokens(1024)
        .build()?;

    // One-time setup before the first tick.
    node.on_init(ctx).await?;

    // Drive the node a single step.
    let status = node.tick(ctx).await?;
    println!("LLM status: {}", status);

    // Inspect the most recent completion, if one was produced.
    if let Some(response) = node.last_response() {
        println!("Response: {}", response.content);
        if let Some(usage) = &response.usage {
            println!("Tokens used: {} in, {} out",
                usage.input_tokens, usage.output_tokens);
            println!("Estimated cost: ${:.4}",
                usage.estimate_claude_cost(&node.config().model));
        }
    }
    Ok(())
}
use mecha10_ai_llm::prelude::*;
/// Multi-turn conversation: the node keeps history between ticks, so each
/// new user message is answered in the context of the prior exchange.
async fn conversation_example(ctx: &Context) -> anyhow::Result<()> {
    let mut chat = LlmNode::builder()
        .provider("openai")
        .api_key(std::env::var("OPENAI_API_KEY")?)
        .model("gpt-4")
        .system_prompt("You are a robot navigation assistant.")
        .build()?;
    chat.on_init(ctx).await?;

    // First turn.
    chat.add_message("user", "What's the best path planning algorithm?");
    chat.tick(ctx).await?;

    // Second turn — the earlier exchange stays in context automatically.
    chat.add_message("user", "Can you explain A* in detail?");
    chat.tick(ctx).await?;

    println!("Conversation history: {} messages", chat.history().len());
    Ok(())
}
use mecha10_ai_llm::prelude::*;
/// Direct provider usage, bypassing the behavior-node wrapper entirely.
async fn provider_example() -> anyhow::Result<()> {
    // Talk to the Claude API directly; key comes from the environment.
    let claude = ClaudeProvider::new(std::env::var("ANTHROPIC_API_KEY")?);

    // Request parameters for the completion call.
    let config = LlmConfig {
        model: "claude-3-5-sonnet-20241022".to_string(),
        system_prompt: Some("You are a robotics expert.".to_string()),
        temperature: 0.8,
        max_tokens: 512,
    };

    // A single-message prompt.
    let messages = vec![LlmMessage::user("Explain PID control")];

    // Fire the request and print the reply.
    let response = claude.complete(&messages, &config).await?;
    println!("Response: {}", response.content);
    Ok(())
}
use mecha10_ai_llm::prelude::*;
/// Running against a local (Ollama-compatible) model: no API key needed,
/// just an HTTP endpoint on localhost.
async fn local_llm_example(ctx: &Context) -> anyhow::Result<()> {
    let mut local = LlmNode::builder()
        .provider("local")
        .endpoint("http://localhost:11434/api/chat")
        .model("llama3.2")
        .temperature(0.5)
        .build()?;
    local.on_init(ctx).await?;

    // Single turn against the local model.
    local.add_message("user", "Hello, local LLM!");
    local.tick(ctx).await?;
    Ok(())
}
let provider = ClaudeProvider::new("your-api-key")
.with_endpoint("https://api.anthropic.com/v1/messages");
Supported models:
- claude-3-5-sonnet-20241022 (recommended)
- claude-3-opus-20240229
- claude-3-sonnet-20240229
- claude-3-haiku-20240307

let provider = OpenAIProvider::new("your-api-key")
.with_endpoint("https://api.openai.com/v1/chat/completions");
Supported models:
- gpt-4-turbo
- gpt-4
- gpt-3.5-turbo

let provider = LocalProvider::new("http://localhost:11434/api/chat");
// Or use convenience constructor:
let provider = LocalProvider::ollama();
Any Ollama-compatible model can be used.
/// Provider-agnostic request configuration, passed to `complete()` calls
/// and built internally by `LlmNode::builder()` (see examples above).
pub struct LlmConfig {
/// Model identifier
pub model: String,
/// Temperature for sampling (0.0 = deterministic, 2.0 = very random)
pub temperature: f32,
/// Maximum tokens to generate
pub max_tokens: u32,
/// System prompt (optional)
pub system_prompt: Option<String>,
}
/// Token counts for a single completion, as reported in `response.usage`.
pub struct LlmUsage {
/// Tokens consumed by the prompt/input side of the request.
pub input_tokens: u32,
/// Tokens generated in the model's response.
pub output_tokens: u32,
}
impl LlmUsage {
/// Estimate cost for Claude models
/// (returns USD; pricing is a snapshot as of January 2025).
pub fn estimate_claude_cost(&self, model: &str) -> f64;
/// Estimate cost for OpenAI models
/// (returns USD; same pricing-snapshot caveat applies).
pub fn estimate_openai_cost(&self, model: &str) -> f64;
}
Cost estimation is based on current pricing (as of January 2025) and helps with budget tracking.
The LlmNode implements BehaviorNode, so it can be used in any behavior tree composition:
use mecha10_ai_llm::prelude::*;
use mecha10_behavior_runtime::prelude::*;
/// Composing the LLM node inside a behavior tree: `LlmNode` implements
/// `BehaviorNode`, so it can sit alongside any other child node.
/// NOTE(review): `other_behavior` is assumed to be supplied by the caller.
async fn behavior_tree_example(ctx: &Context) -> anyhow::Result<()> {
    let ai = LlmNode::builder()
        .provider("claude")
        .api_key(std::env::var("ANTHROPIC_API_KEY")?)
        .model("claude-3-5-sonnet-20241022")
        .build()?;

    // Run the LLM first, then the next behavior, in order.
    let seq = SequenceNode::new()
        .add_child(Box::new(ai))
        .add_child(Box::new(other_behavior));

    let mut executor = BehaviorExecutor::new(Box::new(seq));
    executor.run(ctx).await?;
    Ok(())
}
All LLM operations return anyhow::Result for robust error handling:
match llm.tick(ctx).await {
Ok(NodeStatus::Success) => {
println!("LLM completed successfully");
}
Ok(NodeStatus::Running) => {
println!("LLM is still processing");
}
Ok(NodeStatus::Failure) => {
println!("LLM failed to complete");
}
Err(e) => {
eprintln!("Error: {}", e);
}
}
Common errors:
Never hardcode API keys. Use environment variables:
let api_key = std::env::var("ANTHROPIC_API_KEY")
.expect("ANTHROPIC_API_KEY must be set");
Monitor token usage to control costs:
if let Some(usage) = response.usage {
let cost = usage.estimate_claude_cost(&config.model);
if cost > 0.10 {
println!("Warning: High cost detected: ${:.2}", cost);
}
}
Clear history periodically to avoid context length limits:
llm.clear_history();
Use system prompts to set behavior and constraints:
.system_prompt("You are a robot safety monitor. Always prioritize safety.
Respond in JSON format with: {action: string, confidence: number}")
reqwest for HTTP with connection pooling

See the examples/ directory for more complete examples:
- examples/llm-basic/ - Simple question-answering
- examples/llm-conversation/ - Multi-turn conversations
- examples/llm-robot-control/ - LLM-guided robot behavior

Related crates:
- mecha10-behavior-runtime - Core behavior tree execution
- mecha10-behavior-patterns - Advanced behavior patterns
- mecha10-planning - Path planning algorithms

Part of the Mecha10 framework. See repository root for license information.