| Field | Value |
|---|---|
| Crates.io | ggen-ai |
| lib.rs | ggen-ai |
| version | 5.0.2 |
| created_at | 2025-10-30 05:01:52.08155+00 |
| updated_at | 2025-12-21 05:02:16.503508+00 |
| description | Thin wrapper around genai for ggen - LLM integration with environment support |
| homepage | https://github.com/seanchatmangpt/ggen |
| repository | https://github.com/seanchatmangpt/ggen |
| max_upload_size | |
| id | 1907724 |
| size | 988,803 |
AI-powered code generation capabilities for ggen - Unified LLM integration using rust-genai for intelligent template generation, SPARQL queries, and RDF graph operations.
Major Update: Complete migration from custom LLM clients to rust-genai for production-ready multi-provider AI integration.
```toml
# Add to your Cargo.toml
[dependencies]
ggen-ai = "1.0"
dotenvy = "0.15"  # For environment configuration
tokio = { version = "1.0", features = ["full"] }
```
```rust
use ggen_ai::{LlmClient, TemplateGenerator, LlmConfig};
use ggen_ai::client::GenAiClient;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load environment configuration
    dotenvy::dotenv().ok();

    // Configure LLM client (supports OpenAI, Anthropic, Ollama)
    let config = LlmConfig {
        model: "gpt-4o".to_string(),
        max_tokens: Some(4096),
        temperature: Some(0.7),
        top_p: Some(0.9),
        stop: None,
        extra: std::collections::HashMap::new(),
    };

    // Initialize unified LLM client
    let client = GenAiClient::new(config)?;
    let generator = TemplateGenerator::new(Box::new(client));

    // Generate template from description
    let template = generator.generate_template(
        "Generate a REST API controller for user management",
        vec!["Include CRUD operations", "Use TypeScript"]
    ).await?;

    println!("Generated template: {}", template.content);
    Ok(())
}
```
```bash
# Generate template using AI
ggen ai generate -d "Database model" --provider openai --model gpt-4o

# Generate SPARQL query from graph
ggen ai sparql -d "Find all users" -g schema.ttl --provider anthropic

# Generate RDF ontology
ggen ai graph -d "E-commerce ontology" -o products.ttl --provider ollama

# Start MCP server for AI tools
ggen ai server --provider openai --model gpt-4o
```
```rust
use ggen_ai::{LlmClient, LlmConfig, LlmResponse, LlmChunk, UsageStats};
use ggen_ai::client::GenAiClient;
```

The core types (with `HashMap` from `std::collections` and `Value` from `serde_json`):

```rust
// Configuration for all LLM providers
#[derive(Debug, Clone)]
pub struct LlmConfig {
    pub model: String,
    pub max_tokens: Option<u32>,
    pub temperature: Option<f32>,
    pub top_p: Option<f32>,
    pub stop: Option<Vec<String>>,
    pub extra: HashMap<String, Value>,
}

// Response from LLM completion
#[derive(Debug, Clone)]
pub struct LlmResponse {
    pub content: String,
    pub usage: Option<UsageStats>,
    pub model: String,
    pub finish_reason: Option<String>,
    pub extra: HashMap<String, Value>,
}

// Streaming chunk from LLM
#[derive(Debug, Clone)]
pub struct LlmChunk {
    pub content: String,
    pub model: String,
    pub finish_reason: Option<String>,
    pub usage: Option<UsageStats>,
    pub extra: HashMap<String, Value>,
}

// Usage statistics
#[derive(Debug, Clone)]
pub struct UsageStats {
    pub prompt_tokens: u32,
    pub completion_tokens: u32,
    pub total_tokens: u32,
}
```
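These are plain data types. Once a completion yields an `LlmResponse` (the client method that produces it is not shown in this README), reporting token usage is straightforward; a minimal sketch using only the fields defined above:

```rust
use ggen_ai::LlmResponse;

// Print the model name and token accounting from a completed response.
fn report_usage(response: &LlmResponse) {
    println!("model: {}", response.model);
    if let Some(usage) = &response.usage {
        println!(
            "tokens: {} prompt + {} completion = {} total",
            usage.prompt_tokens, usage.completion_tokens, usage.total_tokens
        );
    }
}
```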
```rust
use ggen_ai::generators::TemplateGenerator;

// Create generator with LLM client
let generator = TemplateGenerator::new(Box::new(client));

// Generate REST API controller
let template = generator.generate_rest_controller(
    "User management API",
    "TypeScript",
    "Express"
).await?;

// Generate data model
let template = generator.generate_data_model(
    "User entity",
    "Rust"
).await?;

// Generate from natural language description
let template = generator.generate_template(
    "E-commerce system with payment processing",
    vec!["Include inventory management", "Add user registration"]
).await?;
```
```rust
use ggen_ai::generators::SparqlGenerator;
use ggen_core::Graph;

// Create generator with LLM client
let generator = SparqlGenerator::new(Box::new(client));

// Generate query from natural language intent
let query = generator.generate_query(
    &graph,
    "Find all users with email addresses"
).await?;

// Generate query with specific intent
let query = generator.generate_query_with_intent(
    &graph,
    "Find all people and their properties"
).await?;
```
```rust
use ggen_ai::generators::OntologyGenerator;

// Create generator with LLM client
let generator = OntologyGenerator::new(Box::new(client));

// Generate ontology from domain description
let ontology = generator.generate_ontology(
    "E-commerce system",
    vec!["Include Product and Customer classes", "Add Order relationships"]
).await?;

// Generate domain-specific ontology
let ontology = generator.generate_domain_ontology(
    "Healthcare",
    vec!["Patient", "Doctor", "Appointment"],
    vec!["hasAppointment", "treats", "schedules"]
).await?;
```
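Generated ontologies are emitted as Turtle (as the `-o products.ttl` CLI example above shows). For the healthcare call, the output resembles the following; this is an illustrative sketch, and actual generations vary by model:

```turtle
@prefix ex: <http://example.org/healthcare#> .
@prefix owl: <http://www.w3.org/2002/07/owl#> .
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .

ex:Patient a owl:Class .
ex:Doctor a owl:Class .
ex:Appointment a owl:Class .

ex:hasAppointment a owl:ObjectProperty ;
    rdfs:domain ex:Patient ;
    rdfs:range ex:Appointment .

ex:treats a owl:ObjectProperty ;
    rdfs:domain ex:Doctor ;
    rdfs:range ex:Patient .
```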
```rust
use ggen_ai::generators::RefactorAssistant;

// Create refactoring assistant with LLM client
let assistant = RefactorAssistant::new(Box::new(client));

// Suggest refactoring improvements
let suggestions = assistant.suggest_refactoring(
    &code,
    "TypeScript",
    vec!["performance", "readability"]
).await?;

// Get detailed suggestions with explanations
for suggestion in suggestions {
    println!("Suggestion: {}", suggestion.description);
    println!("Impact: {:?}", suggestion.impact);
    println!("Confidence: {:.2}", suggestion.confidence);
}
```
The ggen-ai MCP server provides the following tools for AI assistant integration:
`ai_generate_template`: Generate ggen templates from natural language descriptions.

Parameters:
- `description` (string, required): Natural language description
- `examples` (array, optional): Example requirements or context
- `language` (string, optional): Target programming language
- `framework` (string, optional): Target framework

`ai_generate_sparql`: Generate SPARQL queries from natural language intent and RDF graphs.

Parameters:
- `intent` (string, required): Natural language query description
- `graph` (string, required): RDF graph data in Turtle format

`ai_generate_ontology`: Generate RDF/OWL ontologies from domain descriptions.

Parameters:
- `domain` (string, required): Domain description
- `requirements` (array, optional): Specific requirements or classes

`ai_refactor_code`: Suggest code refactoring improvements using AI analysis.

Parameters:
- `code` (string, required): Code to analyze and refactor
- `language` (string, optional): Programming language for context

`ai_explain_graph`: Explain RDF graph content in natural language.

Parameters:
- `graph` (string, required): RDF graph data in Turtle format
- `focus` (string, optional): Specific aspect to explain

`ai_suggest_delta`: Suggest intelligent merge strategies for delta-driven projection.

Parameters:
- `baseline` (string, required): Baseline version
- `current` (string, required): Current generated version
- `manual` (string, optional): Manual modifications made
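As a sketch of what an MCP client sends for `ai_generate_template` (illustrative only; the argument names follow the parameter list above, not a captured request):

```json
{
  "name": "ai_generate_template",
  "arguments": {
    "description": "REST API controller for user management",
    "examples": ["Include CRUD operations"],
    "language": "TypeScript",
    "framework": "Express"
  }
}
```

Configure LLM providers using environment variables: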
```bash
# OpenAI Configuration
export OPENAI_API_KEY="sk-your-openai-key"
export OPENAI_BASE_URL="https://api.openai.com/v1"  # Optional custom endpoint
export OPENAI_DEFAULT_MODEL="gpt-4o"

# Anthropic Configuration
export ANTHROPIC_API_KEY="sk-ant-your-anthropic-key"
export ANTHROPIC_BASE_URL="https://api.anthropic.com/v1"
export ANTHROPIC_DEFAULT_MODEL="claude-3-5-sonnet-20241022"

# Ollama Configuration (local models)
export OLLAMA_BASE_URL="http://localhost:11434"
export OLLAMA_DEFAULT_MODEL="ministral-3:3b"

# Global Configuration
export AI_DEFAULT_PROVIDER="openai"
export RUST_LOG="ggen_ai=info"
```
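These variables are loaded once `dotenvy::dotenv()` runs, as in the quick start. If you want to thread a model name from the environment into a config by hand, a minimal sketch (the fallback default here is this sketch's assumption, not library behavior):

```rust
use std::collections::HashMap;
use ggen_ai::LlmConfig;

// Build a config whose model name comes from the environment,
// falling back to a default when the variable is unset.
fn config_from_env() -> LlmConfig {
    let model = std::env::var("OPENAI_DEFAULT_MODEL")
        .unwrap_or_else(|_| "gpt-4o".to_string());
    LlmConfig {
        model,
        max_tokens: Some(4096),
        temperature: Some(0.7),
        top_p: Some(0.9),
        stop: None,
        extra: HashMap::new(),
    }
}
```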
```rust
use ggen_ai::{LlmConfig, GenAiClient, TemplateGenerator};

// Configure for OpenAI
let config = LlmConfig {
    model: "gpt-4o".to_string(),
    max_tokens: Some(4096),
    temperature: Some(0.7),
    top_p: Some(0.9),
    stop: None,
    extra: std::collections::HashMap::new(),
};

// Create client
let client = GenAiClient::new(config)?;
let generator = TemplateGenerator::new(Box::new(client));
```
```rust
use ggen_ai::{LlmClient, TemplateGenerator, LlmConfig, GenAiClient};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load environment configuration
    dotenvy::dotenv().ok();

    // Configure and initialize LLM client
    let config = LlmConfig {
        model: "gpt-4o".to_string(),
        max_tokens: Some(4096),
        temperature: Some(0.7),
        top_p: Some(0.9),
        stop: None,
        extra: std::collections::HashMap::new(),
    };
    let client = GenAiClient::new(config)?;
    let generator = TemplateGenerator::new(Box::new(client));

    // Generate REST API controller
    let template = generator.generate_rest_controller(
        "User management API with authentication",
        "TypeScript",
        "Express"
    ).await?;

    // Save template
    std::fs::write("user-api.tmpl", template.content)?;
    println!("Template generated successfully!");
    Ok(())
}
```
```rust
use ggen_ai::{LlmConfig, GenAiClient};
use ggen_ai::generators::SparqlGenerator;
use ggen_core::Graph;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Load graph
    let graph = Graph::new()?;
    graph.insert_turtle(r#"
        @prefix ex: <http://example.org/> .
        ex:user1 a ex:Person ;
            ex:hasName "John Doe" ;
            ex:hasEmail "john@example.com" .
        ex:user2 a ex:Person ;
            ex:hasName "Jane Smith" ;
            ex:hasEmail "jane@example.com" .
    "#)?;

    // Configure LLM client (any supported provider works here)
    let config = LlmConfig {
        model: "gpt-4o".to_string(),
        max_tokens: Some(4096),
        temperature: Some(0.7),
        top_p: Some(0.9),
        stop: None,
        extra: std::collections::HashMap::new(),
    };
    let client = GenAiClient::new(config)?;

    // Generate query
    let generator = SparqlGenerator::new(Box::new(client));
    let query = generator.generate_query(
        &graph,
        "Find all people with email addresses"
    ).await?;
    println!("Generated query: {}", query);

    // Execute query
    let results = graph.query(&query)?;
    println!("Results: {:?}", results);
    Ok(())
}
```
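For the graph above, the generated query would look something like the following (illustrative output; actual generations vary by model):

```sparql
PREFIX ex: <http://example.org/>
SELECT ?person ?name ?email
WHERE {
  ?person a ex:Person ;
          ex:hasName ?name ;
          ex:hasEmail ?email .
}
```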
```bash
# Run unit tests
cargo test

# Run integration tests
cargo test --test integration

# Run with debug logging
RUST_LOG=ggen_ai=debug cargo test

# Test specific provider
OPENAI_API_KEY="test-key" cargo test test_openai_client
```
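For offline unit tests, one option is a stub client with a canned response. This is a sketch only: the `LlmClient` trait's method names and signatures are not shown in this README, so the `complete` method and its return type below are assumptions to adapt to the real trait.

```rust
use std::collections::HashMap;
use ggen_ai::{LlmClient, LlmResponse, TemplateGenerator};

// Hypothetical stub; adjust the impl block to the real LlmClient trait.
struct StubClient {
    canned: String,
}

#[async_trait::async_trait]
impl LlmClient for StubClient {
    // Assumed method name and signature.
    async fn complete(&self, _prompt: &str) -> Result<LlmResponse, Box<dyn std::error::Error>> {
        Ok(LlmResponse {
            content: self.canned.clone(),
            usage: None,
            model: "stub".to_string(),
            finish_reason: Some("stop".to_string()),
            extra: HashMap::new(),
        })
    }
}

#[tokio::test]
async fn generates_template_from_stub() -> Result<(), Box<dyn std::error::Error>> {
    let client = StubClient { canned: "stub template".to_string() };
    let generator = TemplateGenerator::new(Box::new(client));
    let template = generator.generate_template("anything", vec![]).await?;
    assert!(!template.content.is_empty());
    Ok(())
}
```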
Development guidelines:
- Use `cargo make` commands, no direct `cargo` usage
- No `.unwrap()` or `.expect()` in library code

Major Update: ggen-ai v1.0.0 migrates from custom LLM clients to rust-genai for production-ready multi-provider support.
MIT