| Crates.io | llm-orchestrator-audit |
| lib.rs | llm-orchestrator-audit |
| version | 0.1.1 |
| created_at | 2025-11-14 22:54:27.60344+00 |
| updated_at | 2025-11-14 23:46:00.690061+00 |
| description | Tamper-proof audit logging system for LLM workflows with hash chain integrity |
| homepage | https://llm-devops.io/orchestrator |
| repository | https://github.com/llm-devops/llm-orchestrator |
| max_upload_size | |
| id | 1933681 |
| size | 157,376 |
Comprehensive audit logging for security events, workflow executions, and configuration changes in the LLM Orchestrator system.
Multiple Storage Backends
Comprehensive Event Types
Security Features
Retention Management
Query Interface
Add to your Cargo.toml:
[dependencies]
llm-orchestrator-audit = { path = "../llm-orchestrator-audit" }
# Or, to enable the optional database backend instead:
llm-orchestrator-audit = { path = "../llm-orchestrator-audit", features = ["database"] }
use llm_orchestrator_audit::{
AuditLogger, FileAuditStorage, RotationPolicy, AuditResult,
};
use std::sync::Arc;
use std::path::PathBuf;
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create file-based storage
let storage = Arc::new(FileAuditStorage::new(
PathBuf::from("/var/log/orchestrator/audit.log"),
RotationPolicy::Daily,
)?);
// Create audit logger
let logger = AuditLogger::new(storage);
// Log a workflow execution
logger.log_workflow_execution(
"workflow-123",
"user-456",
AuditResult::Success,
Duration::from_millis(500),
).await?;
Ok(())
}
use llm_orchestrator_audit::{AuditLogger, DatabaseAuditStorage};
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create database storage
let storage = Arc::new(
DatabaseAuditStorage::new("postgresql://localhost/audit").await?
);
// Run migrations
storage.migrate().await?;
// Create audit logger
let logger = AuditLogger::new(storage);
// Log an authentication attempt
logger.log_auth_attempt(
"user@example.com",
true,
Some("192.168.1.1".to_string()),
).await?;
Ok(())
}
// Successful authentication
logger.log_auth_attempt(
"user@example.com",
true,
Some("192.168.1.1".to_string()),
).await?;
// Failed authentication
logger.log_auth_attempt(
"user@example.com",
false,
Some("192.168.1.1".to_string()),
).await?;
logger.log_authorization(
"user-123",
"workflow:execute",
"workflow-456",
true, // allowed
).await?;
// Workflow execution
logger.log_workflow_execution(
"workflow-123",
"user-456",
AuditResult::Success,
Duration::from_millis(500),
).await?;
// Workflow creation
logger.log_workflow_create(
"workflow-123",
"My Workflow",
"user-456",
).await?;
// Workflow update
logger.log_workflow_update(
"workflow-123",
"user-456",
serde_json::json!({"field": "new_value"}),
).await?;
// Workflow deletion
logger.log_workflow_delete(
"workflow-123",
"user-456",
).await?;
logger.log_secret_access(
"api_key",
"user-123",
chrono::Utc::now(),
).await?;
logger.log_config_change(
"max_concurrent_workflows",
Some("10"),
"20",
"admin-user",
).await?;
// API key creation
logger.log_api_key_create(
"key-123",
"user-456",
vec!["workflow:read".to_string(), "workflow:execute".to_string()],
).await?;
// API key revocation
logger.log_api_key_revoke(
"key-123",
"admin-user",
"Security policy update",
).await?;
use llm_orchestrator_audit::AuditFilter;
// Query all events for a user
let filter = AuditFilter::new()
.with_user_id("user-123".to_string())
.with_limit(100);
let events = storage.query(filter).await?;
// Query workflow execution events
let filter = AuditFilter::new()
.with_event_type(AuditEventType::WorkflowExecution)
.with_time_range(
Utc::now() - chrono::Duration::days(7),
Utc::now(),
);
let events = storage.query(filter).await?;
// Get a specific event by ID
let event = storage.get(event_id).await?;
// Count events
let count = storage.count(filter).await?;
use llm_orchestrator_audit::AuditRetentionManager;
let manager = AuditRetentionManager::new(storage, 90); // 90 days retention
// Run cleanup manually
let deleted = manager.cleanup().await?;
println!("Deleted {} old audit events", deleted);
use std::time::Duration;
let manager = Arc::new(AuditRetentionManager::new(storage, 90));
// Run cleanup daily
let _handle = manager.start_background_cleanup(Duration::from_secs(86400));
// The cleanup task runs in the background until the handle is dropped
let storage = FileAuditStorage::new(
PathBuf::from("/var/log/audit.log"),
RotationPolicy::Daily,
)?;
// Rotate when file reaches 100 MB
let storage = FileAuditStorage::new(
PathBuf::from("/var/log/audit.log"),
RotationPolicy::SizeBased(100 * 1024 * 1024),
)?;
let storage = FileAuditStorage::new(
PathBuf::from("/var/log/audit.log"),
RotationPolicy::Never,
)?;
The audit events table is created with the following structure:
CREATE TABLE audit_events (
id UUID PRIMARY KEY,
timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
event_type VARCHAR(100) NOT NULL,
user_id VARCHAR(255),
action VARCHAR(255) NOT NULL,
resource_type VARCHAR(50) NOT NULL,
resource_id VARCHAR(255) NOT NULL,
result VARCHAR(50) NOT NULL,
result_error TEXT,
details JSONB,
ip_address INET,
user_agent TEXT,
request_id VARCHAR(255),
previous_hash VARCHAR(64),
event_hash VARCHAR(64)
);
The following indexes are created:
- idx_audit_timestamp: Time-based queries (most common)
- idx_audit_user_id: User-based queries
- idx_audit_event_type: Event type filtering
- idx_audit_resource: Resource lookups
- idx_audit_result: Result-based queries
- idx_audit_request_id: Request correlation
- idx_audit_details: JSONB queries (GIN index)

Each audit event includes a hash of the previous event, creating a tamper-evident chain:
event.previous_hash = previous_event.event_hash;
event.event_hash = compute_hash(event);
Any modification to a past event will break the chain, making tampering detectable.
The storage backends are designed to be append-only. The query interface does not provide update or delete methods for individual events, only bulk deletion for retention management.
This audit logging implementation supports compliance with:
Run the test suite:
cargo test -p llm-orchestrator-audit
Run tests with database support:
cargo test -p llm-orchestrator-audit --features database
See the examples/ directory for complete working examples:
- basic_file_storage.rs: Simple file-based logging
- database_storage.rs: PostgreSQL-backed logging
- retention_management.rs: Automatic cleanup
- query_examples.rs: Filtering and querying events

Licensed under either of:
- Apache License, Version 2.0
- MIT license

at your option.