| Crates.io | mockforge-observability |
| lib.rs | mockforge-observability |
| version | 0.3.31 |
| created_at | 2025-10-16 21:05:40.809086+00 |
| updated_at | 2026-01-04 23:18:40.818497+00 |
| description | Observability features for MockForge including Prometheus metrics, OpenTelemetry tracing, and recording |
| homepage | https://mockforge.dev |
| repository | https://github.com/SaaSy-Solutions/mockforge |
| max_upload_size | |
| id | 1886762 |
| size | 152,045 |
Comprehensive observability features for MockForge including Prometheus metrics, OpenTelemetry tracing, structured logging, and system monitoring.
This crate provides enterprise-grade observability capabilities to monitor MockForge performance, track system health, and debug issues in production environments. Perfect for understanding how your mock servers behave under load and ensuring reliable testing infrastructure.
use mockforge_observability::prometheus::MetricsRegistry;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize global metrics registry
let registry = MetricsRegistry::new();
// Record HTTP request metrics
registry.record_http_request("GET", "/api/users", 200, 0.045);
// Record gRPC call metrics
registry.record_grpc_request("GetUser", 0, 0.032);
// Export metrics in Prometheus format
let metrics = registry.export_prometheus().await?;
println!("{}", metrics);
Ok(())
}
use mockforge_observability::{init_logging, LoggingConfig};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize structured logging
let logging_config = LoggingConfig {
level: "info".to_string(),
json_format: true,
file_path: Some("./logs/mockforge.log".to_string()),
max_file_size_mb: 10,
max_files: 5,
};
init_logging(logging_config)?;
// Logs will now be structured JSON
tracing::info!("MockForge server started");
tracing::error!("Failed to connect to database");
Ok(())
}
use mockforge_observability::{init_with_otel, OtelTracingConfig};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize OpenTelemetry tracing
let tracing_config = OtelTracingConfig {
service_name: "mockforge-server".to_string(),
jaeger_endpoint: Some("http://localhost:14268/api/traces".to_string()),
sampling_rate: 1.0,
};
init_with_otel(tracing_config).await?;
// Create spans for request tracing
let span = tracing::info_span!("http_request", method = "GET", path = "/api/users");
let _enter = span.enter();
// Your request handling code here...
Ok(())
}
Comprehensive metrics collection with automatic Prometheus export:
use mockforge_observability::prometheus::MetricsRegistry;
let registry = MetricsRegistry::new();
// HTTP metrics
registry.record_http_request("GET", "/api/users", 200, 0.045);
registry.record_http_response_size(2048); // bytes
// gRPC metrics
registry.record_grpc_request("GetUser", 0, 0.032); // method, status, duration
// WebSocket metrics
registry.record_websocket_connection();
registry.record_websocket_message(512); // message size
// GraphQL metrics
registry.record_graphql_request("GetUser", true, 0.028); // operation, success, duration
// Connection metrics
registry.record_active_connection();
registry.record_connection_closed();
- mockforge_http_requests_total{method, path, status} - Total HTTP requests
- mockforge_http_request_duration_seconds{method, path} - Request duration histogram
- mockforge_http_response_size_bytes - Response size distribution
- mockforge_http_active_connections - Current active connections
- mockforge_grpc_requests_total{method, status} - Total gRPC requests
- mockforge_grpc_request_duration_seconds{method} - gRPC request duration
- mockforge_grpc_active_streams - Active gRPC streams
- mockforge_websocket_connections_total - Total WebSocket connections
- mockforge_websocket_active_connections - Current active WebSocket connections
- mockforge_websocket_messages_total{direction} - WebSocket messages sent/received
- mockforge_websocket_message_size_bytes - WebSocket message size distribution
- mockforge_graphql_requests_total{operation, success} - Total GraphQL requests
- mockforge_graphql_request_duration_seconds{operation} - GraphQL request duration
- mockforge_graphql_errors_total{type} - GraphQL error count
- mockforge_system_cpu_usage_percent - CPU usage percentage
- mockforge_system_memory_usage_bytes - Memory usage in bytes
- mockforge_system_threads_total - Total thread count

JSON-formatted logging with configurable outputs:
use mockforge_observability::LoggingConfig;
let config = LoggingConfig {
level: "debug".to_string(), // error, warn, info, debug, trace
json_format: true, // JSON or human-readable
file_path: Some("./logs/app.log".to_string()),
max_file_size_mb: 10,
max_files: 5, // Log rotation
};
// Initialize logging
init_logging(config)?;
// Structured logs with context
tracing::info!(
method = "GET",
path = "/api/users",
status = 200,
duration_ms = 45,
"HTTP request completed"
);
Distributed tracing with multiple backends:
use mockforge_observability::OtelTracingConfig;
// Jaeger tracing
let jaeger_config = OtelTracingConfig {
service_name: "mockforge-api".to_string(),
jaeger_endpoint: Some("http://localhost:14268/api/traces".to_string()),
sampling_rate: 0.1, // 10% sampling
};
// OTLP tracing (generic OpenTelemetry protocol)
let otlp_config = OtelTracingConfig {
service_name: "mockforge-api".to_string(),
otlp_endpoint: Some("http://otel-collector:4317".to_string()),
sampling_rate: 1.0,
};
init_with_otel(jaeger_config).await?;
Automatic system resource monitoring:
use mockforge_observability::system_metrics::{start_system_metrics_collector, SystemMetricsConfig};
let config = SystemMetricsConfig {
collection_interval_seconds: 30, // Collect every 30 seconds
enabled: true,
};
start_system_metrics_collector(config).await?;
use mockforge_observability::LoggingConfig;
let logging_config = LoggingConfig {
level: "info".to_string(),
json_format: true,
file_path: Some("/var/log/mockforge.log".to_string()),
max_file_size_mb: 100,
max_files: 10,
};
use mockforge_observability::OtelTracingConfig;
let tracing_config = OtelTracingConfig {
service_name: "mockforge-server".to_string(),
environment: "production".to_string(),
jaeger_endpoint: Some("http://jaeger:14268/api/traces".to_string()),
otlp_endpoint: None,
sampling_rate: 0.5, // 50% sampling
};
Metrics are automatically configured with sensible defaults. Customize via environment variables:
# Metrics collection
export MOCKFORGE_METRICS_ENABLED=true
export MOCKFORGE_METRICS_PATH=/metrics
# System metrics
export MOCKFORGE_SYSTEM_METRICS_ENABLED=true
export MOCKFORGE_SYSTEM_METRICS_INTERVAL=30
use axum::{routing::get, Router, extract::State};
use mockforge_observability::{
prometheus::MetricsRegistry,
init_logging,
init_with_otel,
LoggingConfig,
OtelTracingConfig,
};
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize observability
init_logging(LoggingConfig {
level: "info".to_string(),
json_format: true,
..Default::default()
})?;
init_with_otel(OtelTracingConfig {
service_name: "mockforge-http".to_string(),
jaeger_endpoint: Some("http://localhost:14268/api/traces".to_string()),
sampling_rate: 1.0,
}).await?;
// Create metrics registry
let metrics = Arc::new(MetricsRegistry::new());
// Build application with metrics middleware
let app = Router::new()
.route("/api/users", get(get_users))
.route("/metrics", get(metrics_endpoint))
.with_state(metrics);
// Start server
let addr = "0.0.0.0:3000".parse()?;
println!("🚀 Server with full observability running at http://{}", addr);
axum::serve(tokio::net::TcpListener::bind(addr).await?, app).await?;
Ok(())
}
async fn get_users(State(metrics): State<Arc<MetricsRegistry>>) -> &'static str {
let start = std::time::Instant::now();
// Your business logic here...
let response = "{\"users\": [{\"id\": 1, \"name\": \"Alice\"}]}";
// Record metrics
let duration = start.elapsed().as_secs_f64();
metrics.record_http_request("GET", "/api/users", 200, duration);
response
}
async fn metrics_endpoint(State(metrics): State<Arc<MetricsRegistry>>) -> String {
metrics.export_prometheus().await.unwrap_or_default()
}
use mockforge_observability::{init_with_otel, OtelTracingConfig};
use tonic::{transport::Server, Request, Response, Status};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Initialize tracing
init_with_otel(OtelTracingConfig {
service_name: "mockforge-grpc".to_string(),
jaeger_endpoint: Some("http://localhost:14268/api/traces".to_string()),
sampling_rate: 1.0,
}).await?;
// Create gRPC server with tracing
let addr = "0.0.0.0:50051".parse()?;
let user_service = UserService::default();
println!("🚀 gRPC server with tracing running at http://{}", addr);
Server::builder()
.add_service(UserServiceServer::new(user_service))
.serve(addr)
.await?;
Ok(())
}
Metrics not appearing:
Logs not structured:
Tracing not working:
High memory usage:
#[cfg(test)]
mod tests {
use super::*;
use mockforge_observability::prometheus::MetricsRegistry;
#[tokio::test]
async fn test_metrics_collection() {
let registry = MetricsRegistry::new();
// Record some metrics
registry.record_http_request("GET", "/test", 200, 0.1);
registry.record_http_request("POST", "/test", 201, 0.05);
// Export and verify
let metrics = registry.export_prometheus().await.unwrap();
assert!(metrics.contains("mockforge_http_requests_total"));
assert!(metrics.contains("mockforge_http_request_duration_seconds"));
}
}
use prometheus::{register_counter, register_histogram, Counter, Histogram};
// Register custom metrics
lazy_static::lazy_static! {
static ref CUSTOM_COUNTER: Counter = register_counter!(
"mockforge_custom_operations_total",
"Total number of custom operations"
).unwrap();
static ref CUSTOM_HISTOGRAM: Histogram = register_histogram!(
"mockforge_custom_operation_duration_seconds",
"Duration of custom operations"
).unwrap();
}
// Use custom metrics
CUSTOM_COUNTER.inc();
let _timer = CUSTOM_HISTOGRAM.start_timer(); // Measures until dropped
See the examples directory for complete working examples including:
- mockforge-core: Core mocking functionality
- prometheus: Metrics collection library
- tracing: Logging and tracing framework
- opentelemetry: Observability standards

Licensed under MIT OR Apache-2.0