| Crates.io | revoke-resilience |
| lib.rs | revoke-resilience |
| version | 0.3.0 |
| created_at | 2025-07-13 06:07:17.938927+00 |
| updated_at | 2025-07-13 06:07:17.938927+00 |
| description | Circuit breakers, rate limiting, and retry mechanisms for Revoke framework |
| homepage | |
| repository | https://github.com/revoke/revoke |
| max_upload_size | |
| id | 1750025 |
| size | 104,048 |
Resilience patterns module for the Revoke microservices framework, providing circuit breakers, rate limiters, retry mechanisms, and other fault tolerance patterns.
Add to your Cargo.toml:
[dependencies]
revoke-resilience = { version = "0.3.0" }
use revoke_resilience::{CircuitBreaker, RateLimiter, Retry};
use std::time::Duration;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Circuit breaker
    let breaker = CircuitBreaker::builder()
        .failure_threshold(5)
        .recovery_timeout(Duration::from_secs(60))
        .build();

    // Rate limiter
    let limiter = RateLimiter::builder()
        .max_requests(100)
        .window(Duration::from_secs(1))
        .build();

    // Retry policy
    let retry = Retry::builder()
        .max_attempts(3)
        .initial_delay(Duration::from_millis(100))
        .build();

    // Use together
    let result = retry
        .call(|| async {
            limiter.acquire().await?;
            breaker.call(async {
                // Your operation here
                make_http_request().await
            }).await
        })
        .await?;

    Ok(())
}
Prevents cascading failures by stopping requests to failing services:
use revoke_resilience::{CircuitBreaker, CircuitState};
let breaker = CircuitBreaker::builder()
    .failure_threshold(5)      // Open after 5 failures
    .success_threshold(2)      // Close after 2 successes
    .recovery_timeout(Duration::from_secs(30))
    .build();

// Execute with circuit breaker
match breaker.call(risky_operation()).await {
    Ok(result) => println!("Success: {:?}", result),
    Err(e) => {
        if matches!(breaker.state(), CircuitState::Open) {
            println!("Circuit is open, using fallback");
            return use_fallback();
        }
        println!("Operation failed: {}", e);
    }
}
// Check circuit state
println!("Circuit state: {:?}", breaker.state());
println!("Failure count: {}", breaker.failure_count());
Control request rates to prevent overload:
use revoke_resilience::{RateLimiter, RateLimitStrategy};
// Token bucket rate limiter
let limiter = RateLimiter::token_bucket()
    .capacity(100)       // Bucket capacity
    .refill_rate(10.0)   // Tokens per second
    .build();

// Sliding window rate limiter
let limiter = RateLimiter::sliding_window()
    .max_requests(1000)
    .window(Duration::from_secs(60))
    .build();

// Use rate limiter
if limiter.try_acquire() {
    // Process request
} else {
    // Rate limit exceeded
    return Err("Too many requests");
}
// Async acquire (waits if needed)
limiter.acquire().await?;
// Check availability
let available = limiter.available_permits();
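Conceptually, the token-bucket strategy above is just a counter that refills at refill_rate tokens per second up to capacity, and try_acquire() takes one token when one is available. A minimal standalone sketch of that accounting (illustrative only, not the crate's implementation):
use std::time::Instant;

struct TokenBucketSketch {
    capacity: f64,        // corresponds to .capacity(100)
    refill_rate: f64,     // corresponds to .refill_rate(10.0), tokens per second
    tokens: f64,
    last_refill: Instant,
}

impl TokenBucketSketch {
    /// Top the bucket up in proportion to the time elapsed since the last refill.
    fn refill(&mut self) {
        let now = Instant::now();
        let elapsed = now.duration_since(self.last_refill).as_secs_f64();
        self.tokens = (self.tokens + elapsed * self.refill_rate).min(self.capacity);
        self.last_refill = now;
    }

    /// Non-blocking acquire: take one token if available, mirroring try_acquire().
    fn try_acquire(&mut self) -> bool {
        self.refill();
        if self.tokens >= 1.0 {
            self.tokens -= 1.0;
            true
        } else {
            false
        }
    }
}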
Automatic retry with various backoff strategies:
use revoke_resilience::{Retry, RetryPolicy, BackoffStrategy};
// Exponential backoff
let retry = Retry::exponential()
    .max_attempts(5)
    .initial_delay(Duration::from_millis(100))
    .max_delay(Duration::from_secs(10))
    .multiplier(2.0)
    .build();

// Fixed delay
let retry = Retry::fixed()
    .max_attempts(3)
    .delay(Duration::from_secs(1))
    .build();

// With jitter
let retry = Retry::exponential()
    .with_jitter(0.1)    // 10% jitter
    .build();

// Custom retry condition
let retry = Retry::builder()
    .max_attempts(3)
    .retry_on(|error: &MyError| {
        matches!(error, MyError::Transient(_))
    })
    .build();

// Execute with retry
let result = retry.call(|| async {
    perform_operation().await
}).await?;
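As a rough guide to what the exponential settings produce, the wait before retry n is typically initial_delay * multiplier^(n - 1), capped at max_delay, with jitter spread on top; with the values above (100 ms initial, multiplier 2.0) the waits between the five attempts come out to roughly 100 ms, 200 ms, 400 ms and 800 ms. A small illustrative calculation (the crate's exact formula may differ):
use std::time::Duration;

/// Delay before the n-th retry (1-based); illustrative only.
fn backoff_delay(attempt: u32, initial: Duration, multiplier: f64, max: Duration) -> Duration {
    let base = initial.as_secs_f64() * multiplier.powi(attempt as i32 - 1);
    // Never wait longer than max_delay, no matter how many attempts have failed
    Duration::from_secs_f64(base.min(max.as_secs_f64()))
    // with_jitter(0.1) would then randomize each delay by roughly +/-10%
}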
Isolate resources to prevent total failure:
use revoke_resilience::{Bulkhead, BulkheadStrategy};
// Semaphore bulkhead (limits concurrent executions)
let bulkhead = Bulkhead::semaphore()
    .max_concurrent(10)
    .queue_size(50)
    .build();

// Thread pool bulkhead (isolates execution)
let bulkhead = Bulkhead::thread_pool()
    .pool_size(5)
    .queue_size(100)
    .build();

// Execute with bulkhead
let result = bulkhead.call(async {
    process_request().await
}).await?;
// Check metrics
println!("Active executions: {}", bulkhead.active_count());
println!("Queue size: {}", bulkhead.queue_length());
Prevent operations from hanging:
use revoke_resilience::Timeout;
let timeout = Timeout::new(Duration::from_secs(5));
// Execute with timeout
match timeout.call(slow_operation()).await {
    Ok(result) => println!("Completed: {:?}", result),
    Err(e) if e.is_timeout() => println!("Operation timed out"),
    Err(e) => println!("Operation failed: {}", e),
}

// With custom timeout per call
let result = timeout
    .with_duration(Duration::from_secs(10))
    .call(very_slow_operation())
    .await?;
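If you only need a one-off deadline and are already running on Tokio, much the same effect is available directly from tokio::time::timeout; the wrapper above mainly earns its keep when combined with the other patterns. A minimal equivalent (fetch_with_deadline and slow_operation are placeholder names):
use std::time::Duration;
use tokio::time::timeout;

async fn fetch_with_deadline() -> Result<String, Box<dyn std::error::Error>> {
    // tokio::time::timeout resolves to Err(Elapsed) if the future misses the deadline
    match timeout(Duration::from_secs(5), slow_operation()).await {
        Ok(result) => Ok(result?),
        Err(_elapsed) => Err("operation timed out".into()),
    }
}

// Placeholder for whatever operation you are guarding.
async fn slow_operation() -> Result<String, Box<dyn std::error::Error>> {
    Ok("done".to_string())
}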
Provide alternative responses when primary fails:
use revoke_resilience::Fallback;
let fallback = Fallback::new(|| async {
    // Fallback value
    Ok("default value".to_string())
});

// Execute with fallback
let result = fallback
    .call(async {
        fetch_from_primary().await
    })
    .await?;

// Conditional fallback
let fallback = Fallback::builder()
    .when(|error: &MyError| {
        matches!(error, MyError::ServiceUnavailable)
    })
    .with(|| async {
        fetch_from_cache().await
    })
    .build();
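Written out by hand, the conditional fallback amounts to: try the primary, fall back only for the errors the predicate accepts, and propagate everything else. An illustrative, self-contained version (the MyError variants and fetch functions here are stubs):
#[derive(Debug)]
enum MyError {
    ServiceUnavailable,
    Other(String),
}

async fn fetch_from_primary() -> Result<String, MyError> {
    Err(MyError::ServiceUnavailable) // pretend the primary is down
}

async fn fetch_from_cache() -> Result<String, MyError> {
    Ok("cached value".to_string())
}

async fn with_fallback() -> Result<String, MyError> {
    match fetch_from_primary().await {
        Ok(value) => Ok(value),
        // Only fall back for the error the predicate accepts...
        Err(MyError::ServiceUnavailable) => fetch_from_cache().await,
        // ...and propagate everything else unchanged.
        Err(other) => Err(other),
    }
}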
Chain multiple resilience patterns for comprehensive protection:
use revoke_resilience::{ResilienceChain, CircuitBreaker, RateLimiter, Retry, Timeout, Fallback};

let chain = ResilienceChain::new()
    .add(RateLimiter::token_bucket()
        .capacity(100)
        .refill_rate(10.0)
        .build())
    .add(CircuitBreaker::builder()
        .failure_threshold(5)
        .build())
    .add(Retry::exponential()
        .max_attempts(3)
        .build())
    .add(Timeout::new(Duration::from_secs(5)))
    .add(Fallback::new(|| async {
        Ok(cached_response())
    }));

// Execute through all patterns
let result = chain.call(|| async {
    make_request().await
}).await?;
use revoke_resilience::metrics::{ResilienceMetrics, MetricsCollector};
let metrics = ResilienceMetrics::new();
// Register components
metrics.register("api_breaker", &circuit_breaker);
metrics.register("api_limiter", &rate_limiter);
// Collect metrics
let snapshot = metrics.collect();
println!("Circuit breaker state: {:?}", snapshot.circuit_breakers["api_breaker"]);
println!("Rate limiter metrics: {:?}", snapshot.rate_limiters["api_limiter"]);
// Export to Prometheus
let prometheus = metrics.export_prometheus();
Create custom resilience policies:
use revoke_resilience::{Policy, PolicyResult};
use async_trait::async_trait;
use std::future::Future;

struct CustomPolicy {
    // Your configuration
}

#[async_trait]
impl Policy for CustomPolicy {
    type Error = MyError;

    async fn execute<F, Fut, T>(&self, f: F) -> PolicyResult<T, Self::Error>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, Self::Error>>,
    {
        // Your custom logic
        f().await
    }
}
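Once the trait is implemented, the custom policy is invoked like any other wrapper. A hypothetical call site, assuming PolicyResult behaves like an ordinary Result so that ? applies:
let policy = CustomPolicy { /* your configuration */ };

let value = policy
    .execute(|| async {
        // Any operation returning Result<T, MyError>
        Ok::<_, MyError>(42)
    })
    .await?;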
React to resilience events:
use revoke_resilience::events::{ResilienceEvent, EventHandler};
let event_handler = EventHandler::new();
// Subscribe to events
event_handler.on_circuit_opened(|circuit_name| {
    println!("Circuit {} opened!", circuit_name);
    send_alert(circuit_name);
});

event_handler.on_rate_limit_exceeded(|limiter_name| {
    metrics.increment_counter("rate_limit_exceeded", &[("limiter", limiter_name)]);
});
// Attach to components
circuit_breaker.set_event_handler(event_handler.clone());
rate_limiter.set_event_handler(event_handler.clone());
# resilience.yaml
circuit_breakers:
  api_breaker:
    failure_threshold: 5
    success_threshold: 2
    recovery_timeout: 30s

rate_limiters:
  api_limiter:
    strategy: token_bucket
    capacity: 100
    refill_rate: 10.0

retry_policies:
  default:
    max_attempts: 3
    initial_delay: 100ms
    max_delay: 10s
    multiplier: 2.0
use revoke_resilience::config::ResilienceConfig;
let config = ResilienceConfig::from_file("resilience.yaml")?;
let breaker = config.circuit_breaker("api_breaker")?;
let limiter = config.rate_limiter("api_limiter")?;
use revoke_resilience::config::EnvConfig;
// RESILIENCE_CIRCUIT_BREAKER_THRESHOLD=5
// RESILIENCE_RATE_LIMIT_CAPACITY=100
let config = EnvConfig::load()?;
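The exact mapping from variable names to settings is defined by EnvConfig, but as a rough picture the two variables above feed the circuit breaker's failure threshold and the rate limiter's bucket capacity, i.e. roughly this by hand (illustrative only):
use std::env;

fn read_env_settings() -> Result<(u32, u32), Box<dyn std::error::Error>> {
    // Circuit breaker failure threshold
    let threshold: u32 = env::var("RESILIENCE_CIRCUIT_BREAKER_THRESHOLD")?.parse()?;
    // Rate limiter bucket capacity
    let capacity: u32 = env::var("RESILIENCE_RATE_LIMIT_CAPACITY")?.parse()?;
    Ok((threshold, capacity))
}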
See the examples directory:
- circuit_breaker.rs - Circuit breaker patterns
- rate_limiting.rs - Various rate limiting strategies
- retry_patterns.rs - Retry with different backoff strategies
- combined_resilience.rs - Combining multiple patterns
- service_protection.rs - Complete service protection setup