| Crates.io | lmrc-kubernetes |
| lib.rs | lmrc-kubernetes |
| version | 0.3.16 |
| created_at | 2025-11-26 18:31:26.639182+00 |
| updated_at | 2025-12-11 13:27:31.060739+00 |
| description | Kubernetes management library for the LMRC Stack - comprehensive library with support for deployments, rollbacks, scaling, and cluster operations |
| homepage | https://gitlab.com/lemarco/lmrc-stack/tree/main/libs/kubernetes-manager |
| repository | https://gitlab.com/lemarco/lmrc-stack |
| max_upload_size | |
| id | 1952002 |
| size | 346,315 |
Part of the LMRC Stack, an Infrastructure-as-Code toolkit for building production-ready Rust applications.
A comprehensive Rust library for managing Kubernetes resources with a focus on deployments, services, secrets, and cluster operations.
Add this to your Cargo.toml:
[dependencies]
lmrc-kubernetes = "0.3"
tokio = { version = "1", features = ["full"] }
- kube 2.0+ for Kubernetes client
- k8s-openapi 0.26+ with v1_31 API support
- tokio for async runtime
use lmrc_kubernetes::{Client, ClientConfig};
use lmrc_kubernetes::deployment::{DeploymentSpec, ContainerSpec};
use lmrc_kubernetes::config::DeploymentOptions;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client
let config = ClientConfig::infer().await?;
let client = Client::new(config, "default").await?;
// Create a deployment
let container = ContainerSpec::new("app", "nginx:1.21")
.with_port(80)
.with_env("ENV", "production");
let deployment = DeploymentSpec::new("web-app")
.with_replicas(3)
.with_container(container);
// Apply the deployment
let options = DeploymentOptions::new().wait(true);
client.deployments().apply(&deployment, &options).await?;
println!("Deployment successful!");
Ok(())
}
use lmrc_kubernetes::deployment::{ContainerSpec, DeploymentSpec, ResourceRequirements};
let resources = ResourceRequirements::new()
.cpu_request("100m")
.cpu_limit("500m")
.memory_request("128Mi")
.memory_limit("512Mi");
let container = ContainerSpec::new("app", "myapp:v1.0.0")
.with_port(8080)
.with_resources(resources);
let deployment = DeploymentSpec::new("my-app")
.with_replicas(5)
.with_container(container);
use lmrc_kubernetes::deployment::{ContainerSpec, Probe};
let liveness = Probe::http("/healthz", 8080)
.initial_delay_seconds(30)
.period_seconds(10);
let readiness = Probe::http("/ready", 8080)
.initial_delay_seconds(5)
.timeout_seconds(5);
let container = ContainerSpec::new("app", "myapp:v1.0.0")
.with_liveness_probe(liveness)
.with_readiness_probe(readiness);
use lmrc_kubernetes::configmap::ConfigMapSpec;
use lmrc_kubernetes::deployment::{ContainerSpec, Volume};
// Create ConfigMap
let configmap = ConfigMapSpec::new("app-config")
.with_data("config.yaml", "setting: value");
client.configmaps().apply(&configmap).await?;
// Mount in deployment
let volume = Volume::from_configmap("config-volume", "app-config");
let container = ContainerSpec::new("app", "myapp:v1.0.0")
.with_volume_mount("config-volume", "/etc/config");
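The volume created above still needs to be registered on the deployment itself so the container's mount has a backing source. The exact builder for that step is not shown in these docs, so the sketch below assumes a hypothetical with_volume method on DeploymentSpec and reuses the options value from the quick-start example:
// Sketch only: `with_volume` is an assumed builder name, not confirmed by the crate docs.
let deployment = DeploymentSpec::new("my-app")
    .with_container(container)
    .with_volume(volume); // attach the ConfigMap-backed volume declared above
client.deployments().apply(&deployment, &options).await?;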
// Create a Docker registry secret
// (access_token holds a registry credential obtained out of band, here a GCR OAuth access token)
client.secrets()
.apply_docker_registry_secret(
"my-registry-secret",
"gcr.io",
"oauth2accesstoken",
&access_token,
)
.await?;
// Use in deployment
let deployment = DeploymentSpec::new("my-app")
.with_image_pull_secret("my-registry-secret")
.with_container(container);
use lmrc_kubernetes::service::{ServiceSpec, ServiceType};
let service = ServiceSpec::new("my-service")
.with_selector("app", "my-app")
.with_port(80, 8080)
.with_type(ServiceType::LoadBalancer);
client.services().apply(&service).await?;
use lmrc_kubernetes::ingress::{IngressSpec, IngressRule, IngressTLS};
let rule = IngressRule::new("example.com")
.with_path("/api", "api-service", 8080)
.with_path("/web", "web-service", 80);
let tls = IngressTLS::new()
.with_host("example.com")
.with_secret_name("tls-secret");
let ingress = IngressSpec::new("my-ingress")
.with_rule(rule)
.with_tls(tls);
client.ingress().apply(&ingress).await?;
use lmrc_kubernetes::networkpolicy::{NetworkPolicySpec, NetworkPolicyPeer, PolicyType};
let policy = NetworkPolicySpec::new("api-network-policy")
.with_pod_selector("app", "api")
.with_ingress_rule(vec![
NetworkPolicyPeer::from_pod_selector("role", "frontend")
], vec![8080])
.with_policy_type(PolicyType::Ingress);
client.network_policies().apply(&policy).await?;
use lmrc_kubernetes::job::JobSpec;
let job = JobSpec::new("data-migration")
.with_container("migrate", "migration:v1.0.0")
.with_parallelism(5)
.with_completions(10)
.with_backoff_limit(3);
client.jobs().apply(&job).await?;
use lmrc_kubernetes::cronjob::CronJobSpec;
let cronjob = CronJobSpec::new("backup", "0 2 * * *")
.with_container("backup", "backup:latest")
.with_restart_policy("OnFailure");
client.cronjobs().apply(&cronjob).await?;
use lmrc_kubernetes::pvc::{PersistentVolumeClaimSpec, AccessMode, StorageClass};
let pvc = PersistentVolumeClaimSpec::new("data-volume", "10Gi")
.with_access_mode(AccessMode::ReadWriteOnce)
.with_storage_class(StorageClass::Standard);
client.pvcs().apply(&pvc).await?;
use lmrc_kubernetes::hpa::{HpaSpec, MetricType, MetricTarget};
let hpa = HpaSpec::new("my-app-hpa", "my-app")
.with_replicas(2, 10)
.with_metric(MetricType::Resource {
name: "cpu".to_string(),
target: MetricTarget::Utilization(80),
});
client.hpas().apply(&hpa).await?;
use lmrc_kubernetes::config::RollbackOptions;
let rollback_opts = RollbackOptions::new()
.revision(3) // Optional: specify revision
.timeout_secs(300);
client.deployments()
.rollback("my-app", &rollback_opts)
.await?;
// Scale to 10 replicas
client.deployments().scale("my-app", 10).await?;
// Create clients for different namespaces
let prod_client = client.with_namespace("production");
let staging_client = client.with_namespace("staging");
// Deploy to production
prod_client.deployments().apply(&deployment, &options).await?;
// Deploy to staging
staging_client.deployments().apply(&deployment, &options).await?;
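Because with_namespace hands back an independent, namespace-scoped client, the same spec can also be rolled out to a list of namespaces in a loop; a minimal sketch reusing client, deployment, and options from the examples above:
// Apply one DeploymentSpec to several namespaces in turn.
for ns in ["staging", "production"] {
    let scoped = client.with_namespace(ns);
    scoped.deployments().apply(&deployment, &options).await?;
}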
The library provides comprehensive error types with detailed context:
use lmrc_kubernetes::Error;
match client.deployments().apply(&deployment, &options).await {
Ok(_) => println!("Deployment successful"),
Err(Error::ValidationError(msg)) => eprintln!("Invalid spec: {}", msg),
Err(Error::ImagePullError { image, container }) => {
eprintln!("Failed to pull image {} for container {}", image, container)
}
Err(Error::ContainerCrashed { container, exit_code, reason }) => {
eprintln!("Container {} crashed with code {}: {}", container, exit_code, reason)
}
Err(e) => eprintln!("Error: {}", e),
}
The library is organized into focused modules:
- client: Main client for accessing Kubernetes API
- deployment: Deployment specifications and management
- service: Service specifications and management
- secret: Secret management
- namespace: Namespace operations
- configmap: ConfigMap management
- ingress: Ingress management
- gateway: Gateway API management
- networkpolicy: NetworkPolicy management
- job: Job management
- cronjob: CronJob management
- pvc: PersistentVolumeClaim management
- hpa: HorizontalPodAutoscaler management
- config: Configuration types and options
- error: Comprehensive error types
Run the test suite:
# Run unit tests
cargo test
# Run with output
cargo test -- --nocapture
# Run specific test
cargo test test_name
# Check code quality
cargo clippy -- -W clippy::all
cargo fmt --check
Contributions are welcome! Please see CONTRIBUTING.md for guidelines.
Part of the LMRC Stack project. Licensed under either of:
at your option.
See CHANGELOG.md for version history and release notes.