| Crates.io | lumen-core |
| lib.rs | lumen-core |
| version | 0.3.0 |
| created_at | 2025-11-06 06:41:19.967764+00 |
| updated_at | 2026-01-15 14:12:08.020069+00 |
| description | A tiny ML framework |
| homepage | |
| repository | |
| max_upload_size | |
| id | 1919157 |
| size | 328,594 |
A lightweight, statically typed tensor library for Rust, featuring a PyTorch-like API and built-in automatic differentiation.
Unlike many dynamically typed tensor libraries, lumen leverages Rust's type system with static dtypes (Tensor<T>). This enforces strict type safety at compile time and allows for optimized storage layouts.
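Because the element type is part of the tensor's type, dtype mismatches surface at compile time rather than at runtime. A minimal sketch, assuming elementwise ops such as add require matching element types:
use lumen_core::Tensor;
let a = Tensor::<f32>::zeros((2, 2)).unwrap();
let b = Tensor::<i64>::zeros((2, 2)).unwrap();
// Assuming add is only defined between tensors of the same dtype,
// a.add(&b) fails to compile: Tensor<f32> vs Tensor<i64>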
Initialize tensors using constructors like new, zeros, rand, or arange.
use lumen_core::Tensor;

fn main() {
    // Create from an array
    let a = Tensor::new(&[1, 2, 3]).unwrap();
    println!("Shape: {:?}", a.shape());

    // Create a 2x3 tensor of zeros
    let b = Tensor::<f32>::zeros((2, 3)).unwrap();
    println!("{}", b);

    // Random values between 0 and 1
    let c = Tensor::<f32>::rand(0., 1., (2, 3)).unwrap();
    println!("{}", c);
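
    // arange (shown again in the slicing examples below) yields
    // evenly spaced values over a half-open range
    let d = Tensor::arange(0., 10.).unwrap();
    println!("{}", d);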
}
The library supports powerful indexing capabilities similar to Python's NumPy, allowing for efficient views of data.
use lumen_core::{s, Tensor};

let arr = Tensor::arange(0, 125).unwrap().reshape((5, 5, 5)).unwrap();

// Simple integer index: selects along the first dimension
let sub = arr.index(1).unwrap();

// Range slicing using the s! macro
let sub = arr.index(s!(1:3)).unwrap();
assert_eq!(sub.shape().dims(), &[2, 5, 5]);

// Mixing ranges and integer indices across dimensions
let sub = arr.index((s!(1:3), s!(3:4), 1)).unwrap();
assert_eq!(sub.shape().dims(), &[2, 1]);

// Unbounded (..) and standard Rust ranges also work
let sub = arr.index((s!(1:3), .., 1..2)).unwrap();
Perform matrix multiplication and reshaping with ease.
// Two batched operands: shapes (2, 2, 3) and (2, 3, 2)
let a = Tensor::arange(0., 12.).unwrap().reshape((2, 2, 3)).unwrap();
let b = Tensor::arange(0., 12.).unwrap().reshape((2, 3, 2)).unwrap();

// Matrix multiplication, applied per batch
let c = a.matmul(&b).unwrap();
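// The inner dimension contracts: (2, 2, 3) x (2, 3, 2) -> (2, 2, 2)
assert_eq!(c.shape().dims(), &[2, 2, 2]);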
A wide range of unary and floating-point operations is supported directly on Tensor.
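A minimal sketch of what elementwise use could look like; the method names below (sqrt, exp, neg) are assumptions for illustration, not confirmed parts of the crate's API:
use lumen_core::Tensor;

let t = Tensor::new(&[1.0f64, 4.0, 9.0]).unwrap();
// Hypothetical elementwise operations; check the crate docs for the real names
let r = t.sqrt().unwrap(); // [1.0, 2.0, 3.0]
let e = t.exp().unwrap();  // elementwise exponential
let n = t.neg().unwrap();  // elementwise negation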
The library includes a Var type for automatic differentiation: a Var wraps a Tensor, records the operations applied to it, and computes gradients with backward().
use lumen_core::{Tensor, Var};

// Define inputs and weights as variables (Var) to track gradients
let w = Var::<f64>::new(&[[2.0, 3.0]]).unwrap(); // Shape (1, 2)
let x = Var::<f64>::new(&[[4.0], [5.0]]).unwrap(); // Shape (2, 1)
let b = Var::<f64>::new(&[[10.0]]).unwrap(); // Shape (1, 1)
// Forward pass: y = w x + b
let y = w.matmul(&x).unwrap().add(&b).unwrap();
// Backward pass: Compute gradients
let grads = y.backward().unwrap();
// Verify the computed gradients
// dy/dw = x^T
assert!(grads[&w].allclose(&Tensor::new(&[[4.0, 5.0]]).unwrap(), 1e-5, 1e-8));
// dy/dx = w^T
assert!(grads[&x].allclose(&Tensor::new(&[[2.0], [3.0]]).unwrap(), 1e-5, 1e-8));
// dy/db = 1
assert!(grads[&b].allclose(&Tensor::new(&[[1.0]]).unwrap(), 1e-5, 1e-8));
MIT