| Crates.io | scirs2-neural |
| lib.rs | scirs2-neural |
| version | 0.1.0-beta.2 |
| created_at | 2025-04-12 19:49:17.193071+00 |
| updated_at | 2025-09-20 08:59:28.435818+00 |
| description | Neural network building blocks module for SciRS2 (scirs2-neural) - Minimal Version |
| homepage | |
| repository | https://github.com/cool-japan/scirs |
| max_upload_size | |
| id | 1631197 |
| size | 13,490,772 |
🚀 Production-Ready Neural Network Module for the SciRS2 scientific computing library. This module provides comprehensive, battle-tested tools for building, training, and evaluating neural networks with state-of-the-art performance optimizations.
Version 0.1.0-beta.2 marks the first beta release; the module is considered production-ready.
Add the following to your Cargo.toml:
[dependencies]
scirs2-neural = "0.1.0-beta.2"
To enable optimizations and optional features:
[dependencies]
# For SIMD and parallel acceleration
scirs2-neural = { version = "0.1.0-beta.2", features = ["simd", "parallel"] }
# For performance optimization
scirs2-neural = { version = "0.1.0-beta.2", features = ["jit", "cuda"] }
# For integration with scirs2-metrics
scirs2-neural = { version = "0.1.0-beta.2", features = ["metrics_integration"] }
Here's a simple example to get you started:
use scirs2_neural::prelude::*;
use scirs2_neural::layers::{Sequential, Dense};
use ndarray::Array2;
use rand::rngs::SmallRng;
use rand::SeedableRng;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut rng = SmallRng::seed_from_u64(42);

    // Create a simple neural network
    let mut model = Sequential::new();
    model.add(Dense::new(2, 64, Some("relu"), &mut rng)?);
    model.add(Dense::new(64, 32, Some("relu"), &mut rng)?);
    model.add(Dense::new(32, 1, Some("sigmoid"), &mut rng)?);

    // Create sample data (XOR problem)
    let x = Array2::from_shape_vec((4, 2), vec![0.0, 0.0, 0.0, 1.0, 1.0, 0.0, 1.0, 1.0])?;
    // Targets (used when training; see the detailed examples below)
    let _y = Array2::from_shape_vec((4, 1), vec![0.0, 1.0, 1.0, 0.0])?;

    // Forward pass
    let predictions = model.forward(&x.into_dyn())?;
    println!("Predictions: {:?}", predictions);
    Ok(())
}
The library includes complete working examples for a range of use cases. Detailed usage examples:
use scirs2_neural::{activations, layers, losses, models, optimizers};
use ndarray::array;

// Build and train a small multilayer perceptron on the XOR problem
fn train_xor() -> Result<(), Box<dyn std::error::Error>> {
    // Create a sequential model
    let mut model = models::sequential::Sequential::new();

    // Add layers
    model.add_layer(layers::dense::Dense::new(2, 32, None, None)?);
    model.add_layer(activations::relu::ReLU::new());
    model.add_layer(layers::dense::Dense::new(32, 16, None, None)?);
    model.add_layer(activations::relu::ReLU::new());
    model.add_layer(layers::dense::Dense::new(16, 1, None, None)?);
    model.add_layer(activations::sigmoid::Sigmoid::new());

    // Set loss function and optimizer
    let loss = losses::mse::MeanSquaredError::new();
    let optimizer = optimizers::adam::Adam::new(0.001, 0.9, 0.999, 1e-8);

    // Compile the model
    model.compile(loss, optimizer);

    // Sample training data (XOR problem)
    let x_train = array![[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]];
    let y_train = array![[0.0], [1.0], [1.0], [0.0]];

    // Train for 1000 epochs with batch size 4
    model.fit(&x_train, &y_train, 1000, 4, None, None)?;

    // Make predictions
    let predictions = model.predict(&x_train)?;
    println!("Predictions: {:?}", predictions);
    Ok(())
}
// Using autograd for manual gradient computation
fn autograd_example() -> Result<(), Box<dyn std::error::Error>> {
    use scirs2_neural::autograd::Graph;

    // Create computation graph
    let mut graph = Graph::new();

    // Create input variables
    let x = graph.variable(2.0);
    let y = graph.variable(3.0);

    // Build computation
    let z = graph.add(&x, &y);      // z = x + y
    let w = graph.multiply(&z, &x); // w = z * x = (x + y) * x

    // Forward pass
    graph.forward()?;
    println!("Result: {}", w.value()?); // Should be (2 + 3) * 2 = 10

    // Backward pass to compute gradients
    graph.backward(&w)?;

    // Get gradients
    println!("dw/dx: {}", x.gradient()?); // d((x+y)*x)/dx = (x+y) + x = 5 + 2 = 7
    println!("dw/dy: {}", y.gradient()?); // d((x+y)*x)/dy = x = 2
    Ok(())
}
Neural network layer implementations:
use scirs2_neural::layers::{
    Layer,                            // Layer trait
    dense::Dense,                     // Fully connected layer
    dropout::Dropout,                 // Dropout layer
    conv::Conv2D,                     // 2D convolutional layer
    conv::Conv2DTranspose,            // 2D transposed convolutional layer
    pooling::MaxPool2D,               // 2D max pooling layer
    pooling::AvgPool2D,               // 2D average pooling layer
    pooling::GlobalPooling,           // Global pooling layer
    norm::BatchNorm,                  // Batch normalization layer
    norm::LayerNorm,                  // Layer normalization layer
    recurrent::LSTM,                  // Long Short-Term Memory layer
    recurrent::GRU,                   // Gated Recurrent Unit layer
    recurrent::RNN,                   // Simple RNN layer
    attention::MultiHeadAttention,    // Multi-head attention mechanism
    attention::SelfAttention,         // Self-attention mechanism
    transformer::TransformerEncoder,  // Transformer encoder block
    transformer::TransformerDecoder,  // Transformer decoder block
    transformer::Transformer,         // Full transformer architecture
};
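As a rough sketch of how these layers compose: the `Dense` and `ReLU` constructors below follow the training example above, while the `Dropout::new(0.5)` call is an assumption about its signature (a bare dropout rate), so check the API docs for the exact parameters:

use scirs2_neural::models::sequential::Sequential;
use scirs2_neural::{activations, layers};

fn build_regularized_mlp() -> Result<Sequential<f32>, Box<dyn std::error::Error>> {
    let mut model = Sequential::new();
    model.add_layer(layers::dense::Dense::new(128, 64, None, None)?);
    model.add_layer(activations::relu::ReLU::new());
    // Hypothetical constructor: the single dropout-rate argument is an assumption
    model.add_layer(layers::dropout::Dropout::new(0.5));
    model.add_layer(layers::dense::Dense::new(64, 10, None, None)?);
    Ok(model)
}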
Activation functions:
use scirs2_neural::activations::{
    Activation,        // Activation trait
    relu::ReLU,        // Rectified Linear Unit
    sigmoid::Sigmoid,  // Sigmoid activation
    tanh::Tanh,        // Hyperbolic tangent
    softmax::Softmax,  // Softmax activation
    gelu::GELU,        // Gaussian Error Linear Unit
    swish::Swish,      // Swish/SiLU activation
    mish::Mish,        // Mish activation
};
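Each activation implements the `Activation` trait and is added to a model like any other layer, as in the training example above. A minimal sketch, assuming `GELU::new()` mirrors the parameterless `ReLU::new()` and `Sigmoid::new()` constructors shown earlier:

use scirs2_neural::activations::gelu::GELU;
use scirs2_neural::layers::dense::Dense;
use scirs2_neural::models::sequential::Sequential;

fn build_gelu_mlp() -> Result<Sequential<f32>, Box<dyn std::error::Error>> {
    let mut model = Sequential::new();
    model.add_layer(Dense::new(64, 32, None, None)?);
    // `GELU::new()` assumed to mirror `ReLU::new()`; GELU is common in transformer-style models
    model.add_layer(GELU::new());
    model.add_layer(Dense::new(32, 1, None, None)?);
    Ok(model)
}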
Loss function implementations:
use scirs2_neural::losses::{
    Loss,                        // Loss trait
    mse::MeanSquaredError,       // Mean Squared Error
    crossentropy::CrossEntropy,  // Cross Entropy Loss
};
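Losses implement the `Loss` trait and are passed to `compile` alongside an optimizer, as in the training example above. A short sketch; the `CrossEntropy::new()` call is assumed to mirror `MeanSquaredError::new()`, so check the API docs for its parameters:

use scirs2_neural::losses::{crossentropy::CrossEntropy, mse::MeanSquaredError};

// For regression targets
let mse = MeanSquaredError::new();
// For classification targets -- parameterless constructor assumed
let ce = CrossEntropy::new();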
Neural network model implementations:
use scirs2_neural::models::{
    sequential::Sequential,  // Sequential model
    trainer::Trainer,        // Training utilities
};
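The typical `Sequential` lifecycle, condensed from the XOR training example above (assumes `x_train`, `y_train`, and that example's imports are in scope):

// new -> add_layer -> compile -> fit -> predict
let mut model = Sequential::new();
model.add_layer(Dense::new(2, 8, None, None)?);
model.add_layer(ReLU::new());
model.add_layer(Dense::new(8, 1, None, None)?);
model.compile(MeanSquaredError::new(), Adam::new(0.001, 0.9, 0.999, 1e-8));
model.fit(&x_train, &y_train, 100, 4, None, None)?;
let predictions = model.predict(&x_train)?;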
Optimization algorithms:
use scirs2_neural::optimizers::{
    Optimizer,         // Optimizer trait
    sgd::SGD,          // Stochastic Gradient Descent
    adagrad::AdaGrad,  // Adaptive Gradient Algorithm
    rmsprop::RMSprop,  // Root Mean Square Propagation
    adam::Adam,        // Adaptive Moment Estimation
    adamw::AdamW,      // Adam with decoupled weight decay
    radam::RAdam,      // Rectified Adam
};
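Optimizers are interchangeable at `compile` time. `Adam`'s constructor appears in the training example above; the `SGD::new(0.01)` call below is an assumption about its signature (a bare learning rate), so check the API docs:

use scirs2_neural::optimizers::{adam::Adam, sgd::SGD};

// Adam: learning rate, beta1, beta2, epsilon (as in the training example)
let adam = Adam::new(0.001, 0.9, 0.999, 1e-8);
// SGD with a bare learning rate -- hypothetical signature
let sgd = SGD::new(0.01);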
Automatic differentiation functionality:
use scirs2_neural::autograd::{
    Variable,  // Variable holding value and gradient
    Graph,     // Computation graph
    Tape,      // Gradient tape
    Function,  // Function trait
    ops,       // Basic operations
};
Helper utilities:
use scirs2_neural::utils::{
    initializers,  // Weight initialization functions
    metrics,       // Evaluation metrics
    datasets,      // Dataset utilities
};

// Model serialization
use scirs2_neural::serialization::{
    SaveLoad,     // Save/load trait for models
    ModelConfig,  // Configuration for model serialization
    load_model,   // Load model from file
};
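A hedged sketch of a save/load round trip: `load_model` is listed above, but the `save` method name and both signatures are assumptions drawn from the `SaveLoad` trait's description, and the file path is a placeholder:

use scirs2_neural::serialization::{load_model, SaveLoad};

// Persist a trained model, then restore it
// (`save` method and `load_model` signature assumed from the SaveLoad trait)
model.save("xor_model.bin")?;
let restored: Sequential<f32> = load_model("xor_model.bin")?;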
This module integrates with other SciRS2 modules. For example, the linear algebra helpers expose batched operations:
use scirs2_neural::linalg::batch_operations;
use ndarray::Array3;
// Batch matrix multiplication
let a = Array3::<f64>::zeros((32, 10, 20));
let b = Array3::<f64>::zeros((32, 20, 15));
let result = batch_operations::batch_matmul(&a, &b);
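// Each of the 32 batch elements multiplies a (10, 20) matrix by a (20, 15)
// matrix, so `result` has shape (32, 10, 15).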
With the metrics_integration feature, you can use scirs2-metrics for advanced evaluation:
use scirs2_metrics::integration::neural::{NeuralMetricAdapter, MetricsCallback};
use scirs2_neural::callbacks::ScirsMetricsCallback;
use scirs2_neural::evaluation::MetricType;
// Create metric adapters
let metrics = vec![
    NeuralMetricAdapter::<f32>::accuracy(),
    NeuralMetricAdapter::<f32>::precision(),
    NeuralMetricAdapter::<f32>::f1_score(),
    NeuralMetricAdapter::<f32>::mse(),
    NeuralMetricAdapter::<f32>::r2(),
];

// Create callback for tracking metrics during training
let metrics_callback = ScirsMetricsCallback::new(metrics);

// Train model with metrics tracking
model.fit(
    &x_train,
    &y_train,
    epochs,
    batch_size,
    Some(&[&metrics_callback]),
    None,
)?;

// Get evaluation metrics
let eval_results = model.evaluate(
    &x_test,
    &y_test,
    Some(batch_size),
    Some(vec![
        MetricType::Accuracy,
        MetricType::Precision,
        MetricType::F1Score,
    ]),
)?;

// Visualize results (assumes true labels `y_true`, predictions `y_pred`,
// and a precomputed `auc` are in scope)
let roc_viz = neural_roc_curve_visualization(&y_true, &y_pred, Some(auc))?;
This module is ready for production deployment.
See the CONTRIBUTING.md file for contribution guidelines.
This project is dual-licensed; you may use it under either license. See the LICENSE file for details.