| Crates.io | ruvector-gnn |
| lib.rs | ruvector-gnn |
| version | 0.1.31 |
| created_at | 2025-11-26 17:26:24.62054+00 |
| updated_at | 2026-01-08 03:55:13.160628+00 |
| description | Graph Neural Network layer for Ruvector on HNSW topology |
| homepage | |
| repository | https://github.com/ruvnet/ruvector |
| max_upload_size | |
| id | 1951883 |
| size | 280,787 |
Graph Neural Network layer for Ruvector on HNSW topology with SIMD-accelerated message passing.
ruvector-gnn provides production-ready Graph Neural Network implementations optimized for vector database topologies. It enables learned representations over HNSW index structures for enhanced similarity search and graph-based learning. Part of the Ruvector ecosystem.
Add ruvector-gnn to your Cargo.toml:

```toml
[dependencies]
ruvector-gnn = "0.1.31"
```

Or pick an explicit feature set:

```toml
[dependencies]
# Default with SIMD and memory mapping
ruvector-gnn = { version = "0.1.31", features = ["simd", "mmap"] }

# WASM-compatible build
ruvector-gnn = { version = "0.1.31", default-features = false, features = ["wasm"] }

# Node.js bindings
ruvector-gnn = { version = "0.1.31", features = ["napi"] }
```
Available features:

- `simd` (default): SIMD-optimized operations
- `mmap` (default): Memory-mapped weight storage
- `wasm`: WebAssembly-compatible build
- `napi`: Node.js bindings via NAPI-RS

Quick start with a GCN layer:

```rust
use ruvector_gnn::{Activation, GCNLayer, GNNConfig, MessagePassing};
use ndarray::Array2;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Configure GCN layer
    let config = GNNConfig {
        input_dim: 128,
        output_dim: 64,
        hidden_dim: 128,
        num_heads: 4, // For GAT; ignored by GCN
        dropout: 0.1,
        activation: Activation::ReLU,
    };

    // Create GCN layer
    let gcn = GCNLayer::new(config)?;

    // Node features (num_nodes x input_dim)
    let features: Array2<f32> = Array2::zeros((1000, 128));

    // Adjacency list (HNSW neighbors); an empty placeholder here,
    // populated from an HNSW index in practice
    let adjacency: Vec<Vec<usize>> = vec![Vec::new(); 1000];

    // Forward pass
    let output = gcn.forward(&features, &adjacency)?;
    println!("Output shape: {:?}", output.shape());

    Ok(())
}
```
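In a real deployment the adjacency list comes from an HNSW index (see the Ruvector integration example below). As a self-contained stand-in, a brute-force k-NN adjacency can be built straight from the feature matrix; this helper is illustrative only and not part of the ruvector-gnn API:

```rust
use ndarray::Array2;

/// Illustrative only: build a k-NN adjacency list by brute force.
/// An HNSW index yields the same shape of data far more efficiently.
fn knn_adjacency(features: &Array2<f32>, k: usize) -> Vec<Vec<usize>> {
    let n = features.nrows();
    (0..n)
        .map(|i| {
            // Squared Euclidean distance from node i to every other node
            let mut dists: Vec<(usize, f32)> = (0..n)
                .filter(|&j| j != i)
                .map(|j| {
                    let d = features
                        .row(i)
                        .iter()
                        .zip(features.row(j).iter())
                        .map(|(a, b)| (a - b) * (a - b))
                        .sum::<f32>();
                    (j, d)
                })
                .collect();
            dists.sort_by(|a, b| a.1.partial_cmp(&b.1).unwrap());
            dists.into_iter().take(k).map(|(j, _)| j).collect()
        })
        .collect()
}
```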
Graph attention with interpretable per-edge weights:

```rust
use ruvector_gnn::{GATLayer, AttentionConfig};

// Configure multi-head attention
let config = AttentionConfig {
    input_dim: 128,
    output_dim: 64,
    num_heads: 8,
    concat_heads: true,
    dropout: 0.1,
    leaky_relu_slope: 0.2,
};

let gat = GATLayer::new(config)?;

// Forward with attention
let (output, attention_weights) = gat.forward_with_attention(&features, &adjacency)?;

// Attention weights for interpretability
for (node_id, weights) in attention_weights.iter().enumerate() {
    println!("Node {}: attention weights = {:?}", node_id, weights);
}
```
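Because `attention_weights[i]` runs parallel to `adjacency[i]`, the scores can be mapped back to concrete neighbors. A minimal sketch, assuming that alignment, which reports each node's most-attended neighbor:

```rust
// Assumes attention_weights[i][k] scores the edge to adjacency[i][k]
for (node_id, weights) in attention_weights.iter().enumerate() {
    if let Some((k, w)) = weights
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
    {
        println!(
            "Node {}: strongest edge -> {} (weight {:.3})",
            node_id, adjacency[node_id][k], w
        );
    }
}
```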
GraphSAGE with neighbor sampling for mini-batch training:

```rust
use ruvector_gnn::{GraphSAGE, SAGEConfig, Aggregator};

let config = SAGEConfig {
    input_dim: 128,
    output_dim: 64,
    num_layers: 2,
    aggregator: Aggregator::Mean,
    sample_sizes: vec![10, 5], // Neighbor sampling per layer
    normalize: true,
};

let sage = GraphSAGE::new(config)?;

// Mini-batch training with neighbor sampling
let embeddings = sage.forward_minibatch(
    &features,
    &adjacency,
    &batch_nodes, // Target nodes
)?;
```
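The `sample_sizes` field caps fan-out per hop, which keeps mini-batch cost bounded on dense HNSW neighborhoods. A minimal sketch of uniform neighbor sampling, independent of the crate (the `rand` dependency and helper name are assumptions):

```rust
use rand::seq::SliceRandom;

/// Illustrative only: uniformly sample up to `k` neighbors per node.
fn sample_neighbors(adjacency: &[Vec<usize>], k: usize) -> Vec<Vec<usize>> {
    let mut rng = rand::thread_rng();
    adjacency
        .iter()
        .map(|nbrs| {
            if nbrs.len() <= k {
                nbrs.clone()
            } else {
                nbrs.choose_multiple(&mut rng, k).copied().collect()
            }
        })
        .collect()
}
```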
Integration with a Ruvector vector database over its HNSW graph:

```rust
use ruvector_core::VectorDB;
use ruvector_gnn::{HNSWMessagePassing, GNNEmbedder};

// Load vector database
let db = VectorDB::open("vectors.db")?;

// Create GNN that operates on HNSW structure
let gnn = GNNEmbedder::new(GNNConfig {
    input_dim: db.dimensions(),
    output_dim: 64,
    num_layers: 2,
    ..Default::default()
})?;

// Get HNSW neighbors for message passing
let hnsw_graph = db.get_hnsw_graph()?;

// Compute GNN embeddings
let gnn_embeddings = gnn.encode(&db.get_all_vectors()?, &hnsw_graph)?;

// Enhanced search using GNN embeddings
let results = db.search_with_gnn(&query_vector, &gnn, 10)?;
```
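Conceptually, GNN-enhanced search is retrieve-then-rerank: candidates come from the HNSW index, then get reordered by similarity in the learned embedding space. A hedged sketch of that idea (the candidate format and `cosine` helper are assumptions, not the crate's internals):

```rust
/// Illustrative only: rerank HNSW candidates by GNN-embedding cosine similarity.
fn rerank(query_emb: &[f32], candidates: &[(usize, Vec<f32>)], top_k: usize) -> Vec<usize> {
    fn cosine(a: &[f32], b: &[f32]) -> f32 {
        let dot: f32 = a.iter().zip(b).map(|(x, y)| x * y).sum();
        let na = a.iter().map(|x| x * x).sum::<f32>().sqrt();
        let nb = b.iter().map(|x| x * x).sum::<f32>().sqrt();
        dot / (na * nb).max(f32::EPSILON)
    }

    let mut scored: Vec<(usize, f32)> = candidates
        .iter()
        .map(|(id, emb)| (*id, cosine(query_emb, emb)))
        .collect();
    // Highest similarity first
    scored.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap());
    scored.into_iter().take(top_k).map(|(id, _)| id).collect()
}
```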
Core configuration and the message-passing trait:

```rust
// GNN layer configuration
pub struct GNNConfig {
    pub input_dim: usize,
    pub output_dim: usize,
    pub hidden_dim: usize,
    pub num_heads: usize,
    pub dropout: f32,
    pub activation: Activation,
}

// Message passing interface
pub trait MessagePassing {
    fn aggregate(&self, features: &Array2<f32>, neighbors: &[Vec<usize>]) -> Array2<f32>;
    fn update(&self, aggregated: &Array2<f32>, self_features: &Array2<f32>) -> Array2<f32>;
    fn forward(&self, features: &Array2<f32>, adjacency: &[Vec<usize>]) -> Result<Array2<f32>>;
}
```
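To make the trait contract concrete, here is a mean-aggregation layer that mirrors the three methods against plain ndarray. `MeanPassing` is hypothetical, and the crate's `Result` alias is replaced with a boxed-error alias so the sketch stands alone:

```rust
use ndarray::Array2;

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

/// Hypothetical implementor: average neighbor features, then blend with self.
struct MeanPassing;

impl MeanPassing {
    fn aggregate(&self, features: &Array2<f32>, neighbors: &[Vec<usize>]) -> Array2<f32> {
        let mut out = Array2::zeros(features.raw_dim());
        for (i, nbrs) in neighbors.iter().enumerate() {
            // Sum neighbor feature rows, then divide by neighbor count
            for &j in nbrs {
                let mut acc = out.row_mut(i);
                acc += &features.row(j);
            }
            if !nbrs.is_empty() {
                out.row_mut(i).mapv_inplace(|v| v / nbrs.len() as f32);
            }
        }
        out
    }

    fn update(&self, aggregated: &Array2<f32>, self_features: &Array2<f32>) -> Array2<f32> {
        // Equal-weight residual blend; a real layer applies learned weights here
        0.5 * aggregated + 0.5 * self_features
    }

    fn forward(&self, features: &Array2<f32>, adjacency: &[Vec<usize>]) -> Result<Array2<f32>> {
        let agg = self.aggregate(features, adjacency);
        Ok(self.update(&agg, features))
    }
}
```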
Layer types and their primary methods:

```rust
pub struct GCNLayer { /* ... */ }
pub struct GATLayer { /* ... */ }
pub struct GraphSAGE { /* ... */ }

impl GCNLayer {
    pub fn new(config: GNNConfig) -> Result<Self>;
    pub fn forward(&self, x: &Array2<f32>, adj: &[Vec<usize>]) -> Result<Array2<f32>>;
    pub fn save_weights(&self, path: &str) -> Result<()>;
    pub fn load_weights(&mut self, path: &str) -> Result<()>;
}

impl GATLayer {
    pub fn new(config: AttentionConfig) -> Result<Self>;
    pub fn forward(&self, x: &Array2<f32>, adj: &[Vec<usize>]) -> Result<Array2<f32>>;
    pub fn forward_with_attention(&self, x: &Array2<f32>, adj: &[Vec<usize>])
        -> Result<(Array2<f32>, Vec<Vec<f32>>)>;
}
```
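Layer weights can be persisted between runs; a brief usage sketch (the file name is illustrative, and `config` is assumed to be rebuildable for the new layer):

```rust
// Persist trained weights, then restore into a freshly constructed layer
gcn.save_weights("gcn_layer.bin")?;

let mut restored = GCNLayer::new(config)?;
restored.load_weights("gcn_layer.bin")?;
```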
| Operation | Latency (p50) | GFLOPS |
|-----------|---------------|--------|
| GCN forward (1 layer) | ~15ms | 12.5 |
| GAT forward (8 heads) | ~45ms | 8.2 |
| GraphSAGE (2 layers) | ~25ms | 10.1 |
| Message aggregation | ~5ms | 25.0 |
| Model | Peak Memory |
|-------|-------------|
| 128 -> 64 (1 layer) | ~50MB |
| 128 -> 64 (4 layers) | ~150MB |
| With mmap weights | ~10MB (+ disk) |
MIT License - see LICENSE for details.