| Field | Value |
|---|---|
| Crates.io | ruvector-attention-node |
| lib.rs | ruvector-attention-node |
| version | 0.1.0 |
| created_at | 2025-12-09 01:22:50.04116+00 |
| updated_at | 2025-12-09 01:22:50.04116+00 |
| description | Node.js bindings for ruvector-attention |
| homepage | |
| repository | https://github.com/ruvnet/ruvector |
| max_upload_size | |
| id | 1974670 |
| size | 101,082 |
High-performance attention mechanisms for Node.js, powered by Rust.

## Installation

```bash
npm install @ruvector/attention
```
## Usage

### Dot-Product Attention

```js
const { DotProductAttention } = require('@ruvector/attention');

// dim = 512, scale = 1.0
const attention = new DotProductAttention(512, 1.0);

const query = new Float32Array([/* ... */]);
const keys = [new Float32Array([/* ... */])];
const values = [new Float32Array([/* ... */])];

const output = attention.compute(query, keys, values);
```
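The placeholder arrays must match the configured dimension. Below is a minimal runnable sketch, assuming a 512-dimensional model; the `randomVector` helper is illustrative and not part of the package:

```js
const { DotProductAttention } = require('@ruvector/attention');

// Illustrative helper (not part of the package): a random vector of length dim.
function randomVector(dim) {
  return Float32Array.from({ length: dim }, () => Math.random());
}

const dim = 512;
// scale = 1 / sqrt(dim) gives standard scaled dot-product attention.
const attention = new DotProductAttention(dim, 1 / Math.sqrt(dim));

const query = randomVector(dim);
const keys = [randomVector(dim), randomVector(dim)];   // two key vectors
const values = [randomVector(dim), randomVector(dim)]; // one value per key

const output = attention.compute(query, keys, values);
console.log(output.length); // expected to equal dim
```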
### Multi-Head Attention

```js
const { MultiHeadAttention } = require('@ruvector/attention');

const mha = new MultiHeadAttention(512, 8); // 512 dim, 8 heads
const output = mha.compute(query, keys, values);

// Async version for large computations (requires an async context in CommonJS)
const outputAsync = await mha.computeAsync(query, keys, values);
```
### Flash Attention

```js
const { FlashAttention } = require('@ruvector/attention');

const flash = new FlashAttention(512, 64); // 512 dim, 64 block size
const output = flash.compute(query, keys, values);
```
### Hyperbolic Attention

```js
const { HyperbolicAttention } = require('@ruvector/attention');

const hyperbolic = new HyperbolicAttention(512, -1.0); // negative curvature
const output = hyperbolic.compute(query, keys, values);
```
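`LinearAttention` is listed in the API reference below but has no example in this section; a hedged sketch of the same call pattern, with `numFeatures = 64` as an assumed (not documented) setting:

```js
const { LinearAttention } = require('@ruvector/attention');

// Constructor signature per the API reference: (dim, numFeatures).
const linear = new LinearAttention(512, 64);
const output = linear.compute(query, keys, values); // same shapes as the examples above
```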
### Mixture-of-Experts Attention

```js
const { MoEAttention } = require('@ruvector/attention');

const moe = new MoEAttention({
  dim: 512,
  numExperts: 8,
  topK: 2,
  expertCapacity: 1.25
});

const output = moe.compute(query, keys, values);
const expertUsage = moe.getExpertUsage();
```
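`getExpertUsage()` returns one number per expert (see the API reference). A small sketch, assuming those values are selection counts, that prints the routing distribution:

```js
// Assumption: expertUsage[i] counts how often expert i was selected.
const total = expertUsage.reduce((sum, n) => sum + n, 0) || 1;
expertUsage.forEach((count, i) => {
  console.log(`expert ${i}: ${((100 * count) / total).toFixed(1)}%`);
});
```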
## Training

```js
const { Trainer, AdamOptimizer } = require('@ruvector/attention');

// Configure training
const trainer = new Trainer({
  learningRate: 0.001,
  batchSize: 32,
  numEpochs: 100,
  weightDecay: 0.01,
  gradientClip: 1.0,
  warmupSteps: 1000
});

// Training step
const loss = trainer.trainStep(inputs, targets);

// Get metrics
const metrics = trainer.getMetrics();
console.log(`Loss: ${metrics.loss}, LR: ${metrics.learningRate}`);

// Custom optimizer
const optimizer = new AdamOptimizer(0.001, 0.9, 0.999, 1e-8);
const updatedParams = optimizer.step(gradients);
```
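`AdamOptimizer` exposes `getLearningRate()` and `setLearningRate()` (see the API reference), so an external schedule can drive it. A hedged sketch of linear warmup; the schedule itself is illustrative, not a package feature:

```js
const baseLr = 0.001;
const warmupSteps = 1000;

// Linearly ramp the learning rate from 0 to baseLr over warmupSteps steps.
function applyWarmup(optimizer, step) {
  const scale = Math.min(1, step / warmupSteps);
  optimizer.setLearningRate(baseLr * scale);
}

for (let step = 1; step <= warmupSteps; step++) {
  applyWarmup(optimizer, step);
  // ... compute gradients, then: optimizer.step(gradients);
}
```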
## Batch and Parallel Processing

```js
const { BatchProcessor, parallelAttentionCompute } = require('@ruvector/attention');

// Batch processor for efficient batching
const processor = new BatchProcessor({
  batchSize: 32,
  numWorkers: 4,
  prefetch: true
});

const results = await processor.processBatch(queries, keys, values);
const throughput = processor.getThroughput();
```

```js
// Parallel computation with automatic worker management
const results = await parallelAttentionCompute(
  'multi-head',
  queries,
  keys,
  values,
  4 // number of workers
);
```
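Both `processBatch` and `parallelAttentionCompute` return promises, so in CommonJS they must run inside an async function. A minimal runnable wrapper, with illustrative random data:

```js
const { parallelAttentionCompute } = require('@ruvector/attention');

async function main() {
  const dim = 512;
  const batch = 8;
  const queries = Array.from({ length: batch }, () =>
    Float32Array.from({ length: dim }, () => Math.random())
  );
  const keys = queries.map((q) => [q]);   // one key per query (illustrative)
  const values = queries.map((q) => [q]); // one value per query (illustrative)

  const results = await parallelAttentionCompute('multi-head', queries, keys, values, 4);
  console.log(`computed ${results.length} outputs`);
}

main().catch(console.error);
```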
## API Reference

### DotProductAttention

```ts
constructor(dim: number, scale?: number)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
```

### MultiHeadAttention

```ts
constructor(dim: number, numHeads: number)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
computeAsync(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Promise<Float32Array>
```

### FlashAttention

```ts
constructor(dim: number, blockSize: number)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
```

### LinearAttention

```ts
constructor(dim: number, numFeatures: number)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
```

### HyperbolicAttention

```ts
constructor(dim: number, curvature: number)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
```

### MoEAttention

```ts
constructor(config: MoEConfig)
compute(query: Float32Array, keys: Float32Array[], values: Float32Array[]): Float32Array
getExpertUsage(): number[]
```

### Trainer

```ts
constructor(config: TrainingConfig)
trainStep(inputs: Float32Array[], targets: Float32Array[]): number
trainStepAsync(inputs: Float32Array[], targets: Float32Array[]): Promise<number>
getMetrics(): TrainingMetrics
```

### AdamOptimizer

```ts
constructor(learningRate: number, beta1?: number, beta2?: number, epsilon?: number)
step(gradients: Float32Array[]): Float32Array[]
getLearningRate(): number
setLearningRate(lr: number): void
```

### BatchProcessor

```ts
constructor(config: BatchConfig)
processBatch(queries: Float32Array[], keys: Float32Array[][], values: Float32Array[][]): Promise<Float32Array[]>
getThroughput(): number
```

### parallelAttentionCompute

```ts
function parallelAttentionCompute(
  attentionType: string,
  queries: Float32Array[],
  keys: Float32Array[][],
  values: Float32Array[][],
  numWorkers?: number
): Promise<Float32Array[]>
```

### version

Returns the package version string.
## Performance

This package uses Rust under the hood for optimal performance. Pre-built binaries are provided for common platforms.
## License

MIT OR Apache-2.0