| Crates.io | fastembed |
| lib.rs | fastembed |
| version | 5.2.0 |
| created_at | 2023-09-19 12:41:46.995685+00 |
| updated_at | 2025-09-15 19:55:14.806456+00 |
| description | Library for generating vector embeddings, reranking locally. |
| homepage | https://crates.io/crates/fastembed |
| repository | https://github.com/Anush008/fastembed-rs |
| max_upload_size | |
| id | 976821 |
| size | 686,832 |
- clip-ViT-B-32-vision for image-to-text search
- nomic-embed-vision-v1.5 for image-to-text search

To support the library, please donate to our primary upstream dependency, ort - the Rust wrapper for the ONNX Runtime.
Run the following in your project directory:
cargo add fastembed
Or add the following to your Cargo.toml:
[dependencies]
fastembed = "5"
use fastembed::{TextEmbedding, InitOptions, EmbeddingModel};
// With default options
let mut model = TextEmbedding::try_new(Default::default())?;
// With custom options
let mut model = TextEmbedding::try_new(
InitOptions::new(EmbeddingModel::AllMiniLML6V2).with_show_download_progress(true),
)?;
let documents = vec![
"passage: Hello, World!",
"query: Hello, World!",
"passage: This is an example passage.",
// You can leave out the prefix, but it's recommended
"fastembed-rs is licensed under Apache 2.0"
];
// Generate embeddings with the default batch size, 256
let embeddings = model.embed(documents, None)?;
println!("Embeddings length: {}", embeddings.len()); // -> Embeddings length: 4
println!("Embedding dimension: {}", embeddings[0].len()); // -> Embedding dimension: 384
use fastembed::{SparseEmbedding, SparseInitOptions, SparseModel, SparseTextEmbedding};
// With default options
let mut model = SparseTextEmbedding::try_new(Default::default())?;
// With custom options
let mut model = SparseTextEmbedding::try_new(
SparseInitOptions::new(SparseModel::SPLADEPPV1).with_show_download_progress(true),
)?;
let documents = vec![
"passage: Hello, World!",
"query: Hello, World!",
"passage: This is an example passage.",
"fastembed-rs is licensed under Apache 2.0"
];
// Generate embeddings with the default batch size, 256
let embeddings: Vec<SparseEmbedding> = model.embed(documents, None)?;
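Unlike the dense case, each SparseEmbedding pairs vocabulary indices with their weights rather than filling a fixed-size vector. A minimal sketch of inspecting one, assuming the public indices and values fields described in the repository (treat the field names as an assumption to verify against your fastembed version):
// Sketch: inspect a sparse embedding (assumes public `indices`/`values` fields).
if let Some(first) = embeddings.first() {
    // Each entry couples a vocabulary index with its SPLADE weight.
    for (idx, val) in first.indices.iter().zip(first.values.iter()).take(5) {
        println!("token index {} -> weight {:.4}", idx, val);
    }
}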
use fastembed::{ImageEmbedding, ImageInitOptions, ImageEmbeddingModel};
// With default options
let mut model = ImageEmbedding::try_new(Default::default())?;
// With custom options
let mut model = ImageEmbedding::try_new(
ImageInitOptions::new(ImageEmbeddingModel::ClipVitB32).with_show_download_progress(true),
)?;
let images = vec!["assets/image_0.png", "assets/image_1.png"];
// Generate embeddings with the default batch size, 256
let embeddings = model.embed(images, None)?;
println!("Embeddings length: {}", embeddings.len()); // -> Embeddings length: 2
println!("Embedding dimension: {}", embeddings[0].len()); // -> Embedding dimension: 512
use fastembed::{TextRerank, RerankInitOptions, RerankerModel};
// With default options
let mut model = TextRerank::try_new(Default::default())?;
// With custom options
let mut model = TextRerank::try_new(
RerankInitOptions::new(RerankerModel::BGERerankerBase).with_show_download_progress(true),
)?;
let documents = vec![
"hi",
"The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear, is a bear species endemic to China.",
"panda is animal",
"i dont know",
"kind of mammal",
];
// Rerank with the default batch size, 256, and return document contents
let results = model.rerank("what is panda?", documents, true, None)?;
println!("Rerank result: {:?}", results);
Alternatively, local model files can be used for inference via the try_new_from_user_defined(...) methods of the respective structs, as sketched below.
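A minimal sketch of the local-files route for TextEmbedding, assuming the UserDefinedEmbeddingModel, TokenizerFiles, and InitOptionsUserDefined types from the repository docs; the file paths are placeholders, and the field names should be verified against your fastembed version.
use fastembed::{
    InitOptionsUserDefined, TextEmbedding, TokenizerFiles, UserDefinedEmbeddingModel,
};
use std::fs;
// Sketch: load a local ONNX model and tokenizer files (paths are placeholders).
let onnx_file = fs::read("path/to/model.onnx")?;
let tokenizer_files = TokenizerFiles {
    tokenizer_file: fs::read("path/to/tokenizer.json")?,
    config_file: fs::read("path/to/config.json")?,
    special_tokens_map_file: fs::read("path/to/special_tokens_map.json")?,
    tokenizer_config_file: fs::read("path/to/tokenizer_config.json")?,
};
let user_model = UserDefinedEmbeddingModel::new(onnx_file, tokenizer_files);
let mut model =
    TextEmbedding::try_new_from_user_defined(user_model, InitOptionsUserDefined::default())?;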