| Crates.io | scirs2-text |
| lib.rs | scirs2-text |
| version | 0.1.0-beta.2 |
| created_at | 2025-04-12 11:44:33.665071+00 |
| updated_at | 2025-09-20 09:02:44.459428+00 |
| description | Text processing module for SciRS2 (scirs2-text) |
| homepage | |
| repository | https://github.com/cool-japan/scirs |
| max_upload_size | |
| id | 1630862 |
| size | 1,775,296 |
Production-ready text processing module for SciRS2 (Scientific Computing in Rust - Next Generation). This crate provides comprehensive, high-performance text processing, natural language processing, and machine learning text utilities optimized for scientific and industrial applications.
🚀 Production Status: Version 0.1.0-beta.2 is the first beta release and is production-ready with stable APIs, comprehensive test coverage, and proven performance.
Add the following to your Cargo.toml:
[dependencies]
scirs2-text = "0.1.0-beta.2"
use scirs2_text::{
    preprocess::{BasicNormalizer, BasicTextCleaner, TextCleaner, TextNormalizer},
    tokenize::{NgramTokenizer, RegexTokenizer, Tokenizer, WordTokenizer},
    vectorize::{CountVectorizer, TfidfVectorizer, Vectorizer},
    stemming::{PorterStemmer, Stemmer},
};
// Text normalization
let normalizer = BasicNormalizer::default();
let normalized = normalizer.normalize("Hello, World!")?;
// Tokenization
let tokenizer = WordTokenizer::new(true);
let tokens = tokenizer.tokenize("The quick brown fox")?;
// N-gram tokenization
let ngram_tokenizer = NgramTokenizer::new(2)?;
let ngrams = ngram_tokenizer.tokenize("hello world test")?;
// Stemming
let stemmer = PorterStemmer::new();
let stemmed = stemmer.stem("running")?;
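// stemmed == "run" (Porter stemming strips the "-ing" suffix and undoubles the consonant)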
// Vectorization
let mut vectorizer = CountVectorizer::new(false);
let documents = vec!["Hello world", "World of Rust"];
vectorizer.fit(&documents)?;
let vector = vectorizer.transform("Hello Rust")?;
See the examples/ directory for comprehensive demonstrations:
- text_processing_demo.rs: Complete text processing pipeline
- word2vec_example.rs: Word embedding training and usage
- enhanced_vectorization_demo.rs: Advanced vectorization with n-grams and filtering
use scirs2_text::text_statistics::{TextStatistics, ReadabilityMetrics};
// Create text statistics analyzer
let stats = TextStatistics::new();
// Calculate readability metrics
let text = "The quick brown fox jumps over the lazy dog. This is a simple text passage used for demonstration purposes.";
let metrics = stats.get_all_metrics(text)?;
println!("Flesch Reading Ease: {}", metrics.flesch_reading_ease);
println!("Flesch-Kincaid Grade Level: {}", metrics.flesch_kincaid_grade_level);
println!("Gunning Fog Index: {}", metrics.gunning_fog);
println!("Lexical Diversity: {}", metrics.lexical_diversity);
println!("Word Count: {}", metrics.text_statistics.word_count);
println!("Average Sentence Length: {}", metrics.text_statistics.avg_sentence_length);
Run examples with:
cargo run --example text_processing_demo
cargo run --example word2vec_example
cargo run --example enhanced_vectorization_demo
use scirs2_text::tokenize::{RegexTokenizer, Tokenizer};
// Custom regex tokenizer
let tokenizer = RegexTokenizer::new(r"\b\w+\b", false)?;
let tokens = tokenizer.tokenize("Hello, world!")?;
// Tokenize with gaps (pattern matches separators)
let gap_tokenizer = RegexTokenizer::new(r"\s*,\s*", true)?;
let tokens = gap_tokenizer.tokenize("apple, banana, cherry")?;
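// tokens == ["apple", "banana", "cherry"]: with gaps enabled the pattern
// matches the separators, so the text between matches becomes the tokens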
use scirs2_text::tokenize::{NgramTokenizer, Tokenizer};
// Bigrams
let bigram_tokenizer = NgramTokenizer::new(2)?;
let bigrams = bigram_tokenizer.tokenize("Hello world test")?;
// Range of n-grams (2-3)
let range_tokenizer = NgramTokenizer::with_range(2, 3)?;
let ngrams = range_tokenizer.tokenize("Hello world test")?;
// Alphanumeric only
let alpha_tokenizer = NgramTokenizer::new(2)?.only_alphanumeric(true);
use scirs2_text::vectorize::{TfidfVectorizer, Vectorizer};
let mut tfidf = TfidfVectorizer::new(false, true, Some("l2".to_string()));
tfidf.fit(&documents)?;
let tfidf_matrix = tfidf.transform_batch(&documents)?;
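The fitted matrix can be fed straight into ndarray-based math. For instance, a cosine similarity between two document rows (this sketch assumes transform_batch returns a dense ndarray::Array2<f64>; adapt if the crate uses a different matrix type):
use ndarray::Array2;
// Cosine similarity between rows i and j of a dense TF-IDF matrix.
fn cosine_rows(m: &Array2<f64>, i: usize, j: usize) -> f64 {
    let (a, b) = (m.row(i), m.row(j));
    let dot = a.dot(&b);
    let norm = a.dot(&a).sqrt() * b.dot(&b).sqrt();
    if norm == 0.0 { 0.0 } else { dot / norm }
}
let sim = cosine_rows(&tfidf_matrix, 0, 1);
println!("cosine(doc0, doc1) = {sim:.3}");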
use scirs2_text::enhanced_vectorize::{EnhancedCountVectorizer, EnhancedTfidfVectorizer};
// Count vectorizer with bigrams
let mut count_vec = EnhancedCountVectorizer::new()
    .set_ngram_range((1, 2))?
    .set_max_features(Some(100));
count_vec.fit(&documents)?;
// TF-IDF with document frequency filtering
let mut tfidf = EnhancedTfidfVectorizer::new()
    .set_ngram_range((1, 3))?
    .set_min_df(0.1)? // Minimum 10% document frequency
    .set_smooth_idf(true)
    .set_sublinear_tf(true);
tfidf.fit(&documents)?;
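For example, in a 50-document corpus, set_min_df(0.1) keeps only terms that appear in at least 5 documents, pruning rare n-grams from the (1, 3) range.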
use scirs2_text::string_metrics::{
    DamerauLevenshteinMetric, StringMetric, Soundex, Metaphone, PhoneticAlgorithm
};
use scirs2_text::weighted_distance::{
    WeightedLevenshtein, WeightedDamerauLevenshtein, WeightedStringMetric,
    LevenshteinWeights, DamerauLevenshteinWeights
};
use std::collections::HashMap;
// Damerau-Levenshtein distance with transpositions
let dl_metric = DamerauLevenshteinMetric::new();
let distance = dl_metric.distance("kitten", "sitting")?;
let similarity = dl_metric.similarity("kitten", "sitting")?;
// Restricted Damerau-Levenshtein (Optimal String Alignment)
let osa_metric = DamerauLevenshteinMetric::restricted();
let osa_distance = osa_metric.distance("kitten", "sitting")?;
// Weighted Levenshtein with custom operation costs
let weights = LevenshteinWeights::new(2.0, 1.0, 0.5); // insertions=2, deletions=1, substitutions=0.5
let weighted = WeightedLevenshtein::with_weights(weights);
let weighted_distance = weighted.distance("kitten", "sitting")?;
// Weighted Levenshtein with character-specific costs
let mut costs = HashMap::new();
costs.insert(('k', 's'), 0.1); // Make k->s substitution very cheap
let char_weights = LevenshteinWeights::default().with_substitution_costs(costs);
let custom_metric = WeightedLevenshtein::with_weights(char_weights);
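// With the cheap k->s substitution above, "kitten" -> "sitting" becomes
// cheaper than under uniform costs (the other edits keep their default weights)
let cheap_distance = custom_metric.distance("kitten", "sitting")?;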
// Weighted Damerau-Levenshtein with custom transposition cost
let dl_weights = DamerauLevenshteinWeights::new(1.0, 1.0, 1.0, 0.5); // transpositions cost 0.5
let weighted_dl = WeightedDamerauLevenshtein::with_weights(dl_weights);
let trans_distance = weighted_dl.distance("abc", "acb")?; // Returns 0.5 (one transposition)
// Soundex phonetic encoding
let soundex = Soundex::new();
let code = soundex.encode("Robert")?; // Returns "R163"
let sounds_like = soundex.sounds_like("Smith", "Smythe")?; // Returns true
// Metaphone phonetic algorithm
let metaphone = Metaphone::new();
let code = metaphone.encode("programming")?; // Returns "PRKRMN"
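For intuition, classic American Soundex keeps the first letter, maps the remaining consonants to digit classes (b/f/p/v→1, c/g/j/k/q/s/x/z→2, d/t→3, l→4, m/n→5, r→6), skips vowels while collapsing adjacent duplicate digits (with h and w transparent), then pads to four characters. A from-scratch sketch of that textbook procedure, independent of this crate's internal implementation:
// Textbook American Soundex, for illustration only.
fn soundex(name: &str) -> String {
    // Digit class for each consonant; vowels, h, w, y carry no code
    fn digit(c: char) -> Option<char> {
        match c.to_ascii_lowercase() {
            'b' | 'f' | 'p' | 'v' => Some('1'),
            'c' | 'g' | 'j' | 'k' | 'q' | 's' | 'x' | 'z' => Some('2'),
            'd' | 't' => Some('3'),
            'l' => Some('4'),
            'm' | 'n' => Some('5'),
            'r' => Some('6'),
            _ => None,
        }
    }
    let mut letters = name.chars().filter(|c| c.is_ascii_alphabetic());
    let first = match letters.next() {
        Some(c) => c.to_ascii_uppercase(),
        None => return String::new(),
    };
    let mut out = String::from(first);
    let mut prev = digit(first);
    for c in letters {
        // h and w are transparent: repeated digits across them still collapse
        if matches!(c.to_ascii_lowercase(), 'h' | 'w') {
            continue;
        }
        let d = digit(c);
        if let Some(d) = d {
            if prev != Some(d) {
                out.push(d);
                if out.len() == 4 {
                    break;
                }
            }
        }
        prev = d;
    }
    while out.len() < 4 {
        out.push('0');
    }
    out
}
assert_eq!(soundex("Robert"), "R163"); // matches the crate's output above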
use scirs2_text::preprocess::{BasicNormalizer, BasicTextCleaner, TextPreprocessor};
// Create a complete preprocessing pipeline
let normalizer = BasicNormalizer::new(true, true);
let cleaner = BasicTextCleaner::new(true, true, true);
let preprocessor = TextPreprocessor::new(normalizer, cleaner);
let processed = preprocessor.process("Hello, WORLD! This is a TEST.")?;
// Output: "hello world test"
use scirs2_text::embeddings::{Word2Vec, Word2VecConfig, Word2VecAlgorithm};
// Configure Word2Vec
let config = Word2VecConfig {
    vector_size: 100,
    window: 5,
    min_count: 2,
    algorithm: Word2VecAlgorithm::SkipGram,
    iterations: 15,
    negative_samples: 5,
    ..Default::default()
};
// Train embeddings
let mut word2vec = Word2Vec::builder()
    .config(config)
    .build()?;
word2vec.train(&documents)?;
// Get word vectors
if let Some(vector) = word2vec.get_vector("hello") {
    println!("Vector for 'hello': {:?}", vector);
}
// Find similar words
let similar = word2vec.most_similar("hello", 5)?;
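Under the hood, most_similar is typically a cosine ranking over the learned vectors. A manual check between two words (this sketch only assumes the vectors returned by get_vector are iterable collections of f32; adapt the element type to the crate's actual return type):
if let (Some(a), Some(b)) = (word2vec.get_vector("hello"), word2vec.get_vector("world")) {
    // Manual cosine similarity between two learned embeddings
    let dot: f32 = a.iter().zip(b.iter()).map(|(x, y)| x * y).sum();
    let na: f32 = a.iter().map(|x| x * x).sum::<f32>().sqrt();
    let nb: f32 = b.iter().map(|x| x * x).sum::<f32>().sqrt();
    println!("cosine(hello, world) = {:.3}", dot / (na * nb));
}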
Proven performance in production environments.
Core dependencies:
- ndarray: N-dimensional arrays
- regex: Regular expressions
- unicode-segmentation: Unicode text segmentation
- unicode-normalization: Unicode normalization
- scirs2-core: Core utilities and parallel processing
- lazy_static: Lazy static initialization
This project is dual-licensed under MIT OR Apache-2.0.
This is a production-ready crate. Contributions are welcome; please ensure they maintain the crate's production quality standards.