| Crates.io | vuikit |
| lib.rs | vuikit |
| version | 0.1.0 |
| created_at | 2025-09-22 23:35:15.216153+00 |
| updated_at | 2025-09-22 23:35:15.216153+00 |
| description | A Rust library for building real-time voice user interfaces with streaming support for VAD, STT, and TTS |
| homepage | |
| repository | |
| max_upload_size | |
| id | 1850739 |
| size | 60,557 |
A Rust library for building real-time voice user interfaces with streaming support for Voice Activity Detection (VAD), Speech-to-Text (STT), and Text-to-Speech (TTS).
Add VUIKit to your Cargo.toml:
[dependencies]
vuikit = "0.1.0"
futures = "0.3"
tokio = { version = "1.0", features = ["sync", "macros", "rt"] }
+-------------------+     +-------------------+     +------------------+
|     Your App      |     |    Components     |     |     Backends     |
+-------------------+     +-------------------+     +------------------+
| * Send audio      |---->| * Channel mgmt    |---->| * VAD algorithm  |
| * Receive events  |<----| * Lifecycle       |<----| * STT model      |
| * Business logic  |     | * Error handling  |     | * TTS engine     |
+-------------------+     +-------------------+     +------------------+
VUIKit provides two levels of abstraction:
Direct backend usage for one-time processing:
use vuikit::backend::vad::{VadBackend, VadEvent};
use vuikit::core::audio::AudioChunk;
use vuikit::core::error::VuiResult;
use futures::{Stream, StreamExt, stream};
use std::pin::Pin;
// Implement your VAD backend
// Minimal example backend: performs no real detection and simply tags
// every incoming chunk as silence, forwarding stream errors unchanged.
struct MyVadBackend;
impl VadBackend for MyVadBackend {
    type VadStream = Pin<Box<dyn Stream<Item = VuiResult<VadEvent>> + Send>>;
    /// Consume the audio stream and emit one `Silence` event per chunk.
    fn process_stream<S>(self, audio_stream: S) -> Self::VadStream
    where S: Stream<Item = VuiResult<AudioChunk>> + Send + Unpin + 'static
    {
        // `Result::map` replaces the manual Ok/Err re-wrapping match.
        Box::pin(audio_stream.map(|chunk| {
            chunk.map(|audio_chunk| VadEvent::Silence { audio_chunk })
        }))
    }
}
// Stand-in for a real audio source: yields an empty stream so the
// example compiles and runs without microphone access.
fn get_microphone_stream() -> impl Stream<Item = VuiResult<AudioChunk>> + Send + Unpin {
    stream::empty()
}
#[tokio::main]
async fn main() -> VuiResult<()> {
    // Feed the microphone stream straight into the VAD backend.
    let mut events = MyVadBackend.process_stream(get_microphone_stream());
    // Drain events as they arrive; `?` propagates any stream error.
    while let Some(event) = events.next().await {
        match event? {
            VadEvent::VoiceStarted { confidence, .. } => {
                println!("Voice detected! Confidence: {}", confidence)
            }
            VadEvent::VoiceEnded => println!("Voice stopped"),
            _ => {}
        }
    }
    Ok(())
}
Components for long-running voice applications:
use vuikit::components::vad::VadComponent;
use vuikit::backend::vad::{VadBackend, VadEvent};
use vuikit::core::audio::AudioChunk;
use vuikit::core::error::VuiResult;
use futures::{Stream, StreamExt, stream};
use std::pin::Pin;
// Hidden backend implementation for the example
// Hidden backend implementation for the example: every chunk is
// reported as silence, and stream errors pass through untouched.
struct MyVadBackend;
impl VadBackend for MyVadBackend {
    type VadStream = Pin<Box<dyn Stream<Item = VuiResult<VadEvent>> + Send>>;
    /// Consume the audio stream and emit one `Silence` event per chunk.
    fn process_stream<S>(self, audio_stream: S) -> Self::VadStream
    where S: Stream<Item = VuiResult<AudioChunk>> + Send + Unpin + 'static
    {
        // `Result::map` replaces the manual Ok/Err re-wrapping match.
        Box::pin(audio_stream.map(|chunk| {
            chunk.map(|audio_chunk| VadEvent::Silence { audio_chunk })
        }))
    }
}
// Simulated capture: returns 512 zero-valued samples at 16 kHz, mono.
async fn capture_audio() -> AudioChunk {
    AudioChunk::new(vec![0.0; 512], 16000, 1)
}
// Placeholder hook invoked when voice activity begins.
fn start_recording() {
    println!("Started recording");
}
// Placeholder hook invoked when voice activity ends.
fn stop_recording() {
    println!("Stopped recording");
}
#[tokio::main]
async fn main() {
    // Wrap the backend in a component that owns the channel plumbing.
    let (component, mut channels) = VadComponent::new(MyVadBackend);
    // Run the component's event loop on a background task.
    tokio::spawn(async move { component.run().await.unwrap() });
    // Push a few audio chunks and drain whatever events are ready.
    for _ in 0..3 { // Limited loop for example
        channels.audio_tx.send(capture_audio().await).unwrap();
        while let Ok(event) = channels.event_rx.try_recv() {
            match event {
                VadEvent::VoiceStarted { .. } => start_recording(),
                VadEvent::VoiceEnded => stop_recording(),
                _ => {}
            }
        }
    }
}
Backends: Implement the actual VAD/STT/TTS algorithms
Components: Wrapper abstractions around backends that handle channel management, lifecycle, and error handling. (Note: `process_stream` consumes `self` to avoid borrowing issues.)
VUIKit provides three main backend traits:
use vuikit::backend::vad::VadBackend;
use vuikit::backend::stt::SttBackend;
use vuikit::backend::tts::TtsBackend;
// These traits are already defined in VUIKit:
// VadBackend, SttBackend, TtsBackend
// See the actual trait definitions in the API documentation
Some backends provide multiple capabilities (e.g., OpenAI Whisper does both VAD and STT):
use vuikit::backend::vad::VadBackend;
use vuikit::backend::stt::SttBackend;
// Skeleton for a backend that could implement both VadBackend and
// SttBackend (e.g. OpenAI Whisper provides both capabilities).
struct WhisperBackend {
    // Backend-specific fields would go here
}
// A backend can implement multiple traits:
// impl VadBackend for WhisperBackend { /* VAD implementation */ }
// impl SttBackend for WhisperBackend { /* STT implementation */ }
All audio processing uses the AudioChunk type:
use vuikit::core::audio::AudioChunk;
let audio = AudioChunk::new(
vec![0.1, 0.2, 0.3], // samples as f32
16000, // sample rate
1 // channels
);
VUIKit uses a comprehensive error system:
use vuikit::core::error::{VuiError, VuiResult};
fn handle_result(result: VuiResult<()>) {
match result {
Ok(_) => println!("Success"),
Err(VuiError::BackendError(msg)) => eprintln!("Backend error: {}", msg),
Err(VuiError::StreamError(msg)) => eprintln!("Stream error: {}", msg),
Err(VuiError::InvalidAudioFormat(msg)) => eprintln!("Audio format error: {}", msg),
Err(VuiError::ConfigError(msg)) => eprintln!("Config error: {}", msg),
}
}
Here's a simple VAD backend implementation:
use vuikit::backend::vad::{VadBackend, VadEvent};
use vuikit::core::{audio::AudioChunk, error::VuiResult};
use futures::{Stream, StreamExt};
use std::pin::Pin;
/// Energy-threshold VAD backend: chunks whose mean squared amplitude
/// exceeds `threshold` are reported as voice.
pub struct SimpleVadBackend {
    threshold: f32,
}

impl SimpleVadBackend {
    /// Create a backend that triggers on mean energy above `threshold`.
    pub fn new(threshold: f32) -> Self {
        SimpleVadBackend { threshold }
    }
}
impl VadBackend for SimpleVadBackend {
    type VadStream = Pin<Box<dyn Stream<Item = VuiResult<VadEvent>> + Send>>;
    /// Classify each chunk as voice or silence using mean signal energy.
    fn process_stream<S>(self, audio_stream: S) -> Self::VadStream
    where S: Stream<Item = VuiResult<AudioChunk>> + Send + Unpin + 'static
    {
        Box::pin(audio_stream.map(move |chunk_result| {
            let chunk = chunk_result?;
            // Simple energy-based VAD: mean squared amplitude. An empty
            // chunk gets zero energy instead of the NaN the unguarded
            // 0.0 / 0 division would produce.
            let energy: f32 = if chunk.samples.is_empty() {
                0.0
            } else {
                chunk.samples.iter()
                    .map(|&sample| sample * sample)
                    .sum::<f32>() / chunk.samples.len() as f32
            };
            if energy > self.threshold {
                Ok(VadEvent::VoiceStarted {
                    // Clamp so callers can rely on confidence being <= 1.0.
                    confidence: (energy / self.threshold).min(1.0),
                    audio_chunk: chunk
                })
            } else {
                Ok(VadEvent::Silence { audio_chunk: chunk })
            }
        }))
    }
}
Contributions are welcome! Please feel free to submit a Pull Request.
This project is licensed under the MIT License - see the LICENSE file for details.