| Crates.io | ai00-mem |
| lib.rs | ai00-mem |
| version | 0.1.0 |
| created_at | 2025-07-28 16:01:34.726917+00 |
| updated_at | 2025-07-28 16:01:34.726917+00 |
| description | AI00-MEM: A high-performance memory system for AI applications with advanced RAG capabilities, vector-graph hybrid storage, and adaptive learning. Built with Rust for scalability and efficiency. |
| homepage | |
| repository | https://github.com/Ai00-X/ai00-mem |
| max_upload_size | |
| id | 1771329 |
| size | 492,249 |
English | 中文
An advanced memory system based on A-Mem and HippoRAG papers, designed for personal AI assistants to provide intelligent memory storage, retrieval, and learning capabilities.
Add to your Cargo.toml:
[dependencies]
ai00-mem = "0.1.0"
tokio = { version = "1.0", features = ["full"] }
use ai00_mem::{MemoryManager, Config, CreateMemoryRequest, Context, Query};
use std::sync::Arc;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Build a configuration, tightening the vector-similarity cutoff.
    let mut config = Config::default();
    config.vector.similarity_threshold = 0.7;

    // Open the hybrid vector/graph store and hand it to the manager.
    let db = Arc::new(ai00_mem::database::VectorGraphDB::new(&config).await?);
    let manager = MemoryManager::new(db, config).await?;

    // Store a single memory, tagging it with session context.
    let request = CreateMemoryRequest {
        content: "Rust is a systems programming language focused on safety, concurrency, and performance.".to_string(),
        context: Context {
            session_id: Some("session_001".to_string()),
            current_topic: Some("programming languages".to_string()),
            ..Default::default()
        },
        attributes: None,
        force_connections: None,
    };
    let memory = manager.create_memory_from_request(request).await?;
    println!("Created memory: {}", memory.id);

    // Retrieve it again via a free-text relevance query.
    let query = Query {
        text: "Rust programming language".to_string(),
        memory_type: None,
        limit: 10,
        offset: 0,
        sort_by: ai00_mem::core::SortBy::Relevance,
        weights: Default::default(),
    };
    let hits = manager.retrieve_memories(query).await?;
    println!("Found {} related memories", hits.len());
    for hit in hits {
        println!("- {} (relevance: {:.2})", hit.memory.content, hit.relevance_score);
    }
    Ok(())
}
use ai00_mem::prelude::*;
#[tokio::main]
async fn main() -> Result<()> {
    let config = Config::default();
    let db = Arc::new(VectorGraphDB::new(config.clone()).await?);

    // Initialize the complete system: storage, retrieval, and learning.
    let memory_manager = MemoryManager::new(db.clone(), config.clone()).await?;
    let retriever = HippoRAGRetriever::new(db.clone(), config.clone()).await?;
    let learning_engine = LearningEngine::new(db.clone(), config.clone()).await?;
    let _ = &memory_manager; // manager kept for the session; unused in this snippet

    // Build a contextual query, filtered to knowledge memories from the last 30 days.
    let query = Query {
        text: "machine learning algorithms".to_string(),
        user_id: "user_123".to_string(),
        context: Some(Context {
            session_id: "session_456".to_string(),
            timestamp: Utc::now(),
            user_state: UserState::Active,
            task_context: Some("learning".to_string()),
            emotional_state: Some(EmotionalState::Curious),
            attention_level: Some(AttentionLevel::High),
            ..Default::default()
        }),
        filters: vec![
            Filter::MemoryType(MemoryType::Knowledge),
            Filter::TimeRange(Utc::now() - Duration::days(30), Utc::now()),
        ],
        max_results: 5,
        min_relevance: 0.6,
    };

    // Use HippoRAG (hippocampus-inspired) retrieval.
    let results = retriever.hippocampus_retrieval(&query).await?;

    // Record explicit user feedback for the top hit.
    // FIX: guard against an empty result set — indexing `results[0]`
    // unconditionally panics when the query matches nothing.
    if let Some(top) = results.first() {
        let feedback = FeedbackRecord {
            memory_id: top.memory.id.clone(),
            user_id: "user_123".to_string(),
            feedback_type: FeedbackType::Explicit,
            score: 0.9,
            context: FeedbackContext {
                query: query.text.clone(),
                result_position: 0,
                session_id: "session_456".to_string(),
                device_type: Some("desktop".to_string()),
                time_of_day: 14,
                day_of_week: 1,
            },
            timestamp: Utc::now(),
        };
        learning_engine.record_feedback(feedback).await?;
    }

    // Run one learning cycle over the accumulated feedback.
    let learning_results = learning_engine.run_learning_cycle().await?;
    println!("Learning cycle completed, executed {} tasks", learning_results.len());
    Ok(())
}
`MemoryManager::new(db: Arc<VectorGraphDB>, config: Config) -> Result<Self>`: Create a new memory manager
`create_memory_from_request(&self, request: CreateMemoryRequest) -> Result<Memory>`: Create a new memory from a request
`retrieve_memories(&self, query: Query) -> Result<Vec<RetrievalResult>>`: Retrieve relevant memories
`update_memory(&self, request: UpdateMemoryRequest) -> Result<Memory>`: Update a memory
`delete_memory(&self, memory_id: &str) -> Result<()>`: Delete a memory
`generate_embedding(&self, text: &str) -> Result<Vec<f32>>`: Generate a text embedding vector
`CreateMemoryRequest`: Memory creation request
`content: String`: Memory content
`context: Context`: Context information
`attributes: Option<MemoryAttributes>`: Memory attributes (optional)
`force_connections: Option<Vec<MemoryId>>`: Memory IDs to force connections to (optional)
`UpdateMemoryRequest`: Memory update request
`memory_id: MemoryId`: Memory ID
`updates: Vec<UpdateType>`: List of update operations
Based on cutting-edge research:
We welcome contributions! Please see CONTRIBUTING.md for guidelines.
This project is licensed under the MIT License - see the LICENSE file for details.