| Crates.io | hope_agents |
| lib.rs | hope_agents |
| version | 0.1.0 |
| created_at | 2025-12-17 22:57:21.170025+00 |
| updated_at | 2025-12-17 22:57:21.170025+00 |
| description | HOPE Agents: Hierarchical Optimizing Policy Engine for AIngle AI agents |
| homepage | https://apilium.com |
| repository | https://github.com/ApiliumCode/aingle |
| id | 1991271 |
| size | 399,388 |
Hierarchical Optimizing Policy Engine for AIngle AI agents.
HOPE Agents is a complete reinforcement learning framework for building autonomous AI agents that learn from experience, predict future states and detect anomalies, and plan toward hierarchical goals:
┌─────────────────────────────────────────────────────────────┐
│                         HOPE Agent                          │
├─────────────────────────────────────────────────────────────┤
│                                                             │
│   Observation → State → Decision → Action → Learning        │
│                                                             │
│  ┌──────────────┐  ┌──────────────┐  ┌──────────────────┐   │
│  │  Predictive  │  │ Hierarchical │  │     Learning     │   │
│  │    Model     │  │  Goal Solver │  │      Engine      │   │
│  │              │  │              │  │                  │   │
│  │ • Anomaly    │  │ • Goals      │  │ • Q-Learning     │   │
│  │ • Forecast   │  │ • Planning   │  │ • SARSA          │   │
│  │ • Patterns   │  │ • Conflicts  │  │ • TD Learning    │   │
│  │              │  │              │  │ • Experience     │   │
│  └──────────────┘  └──────────────┘  └──────────────────┘   │
│                                                             │
└─────────────────────────────────────────────────────────────┘
use hope_agents::{Agent, SimpleAgent, Goal, Observation, Rule, Condition, Action};
// Create a simple reactive agent
let mut agent = SimpleAgent::new("sensor_monitor");
// Add a rule: if temperature > 30, alert
let rule = Rule::new(
    "high_temp",
    Condition::above("temperature", 30.0),
    Action::alert("Temperature too high!"),
);
agent.add_rule(rule);
// Process observations
let obs = Observation::sensor("temperature", 35.0);
agent.observe(obs.clone());
let action = agent.decide();
let result = agent.execute(action.clone());
agent.learn(&obs, &action, &result);
use hope_agents::{HopeAgent, Observation, Goal, Priority, Outcome, ActionResult};
// Create a HOPE agent with learning, prediction, and hierarchical goals
let mut agent = HopeAgent::with_default_config();
// Set a goal
let goal = Goal::maintain("temperature", 20.0..25.0)
    .with_priority(Priority::High);
agent.set_goal(goal);
// Agent loop with reinforcement learning
for episode in 0..100 {
    let obs = Observation::sensor("temperature", 22.0);
    let action = agent.step(obs.clone());

    // Execute action in environment and get reward
    let reward = 1.0;
    let next_obs = Observation::sensor("temperature", 21.0);
    let result = ActionResult::success(&action.id);

    let outcome = Outcome::new(action, result, reward, next_obs, false);
    agent.learn(outcome);
}
// Check statistics
let stats = agent.get_statistics();
println!("Episodes: {}", stats.episodes_completed);
println!("Success rate: {:.2}%", stats.success_rate * 100.0);
use hope_agents::{AgentCoordinator, HopeAgent, Message, Observation};
use std::collections::HashMap;
// Create coordinator
let mut coordinator = AgentCoordinator::new();
// Register agents
let agent1 = HopeAgent::with_default_config();
let agent2 = HopeAgent::with_default_config();
let id1 = coordinator.register_agent(agent1);
let id2 = coordinator.register_agent(agent2);
// Broadcast message
coordinator.broadcast(Message::new("update", "System status changed"));
// Step all agents
let mut observations = HashMap::new();
observations.insert(id1, Observation::sensor("temp", 20.0));
observations.insert(id2, Observation::sensor("humidity", 60.0));
let actions = coordinator.step_all(observations);
use hope_agents::{HopeAgent, AgentPersistence, CheckpointManager};
use std::path::Path;
let mut agent = HopeAgent::with_default_config();
// Train the agent...
// Save agent state
agent.save_to_file(Path::new("agent_state.json")).unwrap();
// Later, load agent state
let loaded_agent = HopeAgent::load_from_file(Path::new("agent_state.json")).unwrap();
// Or use checkpoint manager for automatic checkpointing
let mut manager = CheckpointManager::new(Path::new("checkpoints"), 5)
    .with_interval(1000);
// During training
for step in 0..10000 {
    // ... train agent ...
    if manager.should_checkpoint(step) {
        manager.save_checkpoint(&agent, step).unwrap();
    }
}
HOPE agents support multiple operation modes:
agent.set_mode(OperationMode::Exploration); // High exploration
agent.set_mode(OperationMode::Exploitation); // Pure exploitation
agent.set_mode(OperationMode::Adaptive); // Auto-adjust
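A common pattern is to schedule these modes over training. The sketch below is illustrative only: it assumes OperationMode is re-exported at the crate root like the other types, and it reuses the HopeAgent training loop shown earlier.

use hope_agents::{HopeAgent, Observation, OperationMode};

let mut agent = HopeAgent::with_default_config();
agent.set_mode(OperationMode::Exploration);

for episode in 0..500 {
    // Switch to pure exploitation once the initial exploration budget is spent.
    if episode == 400 {
        agent.set_mode(OperationMode::Exploitation);
    }

    let obs = Observation::sensor("temperature", 22.0);
    let _action = agent.step(obs);
    // ... execute the action, build an Outcome, and call agent.learn(...) as above ...
}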
// Achieve a target value
let goal = Goal::achieve("temperature", 25.0);
// Maintain value in range
let goal = Goal::maintain("humidity", 40.0..60.0);
// Avoid certain values
let goal = Goal::avoid("pressure", 100.0);
// Explore and discover
let goal = Goal::explore("new_area");
Goals are automatically decomposed into subgoals:
let parent = Goal::achieve("optimize_system", 1.0)
    .with_priority(Priority::High);
let goal_id = agent.set_goal(parent);
// Automatically creates subgoals for different aspects
let active_goals = agent.active_goals();
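To inspect the decomposition, the active goal list can be iterated; this is a minimal sketch that assumes the returned list can be iterated by reference and that Goal implements Debug:

for goal in &active_goals {
    // Each entry is either the parent goal or one of its auto-generated subgoals.
    println!("active goal: {:?}", goal);
}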
Agents can make group decisions through voting:
let mut coordinator = AgentCoordinator::new();
// Create proposal
let proposal_id = coordinator.create_proposal(
    "new_policy",
    "Should we adopt the new temperature policy?"
);
// Agents vote
// ... (voting happens through message passing) ...
// Check consensus
match coordinator.get_consensus(&proposal_id) {
    Some(ConsensusResult::Decided { approved, votes_for, votes_against, .. }) => {
        println!("Decision: {}", if approved { "Approved" } else { "Rejected" });
        println!("Votes: {} for, {} against", votes_for, votes_against);
    }
    _ => println!("Voting in progress..."),
}
Optimized for resource-constrained devices:
use hope_agents::AgentConfig;
let config = AgentConfig::iot_mode();
let agent = SimpleAgent::with_config("iot_agent", config);
// Features:
// - Limited memory (128KB)
// - Disabled learning
// - Reduced buffer sizes
use hope_agents::{HopeAgent, HopeConfig, LearningConfig, PredictiveConfig, LearningAlgorithm};
let config = HopeConfig {
    learning: LearningConfig {
        learning_rate: 0.1,
        discount_factor: 0.95,
        algorithm: LearningAlgorithm::QLearning,
        epsilon: 0.2,
        ..Default::default()
    },
    predictive: PredictiveConfig {
        history_size: 500,
        ..Default::default()
    },
    anomaly_sensitivity: 0.8,
    auto_decompose_goals: true,
    ..Default::default()
};
let agent = HopeAgent::new(config);
HOPE Agents is designed for high performance.
Run benchmarks with:
cargo bench
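For a sense of what a benchmark looks like, here is a minimal Criterion-style sketch of the decision step; the use of the criterion crate and the benchmark name are illustrative assumptions, not the crate's actual bench suite:

use criterion::{criterion_group, criterion_main, Criterion};
use hope_agents::{Agent, SimpleAgent, Observation};

fn bench_decide(c: &mut Criterion) {
    // Benchmark a single decision on a pre-populated agent.
    let mut agent = SimpleAgent::new("bench_agent");
    agent.observe(Observation::sensor("temperature", 35.0));
    c.bench_function("simple_agent_decide", |b| b.iter(|| agent.decide()));
}

criterion_group!(benches, bench_decide);
criterion_main!(benches);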
Comprehensive test suite with 133 tests.
Run tests:
# All tests
cargo test
# Specific module
cargo test coordination
cargo test persistence
# Integration tests
cargo test --test integration_test
# With output
cargo test -- --nocapture
See the tests/integration_test.rs file for comprehensive end-to-end examples.
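As a rough sketch of that style of test, using only the SimpleAgent API from the quick start (the test name and body are illustrative, not copied from the actual file):

use hope_agents::{Agent, SimpleAgent, Rule, Condition, Action, Observation};

#[test]
fn reactive_agent_runs_full_pipeline() {
    let mut agent = SimpleAgent::new("test_agent");
    agent.add_rule(Rule::new(
        "high_temp",
        Condition::above("temperature", 30.0),
        Action::alert("Temperature too high!"),
    ));

    // Drive one observe → decide → execute → learn cycle and check it completes.
    let obs = Observation::sensor("temperature", 35.0);
    agent.observe(obs.clone());
    let action = agent.decide();
    let result = agent.execute(action.clone());
    agent.learn(&obs, &action, &result);
}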
Full API documentation is available at docs.rs/hope_agents.
Generate local documentation:
cargo doc --open
HOPE Agents integrates seamlessly with the AIngle network:
// Observe network events
let obs = Observation::network_event("node_joined", node_id);
// Execute actions on the network
let action = Action::send_message("Hello, network!");
// Store data in AIngle
let action = Action::store_data(serde_json::to_string(&data).unwrap());
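Putting these pieces together, a node-level loop might look roughly like the sketch below; the node_id value and the way the chosen action is dispatched back to the network are assumptions for illustration:

use hope_agents::{HopeAgent, Observation};

let mut agent = HopeAgent::with_default_config();

// Feed a network event into the agent and let it pick an action.
let node_id = "node-42";
let obs = Observation::network_event("node_joined", node_id);
let _action = agent.step(obs);

// The chosen action would then be dispatched to AIngle,
// e.g. by sending a message or storing data as shown above.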
Future enhancements are planned beyond the current, fully implemented feature set.
Contributions are welcome! Please see CONTRIBUTING.md for guidelines.
Licensed under the Apache License, Version 2.0. See LICENSE for details.
If you use HOPE Agents in your research, please cite:
@software{hope_agents,
  title = {HOPE Agents: Hierarchical Optimizing Policy Engine},
  author = {Apilium Technologies},
  year = {2025},
  url = {https://github.com/ApiliumCode/aingle}
}
Status: ✅ 100% Complete
All core features implemented, tested, and documented.