| Field | Value |
|---|---|
| Crates.io | aimds-analysis |
| lib.rs | aimds-analysis |
| version | 0.1.0 |
| created_at | 2025-10-27 16:38:37.85933+00 |
| updated_at | 2025-10-27 16:38:37.85933+00 |
| description | Deep behavioral analysis layer for AIMDS with temporal neural verification |
| homepage | |
| repository | https://github.com/your-org/aimds |
| max_upload_size | |
| id | 1903240 |
| size | 121,178 |
Behavioral analysis and formal verification for AI threat detection: temporal pattern analysis, LTL policy checking, and anomaly detection with sub-520ms latency.
Part of AIMDS (AI Manipulation Defense System) by rUv: production-ready adversarial defense for AI systems.
Quick start:

```rust
use aimds_core::{Config, PromptInput};
use aimds_analysis::AnalysisEngine;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Initialize the analysis engine
    let config = Config::default();
    let analyzer = AnalysisEngine::new(config).await?;

    // Analyze behavioral patterns
    let input = PromptInput::new("Unusual sequence of API calls...", None);
    let result = analyzer.analyze(&input, None).await?;

    println!("Anomaly score: {:.2}", result.anomaly_score);
    println!("Attractor type: {:?}", result.attractor_type);
    println!("Policy violations: {}", result.policy_violations.len());
    println!("Latency: {}ms", result.latency_ms);

    Ok(())
}
```
Add to your Cargo.toml:
```toml
[dependencies]
aimds-analysis = "0.1.0"
```
| Component | Target | Actual | Status |
|---|---|---|---|
| Behavioral Analysis | <100ms | ~80ms | ✅ |
| Policy Verification | <500ms | ~420ms | ✅ |
| Combined Deep Path | <520ms | ~500ms | ✅ |
| Anomaly Detection | <50ms | ~35ms | ✅ |
| Baseline Training | <1s | ~850ms | ✅ |
Benchmarks run on 4-core Intel Xeon, 16GB RAM. See ../../RUST_TEST_REPORT.md for details.
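Since the combined deep path is budgeted at 520ms, a caller can also enforce that budget at the call site. A minimal sketch using `tokio::time::timeout`, independent of the crate's own `AIMDS_ANALYSIS_TIMEOUT_MS` setting:

```rust
use std::time::Duration;
use tokio::time::timeout;

// Enforce the 520ms deep-path budget; on timeout, fall back instead of blocking.
match timeout(Duration::from_millis(520), analyzer.analyze(&input, None)).await {
    Ok(result) => {
        let result = result?;
        println!("Deep analysis finished in {}ms", result.latency_ms);
    }
    Err(_elapsed) => {
        println!("Deep path over budget; falling back to the fast path");
    }
}
```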
Architecture:

```
┌────────────────────────────────────────────────┐
│                 aimds-analysis                 │
├────────────────────────────────────────────────┤
│                                                │
│   ┌──────────────┐        ┌──────────────┐     │
│   │  Behavioral  │        │    Policy    │     │
│   │   Analyzer   │        │   Verifier   │     │
│   └──────┬───────┘        └──────┬───────┘     │
│          │                       │             │
│          └───────────┬───────────┘             │
│                      │                         │
│             ┌────────▼────────┐                │
│             │    Analysis     │                │
│             │     Engine      │                │
│             └────────┬────────┘                │
│                      │                         │
│          ┌───────────┴────────────┐            │
│          │                        │            │
│   ┌──────▼──────┐         ┌───────▼──────┐     │
│   │  Attractor  │         │   Temporal   │     │
│   │   Studio    │         │    Neural    │     │
│   └─────────────┘         └──────────────┘     │
│                                                │
│         Midstream Platform Integration         │
│                                                │
└────────────────────────────────────────────────┘
```
Temporal Attractor Classification:
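A hedged sketch of branching on the classification; the variant names (`FixedPoint`, `LimitCycle`, `Chaotic`) are illustrative, not confirmed exports of the crate:

```rust
// Hypothetical AttractorType variants; the crate's actual enum may differ.
let result = analyzer.analyze(&input, None).await?;
match result.attractor_type {
    AttractorType::FixedPoint => println!("Converging to stable behavior"),
    AttractorType::LimitCycle => println!("Periodic, repeating pattern"),
    AttractorType::Chaotic => println!("Chaotic dynamics: escalate for review"),
    _ => println!("Unclassified attractor"),
}
```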
Lyapunov Exponent Calculation:
```rust
let result = analyzer.analyze_sequence(&sequence).await?;
match result.lyapunov_exponent {
    // An exact floating-point zero is unlikely; treat near-zero as periodic.
    x if x.abs() < 1e-9 => println!("Periodic behavior"),
    x if x > 0.0 => println!("Chaotic behavior detected"),
    _ => println!("Stable behavior"),
}
```
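For intuition: the Lyapunov exponent measures the average exponential rate at which nearby trajectories diverge, which is why a positive value signals chaos. A self-contained illustration on the logistic map, independent of the crate's estimator (which works on observed behavior sequences):

```rust
// Standalone illustration: largest Lyapunov exponent of the logistic map
// x' = r*x*(1 - x), estimated as the orbit average of ln|f'(x)|,
// where f'(x) = r*(1 - 2x).
fn logistic_lyapunov(r: f64, x0: f64, iters: usize) -> f64 {
    let mut x = x0;
    let mut sum = 0.0;
    for _ in 0..iters {
        // Guard against ln(0) if the orbit lands on the map's critical point.
        sum += (r * (1.0 - 2.0 * x)).abs().max(1e-12).ln();
        x = r * x * (1.0 - x);
    }
    sum / iters as f64
}

fn main() {
    // r = 4.0 is chaotic: the estimate converges to ln 2 ≈ 0.693 (positive).
    println!("{:.3}", logistic_lyapunov(4.0, 0.3, 100_000));
    // r = 3.2 settles into a stable 2-cycle: the exponent is negative.
    println!("{:.3}", logistic_lyapunov(3.2, 0.3, 100_000));
}
```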
Baseline Learning:
```rust
// Train baseline on normal behavior
analyzer.train_baseline(&normal_sequences).await?;

// Detect deviations from the learned baseline
let result = analyzer.analyze(&new_input, None).await?;
if result.anomaly_score > 0.8 {
    println!("Significant deviation from baseline");
}
```
Linear Temporal Logic (LTL):

Supports the standard LTL operators:
- `G p` (globally): `p` holds in every state
- `F p` (finally): `p` eventually holds
- `X p` (next): `p` holds in the next state
- `p U q` (until): `p` holds until `q` does
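As a quick intuition for the two operators used in the policies below, a standalone sketch of their finite-trace semantics (not the crate's checker):

```rust
// Finite-trace semantics for G and F over a boolean proposition.
fn globally(trace: &[bool]) -> bool {
    trace.iter().all(|&p| p) // G(p): p holds in every state
}

fn finally(trace: &[bool]) -> bool {
    trace.iter().any(|&p| p) // F(p): p holds in at least one state
}

fn main() {
    assert!(globally(&[true, true, true]));
    assert!(!globally(&[true, false, true]));
    assert!(finally(&[false, false, true]));
}
```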
Policy Examples:
```rust
use aimds_analysis::{PolicyVerifier, Policy};

let mut verifier = PolicyVerifier::new();

// "Users must always be authenticated"
let auth_policy = Policy::new(
    "auth_required",
    "G(authenticated)",
    1.0, // priority
);

// "PII must eventually be redacted"
let pii_policy = Policy::new(
    "pii_redaction",
    "F(redacted)",
    0.9,
);

verifier.add_policy(auth_policy);
verifier.add_policy(pii_policy);

// `trace` is the event trace under test (see the custom-policy example below)
let result = verifier.verify(&trace).await?;
for violation in result.violations {
    println!("Policy violated: {}", violation.policy_id);
}
```
Multi-Dimensional Analysis:
```rust
// Analyze a sequence with multiple features per step
let sequence = vec![
    vec![0.1, 0.2, 0.3], // Feature vector 1
    vec![0.2, 0.3, 0.4], // Feature vector 2
    // ... more vectors
];
let result = analyzer.analyze_sequence(&sequence).await?;
println!("Anomaly score: {:.2}", result.anomaly_score);
```
Statistical Metrics:
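As an illustration of the kind of statistic that baseline anomaly scoring typically relies on, a z-score of a new observation against a learned mean and spread (a standalone sketch, not the crate's API):

```rust
// Standalone sketch: z-score of an observation against a learned baseline.
fn z_score(value: f64, baseline_mean: f64, baseline_std: f64) -> f64 {
    if baseline_std == 0.0 {
        return 0.0; // a degenerate baseline with no spread carries no signal
    }
    (value - baseline_mean) / baseline_std
}

fn main() {
    // Baseline: mean 10.0, std 2.0; an observation of 16.0 is 3 sigma out.
    println!("{}", z_score(16.0, 10.0, 2.0)); // 3.0
}
```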
Combined analysis with detection:

```rust
use aimds_analysis::AnalysisEngine;
use aimds_core::{Config, PromptInput};

let analyzer = AnalysisEngine::new(Config::default()).await?;

// Behavioral + policy verification, chained after detection.
// `detector` is assumed to come from AIMDS's detection layer.
let input = PromptInput::new("User request sequence", None);
let detection = detector.detect(&input).await?;
let result = analyzer.analyze(&input, Some(&detection)).await?;

println!("Threat level: {:?}", result.threat_level);
println!("Anomaly score: {:.2}", result.anomaly_score);
println!("Policy violations: {}", result.policy_violations.len());
println!("Attractor type: {:?}", result.attractor_type);
```
Training a baseline:

```rust
// Collect normal behavior samples
let normal_sequences = vec![
    PromptInput::new("Normal query 1", None),
    PromptInput::new("Normal query 2", None),
    // ... 100+ samples recommended
];

// Train the baseline
analyzer.train_baseline(&normal_sequences).await?;

// Now analyze new inputs against the baseline
let result = analyzer.analyze(&new_input, None).await?;
```
Custom security policies:

```rust
use aimds_analysis::{PolicyVerifier, Policy, LTLChecker};

let mut verifier = PolicyVerifier::new();

// Add security policies
verifier.add_policy(Policy::new(
    "rate_limit",
    "G(requests_per_minute < 100)",
    0.9,
));
verifier.add_policy(Policy::new(
    "auth_timeout",
    "F(session_timeout)",
    0.8,
));

// Verify a trace. Values are shown as f64 so the vector is homogeneous;
// 1.0/0.0 stand in for true/false.
let trace = vec![
    ("authenticated", 1.0),
    ("requests_per_minute", 95.0),
    ("session_timeout", 0.0),
];
let result = verifier.verify(&trace).await?;
for violation in result.violations {
    println!(
        "Violated: {} (confidence: {})",
        violation.policy_id, violation.confidence
    );
}
```
Threshold tuning:

```rust
// Adjust sensitivity based on environment
analyzer.update_threshold(0.7).await?; // More sensitive

// Or per-analysis
let result = analyzer.analyze_with_threshold(
    &input,
    None,
    0.9, // Less sensitive
).await?;
```
Environment variables:

```bash
# Behavioral analysis
AIMDS_BEHAVIORAL_ANALYSIS_ENABLED=true
AIMDS_BEHAVIORAL_THRESHOLD=0.75
AIMDS_BASELINE_MIN_SAMPLES=100

# Policy verification
AIMDS_POLICY_VERIFICATION_ENABLED=true
AIMDS_POLICY_TIMEOUT_MS=500
AIMDS_POLICY_STRICT_MODE=true

# Performance tuning
AIMDS_ANALYSIS_TIMEOUT_MS=520
AIMDS_MAX_SEQUENCE_LENGTH=10000
```
Or configure in code:

```rust
let config = Config {
    behavioral_analysis_enabled: true,
    behavioral_threshold: 0.75,
    policy_verification_enabled: true,
    ..Config::default()
};
let analyzer = AnalysisEngine::new(config).await?;
```
The analysis layer builds on production-validated Midstream crates.
All integrations use 100% real APIs (no mocks) with validated performance.
Run tests:
```bash
# Unit tests
cargo test --package aimds-analysis

# Integration tests
cargo test --package aimds-analysis --test integration_tests

# Benchmarks
cargo bench --package aimds-analysis
```
Test Coverage: 100% (27/27 tests passing)
Example tests:
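An illustrative shape for one such test, mirroring the quick-start API (the assertion and test name are hypothetical):

```rust
use aimds_analysis::AnalysisEngine;
use aimds_core::{Config, PromptInput};

#[tokio::test]
async fn analyze_returns_bounded_anomaly_score() -> Result<(), Box<dyn std::error::Error>> {
    let analyzer = AnalysisEngine::new(Config::default()).await?;
    let input = PromptInput::new("Unusual sequence of API calls...", None);

    let result = analyzer.analyze(&input, None).await?;

    // Scores are treated as probabilities in the examples above.
    assert!((0.0..=1.0).contains(&result.anomaly_score));
    Ok(())
}
```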
Prometheus metrics exposed:
```text
# Analysis metrics
aimds_analysis_requests_total{type="behavioral|policy|combined"}
aimds_analysis_latency_ms{component="behavioral|policy"}
aimds_anomaly_score_distribution
aimds_policy_violations_total{policy_id}

# Performance metrics
aimds_baseline_training_time_ms
aimds_attractor_classification_latency_ms
aimds_ltl_verification_latency_ms
```
Structured logs with tracing:
```rust
info!(
    anomaly_score = result.anomaly_score,
    attractor_type = ?result.attractor_type,
    violations = result.policy_violations.len(),
    latency_ms = result.latency_ms,
    "Analysis complete"
);
```
Detect anomalous agent behavior:
```rust
// Analyze agent action sequences
let agent_trace = vec![
    agent.action_at(t0),
    agent.action_at(t1),
    // ... temporal sequence
];
let result = analyzer.analyze_sequence(&agent_trace).await?;
if result.anomaly_score > 0.8 {
    coordinator.flag_agent(agent.id, result).await?;
}
```
Enforce rate limits and access policies:
```rust
// Define policies
verifier.add_policy(Policy::new(
    "rate_limit",
    "G(requests_per_second < 100)",
    1.0,
));

// Verify each request
let result = verifier.verify(&request_trace).await?;
if !result.violations.is_empty() {
    return Err("Policy violation".into());
}
```
Identify unusual transaction patterns:
```rust
// Train on normal transactions
analyzer.train_baseline(&normal_transactions).await?;

// Analyze a new transaction
let result = analyzer.analyze(&new_transaction, None).await?;
if result.anomaly_score > 0.9 {
    fraud_system.flag_for_review(new_transaction).await?;
}
```
See CONTRIBUTING.md for guidelines.
MIT OR Apache-2.0