| Crates.io | mcp-host |
| lib.rs | mcp-host |
| version | 0.1.12 |
| created_at | 2026-01-04 02:20:29.915129+00 |
| updated_at | 2026-01-23 00:42:15.869718+00 |
| description | Production-grade MCP host crate for building Model Context Protocol servers |
| homepage | |
| repository | https://github.com/seuros/mcphost-rs |
| max_upload_size | |
| id | 2021173 |
| size | 783,363 |
Production-grade Rust crate for building Model Context Protocol (MCP) servers.
Related crates: breaker-machines, chrono-machines, throttle-machines. Log output is surfaced via notifications/message for LLM visibility.

[dependencies]
mcp-host = "0.1"
use mcp_host::prelude::*;
use async_trait::async_trait;
use serde_json::Value;
// Define a tool
struct EchoTool;
#[async_trait]
impl Tool for EchoTool {
fn name(&self) -> &str { "echo" }
fn description(&self) -> Option<&str> {
Some("Echoes back the input message")
}
fn input_schema(&self) -> Value {
serde_json::json!({
"type": "object",
"properties": {
"message": { "type": "string" }
},
"required": ["message"]
})
}
async fn execute(&self, ctx: ExecutionContext<'_>) -> Result<Vec<Box<dyn Content>>, ToolError> {
let msg = ctx.params["message"].as_str().unwrap_or("?");
Ok(vec![Box::new(TextContent::new(format!("Echo: {}", msg)))])
}
}
// Entry point for the example server: create the server, register the
// echo tool, declare the `tools` capability, then serve over STDIO.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let server = Server::new("my-server", "0.1.0");
    // Registration takes the tool by value; the registry stores it.
    server.tool_registry().register(EchoTool);
    // Advertise tool support. `list_changed: Some(false)` signals a static
    // tool list (no `notifications/tools/list_changed` will be emitted).
    server.set_capabilities(ServerCapabilities {
        tools: Some(mcp_host::protocol::capabilities::ToolsCapability {
            list_changed: Some(false),
        }),
        ..Default::default()
    });
    // Blocks until the client disconnects or the transport fails;
    // any error propagates out of `main`.
    server.run(StdioTransport::new()).await
}
| Component | Description |
|---|---|
| Server | Main MCP server with request routing and session management |
| ToolRegistry | Thread-safe registry for tool implementations |
| ResourceManager | Manages URI-addressable resources |
| PromptManager | Manages reusable prompt templates |
| MiddlewareChain | Request processing pipeline |
// Implement custom tools
/// Contract for a callable MCP tool.
///
/// Implementors supply identity (`name`), discovery metadata
/// (`description`, `input_schema`), an optional per-session visibility
/// filter, and the async `execute` entry point.
#[async_trait]
pub trait Tool: Send + Sync {
    /// Unique tool name advertised in `tools/list`.
    fn name(&self) -> &str;
    /// Optional human-readable description shown to clients.
    fn description(&self) -> Option<&str>;
    /// JSON Schema describing the tool's input arguments.
    fn input_schema(&self) -> Value;
    /// Optional: contextual visibility (default: always visible).
    /// The parameter is `_ctx` because the default body ignores it;
    /// this avoids an `unused_variables` warning for implementors copying it.
    fn is_visible(&self, _ctx: &VisibilityContext) -> bool { true }
    /// Execute with full session context.
    async fn execute(&self, ctx: ExecutionContext<'_>) -> Result<Vec<Box<dyn Content>>, ToolError>;
}
// Implement custom resources
/// Contract for a URI-addressable resource.
#[async_trait]
pub trait Resource: Send + Sync {
    /// Stable URI identifying this resource.
    fn uri(&self) -> &str;
    /// Display name advertised in `resources/list`.
    fn name(&self) -> &str;
    /// Optional human-readable description.
    fn description(&self) -> Option<&str>;
    /// Optional MIME type of the resource contents.
    fn mime_type(&self) -> Option<&str>;
    /// Optional: contextual visibility (default: always visible).
    /// `_ctx` is unused by the default body; the underscore suppresses
    /// the `unused_variables` warning.
    fn is_visible(&self, _ctx: &VisibilityContext) -> bool { true }
    /// Read the resource contents with full session context.
    async fn read(&self, ctx: ExecutionContext<'_>) -> Result<Vec<Box<dyn Content>>, ResourceError>;
}
// Implement custom prompts
/// Contract for a reusable prompt template.
#[async_trait]
pub trait Prompt: Send + Sync {
    /// Prompt name advertised in `prompts/list`.
    fn name(&self) -> &str;
    /// Optional human-readable description.
    fn description(&self) -> Option<&str>;
    /// Arguments the prompt accepts, if any.
    fn arguments(&self) -> Option<Vec<PromptArgument>>;
    /// Optional: contextual visibility (default: always visible).
    /// `_ctx` is unused by the default body; the underscore suppresses
    /// the `unused_variables` warning.
    fn is_visible(&self, _ctx: &VisibilityContext) -> bool { true }
    /// Render the prompt with full session context.
    async fn get(&self, ctx: ExecutionContext<'_>) -> Result<GetPromptResult, PromptError>;
}
Request data from clients:
// Request workspace roots (requires client roots capability)
let roots = server.request_roots("session-id", None).await?;
for root in roots {
    // `root.name` is optional; fall back to the empty string when absent.
    println!("Root: {} ({})", root.name.unwrap_or_default(), root.uri);
}
// Request LLM completion (requires client sampling capability)
let params = CreateMessageParams {
    messages: vec![SamplingMessage {
        role: "user".to_string(),
        content: SamplingContent::Text { text: "Hello!".to_string() },
    }],
    // Upper bound on the number of tokens the client may generate.
    max_tokens: 1000,
    // All remaining fields take their `Default` values.
    ..Default::default()
};
let result = server.request_sampling("session-id", params, None).await?;
Filter tools/resources/prompts based on session state:
/// Example tool whose visibility depends on working-tree state.
struct GitCommitTool;

impl Tool for GitCommitTool {
    // Only visible when in a git repo with uncommitted changes.
    // No environment info at all means the tool stays hidden.
    fn is_visible(&self, ctx: &VisibilityContext) -> bool {
        ctx.environment
            .map_or(false, |e| e.has_git_repo() && !e.git_is_clean())
    }
    // ...
}
// Example resource gated on the caller's privileges.
struct AdminOnlyResource;
// NOTE(review): elided snippet — a complete impl also needs #[async_trait]
// and the remaining Resource methods (see the trait definition).
impl Resource for AdminOnlyResource {
    // Only visible to admin users
    fn is_visible(&self, ctx: &VisibilityContext) -> bool {
        ctx.is_admin()
    }
    // ...
}
use mcp_host::prelude::*;

// Fluent builder API: declare capabilities and middleware, then build.
// NOTE(review): the boolean arguments presumably map to capability flags
// (e.g. list_changed / subscribe) — confirm against the crate docs.
let server = server("my-server", "1.0.0")
    .with_tools(true)
    .with_resources(true, false)
    .with_prompts(true)
    .with_logging()
    .with_logging_middleware()
    .with_validation_middleware()
    .build();
use mcp_host::prelude::*;

// Builder with resilience middleware stacked on top of the capabilities:
// circuit breaking, retry with backoff, and rate limiting.
let server = server("resilient-server", "1.0.0")
    .with_tools(true)
    .with_resources(true, false)
    // Circuit breaker: opens after 3 failures in 60s
    .with_circuit_breaker(ToolBreakerConfig {
        failure_threshold: 3,         // failures within the window before opening
        failure_window_secs: 60.0,    // sliding window for counting failures
        half_open_timeout_secs: 10.0, // wait before probing an open breaker
        success_threshold: 2,         // successes in half-open needed to close
    })
    // Retry: exponential backoff with full jitter
    .with_retry(ResourceRetryConfig {
        max_attempts: 3,
        base_delay_ms: 100,
        multiplier: 2.0,    // delay grows 2x per attempt
        max_delay_ms: 5000, // backoff cap
        jitter_factor: 1.0, // 1.0 = full jitter
    })
    // Rate limit: 100 req/s with burst of 20
    .with_rate_limit(100.0, 20)
    .build();
use mcp_host::prelude::*;

// Create logger with notification channel
// `notification_tx` comes from `Server::notification_sender()`;
// "my-tool" tags each message with its source component.
let logger = McpLogger::new(notification_tx, "my-tool");
// Log messages are visible to the LLM via notifications/message
logger.info("Tool initialized successfully");
logger.warning("Rate limit approaching threshold");
logger.error("External API unavailable");
Send notifications from background tasks:
let server = Server::new("server", "1.0.0");
// Handle for emitting notifications from outside the request cycle.
let notification_tx = server.notification_sender();
// Spawn background task
tokio::spawn(async move {
    // `.ok()` deliberately ignores a send failure (e.g. the receiver was
    // dropped during shutdown) — best-effort delivery.
    notification_tx.send(JsonRpcNotification::new(
        "notifications/progress",
        Some(serde_json::json!({ "progress": 50 })),
    )).ok();
});
| Flag | Description | Default |
|---|---|---|
| stdio | STDIO transport support | ✓ |
| http | HTTP transport via rama | |
| macros | Proc macros (#[mcp_tool], #[mcp_tool_router]) | |
| full | All features enabled | |
# Minimal (STDIO only)
mcp-host = "0.1"
# With HTTP transport
mcp-host = { version = "0.1", features = ["http"] }
# With macros for ergonomic tool definition
mcp-host = { version = "0.1", features = ["macros"] }
# Everything
mcp-host = { version = "0.1", features = ["full"] }
Supported protocol methods:

- initialize / ping
- tools/list / tools/call
- resources/list / resources/read / resources/subscribe / resources/unsubscribe
- prompts/list / prompts/get
- completion/complete
- tasks/list / tasks/get / tasks/cancel
- roots/list — request workspace roots from client
- sampling/createMessage — request LLM completion from client
- elicitation/create — request structured user input with schema validation

# Run the comprehensive PROMETHEUS example with all features
cargo run --example prometheus_project
# Run with HTTP transport (requires http feature)
cargo run --example prometheus_project --features http -- --http --port 8080
BSD-3-Clause