| Crates.io | nanofish |
| lib.rs | nanofish |
| version | 0.9.1 |
| created_at | 2025-06-19 08:31:34.486823+00 |
| updated_at | 2025-09-11 14:18:40.774481+00 |
| description | 🐟 A lightweight, `no_std` HTTP client and server for embedded systems built on top of Embassy networking. |
| homepage | |
| repository | https://github.com/rttfd/nanofish |
| max_upload_size | |
| id | 1717971 |
| size | 150,886 |

A lightweight, no_std HTTP client and server for embedded systems built on Embassy networking with zero-copy response handling.
Nanofish is designed for embedded systems with limited memory. It provides a simple HTTP client and server that works without heap allocation, making it suitable for microcontrollers and IoT devices. The library uses zero-copy response handling where response data is borrowed directly from user-provided buffers, keeping memory usage predictable and efficient.
no_std compatibility with no heap allocations

[dependencies]
nanofish = "0.9.1"
[dependencies]
nanofish = { version = "0.9.1", features = ["tls"] }
tls - Enables HTTPS/TLS support via embedded-tls
Unlike traditional HTTP clients that copy response data multiple times, Nanofish uses a zero-copy approach:
Traditional HTTP Clients:
Network → Internal Buffer (copy #1) → Response Struct (copy #2) → User Code (copy #3)
Nanofish Zero-Copy:
Network → YOUR Buffer (direct) → Zero-Copy References → User Code (no copies!)
Here's a simple example showing how to use Nanofish:
use nanofish::{DefaultHttpClient, HttpHeader, ResponseBody, headers, mime_types};
use embassy_net::Stack;
async fn example(stack: &Stack<'_>) -> Result<(), nanofish::Error> {
...
// See crate docs for full async usage example
}
// Construct the client from your Embassy network stack:
let client = DefaultHttpClient::new(stack);
let mut response_buffer = [0u8; 8192];
let headers = [
HttpHeader::user_agent("Nanofish/0.9.1"),
HttpHeader::content_type(mime_types::JSON),
HttpHeader::authorization("Bearer token123"),
];
let custom_headers = [
HttpHeader { name: "X-Custom-Header", value: "custom-value" },
HttpHeader::new(headers::ACCEPT, mime_types::JSON),
];
let (response, bytes_read) = client.get(
"http://example.com/api/status",
&headers,
&mut response_buffer
).await?;
println!("Read {} bytes into buffer", bytes_read);
// Traditional approach (copies data):
// 1. Read from network → internal buffer (copy #1)
// 2. Parse response → response struct (copy #2)
// 3. User gets → copied data (copy #3)
// Nanofish zero-copy approach:
// 1. Read from network → YOUR buffer (direct)
// 2. Parse response → references to YOUR buffer (zero-copy)
// 3. User gets → direct references to YOUR buffer (zero-copy)
let mut small_buffer = [0u8; 1024]; // For small responses
let mut large_buffer = [0u8; 32768]; // For large responses
// Same API, different memory usage - YOU decide!
let (small_response, _) = client.get(url, &headers, &mut small_buffer).await?;
let (large_response, _) = client.get(url, &headers, &mut large_buffer).await?;
Nanofish provides helpful APIs for working with HTTP headers:
use nanofish::headers;
// Common header names
let content_type = headers::CONTENT_TYPE; // "Content-Type"
let authorization = headers::AUTHORIZATION; // "Authorization"
let user_agent = headers::USER_AGENT; // "User-Agent"
let accept = headers::ACCEPT; // "Accept"
use nanofish::mime_types;
// Common MIME types
let json = mime_types::JSON; // "application/json"
let xml = mime_types::XML; // "application/xml"
let text = mime_types::TEXT; // "text/plain"
let html = mime_types::HTML; // "text/html"
use nanofish::{HttpHeader, mime_types};
// Easy creation of common headers
let headers = [
HttpHeader::content_type(mime_types::JSON),
HttpHeader::authorization("Bearer your-token"),
HttpHeader::user_agent("MyApp/1.0"),
HttpHeader::accept(mime_types::JSON),
HttpHeader::api_key("your-api-key"),
];
Nanofish automatically determines the appropriate response body type based on the Content-Type header:
use nanofish::ResponseBody;
// The response body is automatically parsed based on content type
match &response.body {
ResponseBody::Text(text) => {
println!("Text response: {}", text);
}
ResponseBody::Binary(bytes) => {
println!("Binary response: {} bytes", bytes.len());
}
ResponseBody::Empty => {
println!("Empty response");
}
}
if response.is_success() {
println!("Request successful! Status: {}", response.status_code);
}
if response.is_client_error() {
println!("Client error: {}", response.status_code);
}
if response.is_server_error() {
println!("Server error: {}", response.status_code);
}
// You can also check status directly on the status code:
if response.status_code.is_success() {
println!("Success!");
}
if let Some(content_length) = response.content_length() {
println!("Content length: {} bytes", content_length);
}
Nanofish provides convenience methods for all standard HTTP verbs:
// All methods require a buffer and return (HttpResponse, bytes_read)
let mut buffer = [0u8; 4096];
// GET request
let (response, bytes_read) = client.get(
"http://api.example.com/users",
&headers,
&mut buffer
).await?;
// POST request with JSON body
let json_body = br#"{"name": "John", "email": "john@example.com"}"#;
let post_headers = [
HttpHeader::content_type(mime_types::JSON),
HttpHeader::authorization("Bearer token123"),
];
let (response, bytes_read) = client.post(
"http://api.example.com/users",
&post_headers,
json_body,
&mut buffer
).await?;
// PUT request
let (response, bytes_read) = client.put(
"http://api.example.com/users/123",
&headers,
update_data,
&mut buffer
).await?;
// DELETE request
let (response, bytes_read) = client.delete(
"http://api.example.com/users/123",
&headers,
&mut buffer
).await?;
// Other HTTP methods
let (response, _) = client.patch("http://api.example.com/users/123", &headers, patch_data, &mut buffer).await?;
let (response, _) = client.head("http://api.example.com/status", &headers, &mut buffer).await?;
let (response, _) = client.options("http://api.example.com", &headers, &mut buffer).await?;
let (response, _) = client.trace("http://api.example.com", &headers, &mut buffer).await?;
let (response, _) = client.connect("http://proxy.example.com", &headers, &mut buffer).await?;
All methods return a Result<(HttpResponse, usize), Error> where:
HttpResponse contains zero-copy references to data in your buffer, and usize is the number of bytes read into your buffer.
Just like the server, you can choose different client sizes:
use nanofish::{DefaultHttpClient, SmallHttpClient, HttpClient};
// Default client (4KB buffers) - good for most use cases
let client = DefaultHttpClient::new(stack);
// Small client (1KB buffers) - for memory-constrained devices
let client = SmallHttpClient::new(stack);
// Custom client with your own buffer sizes
type CustomClient<'a> = HttpClient<'a, 2048, 2048, 8192, 8192, 2048>;
// Type parameters, in order: TCP_RX, TCP_TX, TLS_READ, TLS_WRITE, REQUEST buffer sizes
let client = CustomClient::new(stack);
TCP_RX: TCP receive buffer size (default: 4096 bytes)
TCP_TX: TCP transmit buffer size (default: 4096 bytes)
TLS_READ: TLS read record buffer size (default: 4096 bytes)
TLS_WRITE: TLS write record buffer size (default: 4096 bytes)
RQ: HTTP request buffer size for building requests (default: 1024 bytes)
Choose buffer sizes based on your memory constraints and expected payload sizes. The request buffer size determines the maximum size of HTTP requests that can be built, including headers and the request line.
Choose your buffer size based on your needs:
// Scenario 1: Memory-constrained device (1KB buffer)
let mut tiny_buffer = [0u8; 1024];
let (response, _) = client.get(url, &headers, &mut tiny_buffer).await?;
// Perfect for small API responses, status checks, etc.
// Scenario 2: Streaming large data (32KB buffer)
let mut large_buffer = [0u8; 32768];
let (response, bytes_read) = client.get(large_url, &headers, &mut large_buffer).await?;
// Handle larger responses, file downloads, etc.
// Scenario 3: Reuse the same buffer for multiple requests
let mut shared_buffer = [0u8; 8192];
for url in urls {
let (response, _) = client.get(url, &headers, &mut shared_buffer).await?;
process_response(&response);
// Buffer is reused for each request - no allocations!
}
Nanofish includes a built-in HTTP server perfect for embedded systems and IoT devices. The server is async, lightweight, and has customizable timeouts.
Important Note: The server only supports plain HTTP connections, not HTTPS/TLS. While the Nanofish client supports both HTTP and HTTPS, the server implementation is HTTP-only. For secure connections in production, use a reverse proxy (like nginx) or load balancer that handles TLS termination.
use nanofish::{DefaultHttpServer, HttpHandler, HttpRequest, HttpResponse, ResponseBody, StatusCode};
use embassy_net::Stack;
// Create a simple request handler
struct MyHandler;
impl HttpHandler for MyHandler {
async fn handle_request(&mut self, request: &HttpRequest<'_>) -> Result<HttpResponse<'_>, nanofish::Error> {
match request.path {
"/" => Ok(HttpResponse {
status_code: StatusCode::Ok,
headers: Vec::new(),
body: ResponseBody::Text("<h1>Hello World!</h1>"),
}),
"/api/status" => Ok(HttpResponse {
status_code: StatusCode::Ok,
headers: Vec::new(),
body: ResponseBody::Text("{\"status\":\"ok\"}"),
}),
_ => Ok(HttpResponse {
status_code: StatusCode::NotFound,
headers: Vec::new(),
body: ResponseBody::Text("Not Found"),
}),
}
}
}
async fn run_server(stack: Stack<'_>) -> Result<(), nanofish::Error> {
let mut server = DefaultHttpServer::new(80); // Listen on port 80
let handler = MyHandler;
// This runs forever, handling requests
server.serve(stack, handler).await;
}
Just like the client, you can choose different server sizes:
use nanofish::{DefaultHttpServer, SmallHttpServer, HttpServer};
// Default server (4KB buffers) - good for most use cases
let server = DefaultHttpServer::new(80);
// Small server (1KB buffers) - for memory-constrained devices
let server = SmallHttpServer::new(80);
// Custom server with your own buffer sizes
type MyServer = HttpServer<2048, 2048, 1024, 8192>; // RX, TX, Request, Response buffer sizes
let server = MyServer::new(80);
You can customize how long the server waits for different operations:
use nanofish::{DefaultHttpServer, ServerTimeouts};
// Default timeouts: 10s accept, 30s read, 60s handler
let server = DefaultHttpServer::new(80);
// Custom timeouts
let timeouts = ServerTimeouts::new(
5, // 5 seconds to accept new connections
15, // 15 seconds to read request data
30 // 30 seconds for your handler to process requests
);
let server = DefaultHttpServer::with_timeouts(80, timeouts);
Your handler receives detailed information about each request:
impl HttpHandler for MyHandler {
async fn handle_request(&mut self, request: &HttpRequest<'_>) -> Result<HttpResponse<'_>, nanofish::Error> {
// Check the HTTP method
match request.method {
HttpMethod::GET => { /* handle GET */ }
HttpMethod::POST => { /* handle POST */ }
_ => { /* handle other methods */ }
}
// Look at the request path
println!("Path: {}", request.path);
// Check headers
for header in &request.headers {
println!("Header: {}: {}", header.name, header.value);
}
// Access request body (for POST, PUT, etc.)
if !request.body.is_empty() {
println!("Body: {} bytes", request.body.len());
}
// Return your response...
Ok(HttpResponse { /* ... */ })
}
}
For quick testing, you can use the built-in SimpleHandler:
use nanofish::{DefaultHttpServer, SimpleHandler};
async fn run_test_server(stack: Stack<'_>) {
let mut server = DefaultHttpServer::new(8080);
let handler = SimpleHandler; // Serves "/" and "/health" endpoints
server.serve(stack, handler).await;
}
The SimpleHandler provides:
GET / → HTML welcome page
GET /health → JSON status response