use log::info;

/// This function works on *any* UdpFullStack, including embedded ones -- only main() is what makes
/// this use POSIX sockets. (It does make use of a std based RNG, but that could be passed in just
/// as well for no_std operation).
//
// NOTE(review): the `<S>` generic parameter list had been lost in a text-mangling step; it is
// restored here, as the `where S: ...` clause and the `&mut S` argument require it to compile.
async fn run<S>(stack: &mut S)
where
    S: embedded_nal_async::UdpStack,
{
    // Bind to the CoAP default port (5683) on all addresses; failure to bind is fatal for the demo.
    let mut sock = stack
        .bind_multiple(embedded_nal_async::SocketAddr::new(
            "::".parse().unwrap(),
            5683,
        ))
        .await
        .expect("Can't create a socket");

    let log = Some(coap_message_demos::log::Log::start_once());
    let mut handler = coap_message_demos::full_application_tree(log);

    info!("Server is ready.");

    // Shared CoAP state with room for 3 concurrent requests; split into client and server halves.
    let coap = embedded_nal_coap::CoAPShared::<3>::new();
    let (client, server) = coap.split();

    // going with an embassy_futures join instead of an async_std::task::spawn b/c CoAPShared is not
    // Sync, and async_std expects to work in multiple threads
    embassy_futures::join::join(
        async {
            use rand::SeedableRng;
            server
                .run(
                    &mut sock,
                    &mut handler,
                    &mut rand::rngs::StdRng::from_entropy(),
                )
                .await
                .expect("UDP error")
        },
        run_client_operations(client),
    )
    .await;
}

#[async_std::main]
async fn main() {
    // This is the only POSIX-specific piece: everything else works on any UdpStack implementation.
    let mut stack = std_embedded_nal_async::Stack::default();

    run(&mut stack).await;
}

/// In parallel to server operation, this function performs some operations as a client.
///
/// This doubles as an experimentation ground for the client side of embedded_nal_coap and
/// coap-request in general.
async fn run_client_operations(client: embedded_nal_coap::CoAPRuntimeClient<'_, N>) { let demoserver = "[::1]:1234".parse().unwrap(); use coap_request::Stack; println!("Sending GET to {}...", demoserver); let response = client .to(demoserver) .request( coap_request_implementations::Code::get() .with_path("/other/separate") .processing_response_payload_through(|p| { println!("Got payload {:?}", p); }), ) .await; println!("Response {:?}", response); // This demonstrates that we don't leak requests, and (later) that we don't lock up when there // are too many concurrent requests, and still adhere to protocol. // // Well, except for rate limiting... println!( "Sending 10 (>> 3) requests in short succession, forgetting them after a moment" ); for _i in 0..10 { embassy_futures::select::select( client.to(demoserver).request( coap_request_implementations::Code::get() .with_path("/other/separate") .processing_response_payload_through(|p| { println!("Got payload {:?}", p); }), ), // Knowing that /other/separate takes some time, this is definitely faster async_std::task::sleep(std::time::Duration::from_millis(300)), ) // The other future is dropped. .await; } println!( "Sending 10 (>> 3) requests in parallel, keeping all of them around" ); // It's not NSTART that's limiting us here (although it should), it's . let build_request = || { // The async block allows us to keep the temporary client.to() that'd be otherwise limit // the request's lifetime inside the Future. let block = async { client.to(demoserver).request( coap_request_implementations::Code::get() .with_path("/other/separate") .processing_response_payload_through(|p| { println!("Got payload {:?} (truncated)", p.get(..5).unwrap_or(p)); }), ).await }; block }; // That's not even that easy without TAIT and other trickery... 
use embassy_futures::join::join; join(build_request(), join(build_request(), join(build_request(), join(build_request(), join(build_request(), join(build_request(), join(build_request(), join(build_request(), join(build_request(), build_request() // Hello LISP my old friend ))))))))) .await; println!("All through"); // What follows are experiments with the request payload setting functions of // coap-request-implementations that have so far not been used successfully without // force over lifetimes. // Which of these two signatures I take makes the difference in whether this works // (upper) or errs like the closure, no matter whether we go through paywriter_f or // not. fn paywriter(m: &mut S::RequestMessage<'_>) { // fn paywriter<'a, 'b>(m: &'a mut coap_message_utils::inmemory_write::Message<'b>) { use coap_message::MinimalWritableMessage; m.set_payload(b"Set time to 1955-11-05").unwrap(); } // let paywriter_f: &mut _ = &mut paywriter; // FIXME: This is needed for the with_request_callback variant that takes a function, // and I don't yet understand why. (Clearly it's unacceptable as a consequence of the // interface; question is, is it a consequence or did I just use it wrong). // let client: &embedded_nal_coap::CoAPRuntimeClient<'_, 3> = // unsafe { core::mem::transmute(&client) }; // let mut paywriter_direct = paywriter; // let paywriter_cl = |m: &mut as Stack>::RequestMessage<'_>| { // // or let paywriter_cl = |m: &mut coap_message_utils::inmemory_write::Message<'_>| { // use coap_message::MinimalWritableMessage; // m.set_payload(b"Set time to 1955-11-05") // }; let req = coap_request_implementations::Code::post() .with_path("/uppercase") // We can build everything up to this point outside, but pulling more of req // starts failing. Is that a hint as to where the lifetime trouble comes from? 
; println!("Sending POST..."); let mut response = client .to(demoserver) ; let response = response .request( req // This works (but needs the unjustifiable lifetime extension above) // .with_request_callback(&mut paywriter_direct) // Does this work? // .with_request_callback(paywriter_f) // This fails with type mismatches // .with_request_callback(&mut paywriter_cl) // But this works because it is simple .with_request_payload_slice(b"Set time to 1955-11-05") .processing_response_payload_through(|p| { println!("Uppercase is {}", core::str::from_utf8(p).unwrap()) }) , ) ; let response = response .await; println!("Response {:?}", response); }