use std::time::Duration;

use libcamera::{
    camera::CameraConfigurationStatus,
    camera_manager::CameraManager,
    framebuffer::AsFrameBuffer,
    framebuffer_allocator::{FrameBuffer, FrameBufferAllocator},
    framebuffer_map::MemoryMappedFrameBuffer,
    pixel_format::PixelFormat,
    properties,
    stream::StreamRole,
};

// drm-fourcc does not have an MJPEG type yet, so construct it from the raw fourcc identifier
const PIXEL_FORMAT_MJPEG: PixelFormat =
    PixelFormat::new(u32::from_le_bytes([b'M', b'J', b'P', b'G']), 0);

fn main() {
    let filename = std::env::args()
        .nth(1)
        .expect("Usage ./jpeg_capture <filename.jpg>");

    let mgr = CameraManager::new().unwrap();
    let cameras = mgr.cameras();
    let cam = cameras.get(0).expect("No cameras found");

    println!(
        "Using camera: {}",
        *cam.properties().get::<properties::Model>().unwrap()
    );

    let mut cam = cam.acquire().expect("Unable to acquire camera");

    // This will generate a default configuration for each specified role
    let mut cfgs = cam
        .generate_configuration(&[StreamRole::ViewFinder])
        .unwrap();

    // Use the MJPEG format so we can write the resulting frame directly into a JPEG file
    cfgs.get_mut(0).unwrap().set_pixel_format(PIXEL_FORMAT_MJPEG);

    println!("Generated config: {:#?}", cfgs);

    match cfgs.validate() {
        CameraConfigurationStatus::Valid => println!("Camera configuration valid!"),
        CameraConfigurationStatus::Adjusted => {
            println!("Camera configuration was adjusted: {:#?}", cfgs)
        }
        CameraConfigurationStatus::Invalid => panic!("Error validating camera configuration"),
    }

    // Ensure that the pixel format was left unchanged by validation
    assert_eq!(
        cfgs.get(0).unwrap().get_pixel_format(),
        PIXEL_FORMAT_MJPEG,
        "MJPEG is not supported by the camera"
    );

    cam.configure(&mut cfgs).expect("Unable to configure camera");

    let mut alloc = FrameBufferAllocator::new(&cam);

    // Allocate frame buffers for the stream
    let cfg = cfgs.get(0).unwrap();
    let stream = cfg.stream().unwrap();
    let buffers = alloc.alloc(&stream).unwrap();
    println!("Allocated {} buffers", buffers.len());

    // Convert each FrameBuffer into a MemoryMappedFrameBuffer, which allows reading it as &[u8]
    let buffers = buffers
        .into_iter()
        .map(|buf| MemoryMappedFrameBuffer::new(buf).unwrap())
        .collect::<Vec<_>>();

    // Create capture requests and attach the buffers
    let mut reqs = buffers
        .into_iter()
        .map(|buf| {
            let mut req = cam.create_request(None).unwrap();
            req.add_buffer(&stream, buf).unwrap();
            req
        })
        .collect::<Vec<_>>();

    // Completed capture requests are returned via a callback
    let (tx, rx) = std::sync::mpsc::channel();
    cam.on_request_completed(move |req| {
        tx.send(req).unwrap();
    });

    cam.start(None).unwrap();

    // Multiple requests can be queued at a time, but for this example we just want a single frame
    cam.queue_request(reqs.pop().unwrap()).unwrap();

    println!("Waiting for camera request execution");
    let req = rx
        .recv_timeout(Duration::from_secs(2))
        .expect("Camera request failed");
    println!("Camera request {:?} completed!", req);
    println!("Metadata: {:#?}", req.metadata());

    // Get the framebuffer for our stream
    let framebuffer: &MemoryMappedFrameBuffer<FrameBuffer> = req.buffer(&stream).unwrap();
    println!("FrameBuffer metadata: {:#?}", framebuffer.metadata());

    // The MJPEG format has only one data plane, containing encoded JPEG data with all the headers
    let planes = framebuffer.data();
    let jpeg_data = planes.get(0).unwrap();

    // The actual JPEG-encoded data will be smaller than the framebuffer size;
    // its length can be obtained from the metadata.
    let jpeg_len = framebuffer
        .metadata()
        .unwrap()
        .planes()
        .get(0)
        .unwrap()
        .bytes_used as usize;

    std::fs::write(&filename, &jpeg_data[..jpeg_len]).unwrap();
    println!("Written {} bytes to {}", jpeg_len, &filename);

    // Everything is cleaned up automatically by the Drop implementations
}
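
// Possible extension (not part of the original example): to capture continuously rather than a
// single frame, re-queue each completed request instead of stopping. This sketch assumes
// libcamera-rs exposes `Request::reuse` together with `request::ReuseFlag::REUSE_BUFFERS`
// (mirroring C++ libcamera's `Request::reuse(ReuseBuffers)`); verify against the crate version
// in use before relying on it.
//
//     use libcamera::request::ReuseFlag;
//
//     loop {
//         let mut req = rx
//             .recv_timeout(Duration::from_secs(2))
//             .expect("Camera request failed");
//         // ... read the completed frame out of the mapped buffer, as above ...
//         req.reuse(ReuseFlag::REUSE_BUFFERS); // keep the attached buffers for the next capture
//         cam.queue_request(req).unwrap();
//     }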