use glam::Vec3;
use wgpu::util::DeviceExt;
use vertix::prelude::*;

pub async fn compute_shader(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    numbers: &[u32],
) -> Option<Vec<u32>> {
    // Loads the shader from WGSL.
    let cs_module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
        label: None,
        source: wgpu::ShaderSource::Wgsl(include_str!("compute.wgsl").into()),
    });

    // Gets the size in bytes of the buffer.
    let size = std::mem::size_of_val(numbers) as wgpu::BufferAddress;

    // Instantiates a buffer without data.
    // The `usage` of a buffer specifies how it can be used:
    //   `BufferUsages::MAP_READ` allows it to be read (outside the shader).
    //   `BufferUsages::COPY_DST` allows it to be the destination of a copy.
    let staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
        label: None,
        size,
        usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });

    // Instantiates a buffer with data (`numbers`).
    // Usage allows the buffer to be:
    //   - a storage buffer (can be bound within a bind group and thus available to a shader),
    //   - the destination of a copy,
    //   - the source of a copy.
    let storage_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
        label: Some("Storage Buffer"),
        contents: bytemuck::cast_slice(numbers),
        usage: wgpu::BufferUsages::STORAGE
            | wgpu::BufferUsages::COPY_DST
            | wgpu::BufferUsages::COPY_SRC,
    });

    // A bind group defines how buffers are accessed by shaders.
    // It is to WebGPU what a descriptor set is to Vulkan.
    // `binding` here refers to the `binding` of a buffer in the shader
    // (`@group(0) @binding(0)` in WGSL).

    // A pipeline specifies the operation of a shader.

    // Instantiates the pipeline.
    let compute_pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
        label: None,
        layout: None,
        module: &cs_module,
        entry_point: "main",
    });

    // Instantiates the bind group, once again specifying the binding of buffers.
    let bind_group_layout = compute_pipeline.get_bind_group_layout(0);
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: None,
        layout: &bind_group_layout,
        entries: &[wgpu::BindGroupEntry {
            binding: 0,
            resource: storage_buffer.as_entire_binding(),
        }],
    });

    // A command encoder executes one or many pipelines.
    // It is to WebGPU what a command buffer is to Vulkan.
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    {
        let mut cpass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor { label: None });
        cpass.set_pipeline(&compute_pipeline);
        cpass.set_bind_group(0, &bind_group, &[]);
        cpass.insert_debug_marker("compute collatz iterations");
        // Number of cells to run, i.e. the (x, y, z) dimensions of the dispatch.
        cpass.dispatch_workgroups(numbers.len() as u32, 1, 1);
    }

    // Adds a copy operation to the command encoder.
    // Will copy data from the storage buffer on the GPU to the staging buffer on the CPU.
    encoder.copy_buffer_to_buffer(&storage_buffer, 0, &staging_buffer, 0, size);

    // Submits the command encoder for processing.
    queue.submit(Some(encoder.finish()));

    // Note that we're not calling `.await` here.
    let buffer_slice = staging_buffer.slice(..);
    // Sets the buffer up for mapping, sending the result of the mapping back to us when it is finished.
    let (sender, receiver) = futures_intrusive::channel::shared::oneshot_channel();
    buffer_slice.map_async(wgpu::MapMode::Read, move |v| sender.send(v).unwrap());

    // Poll the device in a blocking manner so that our future resolves.
    // In an actual application, `device.poll(...)` should
    // be called in an event loop or on another thread.
    device.poll(wgpu::Maintain::Wait);

    // Awaits until the buffer is mapped and can be read from.
    if let Some(Ok(())) = receiver.receive().await {
        // Gets the contents of the buffer.
        let data = buffer_slice.get_mapped_range();
        // Since the contents come back as raw bytes, convert them back to u32.
        let result = bytemuck::cast_slice(&data).to_vec();

        // With the current interface, we have to make sure all mapped views are
        // dropped before we unmap the buffer.
        drop(data);
        staging_buffer.unmap(); // Unmaps the buffer from memory.
        // If you are familiar with C++, these 2 lines can be thought of similarly to:
        //   delete myPointer;
        //   myPointer = NULL;
        // It effectively frees the memory.

        // Returns the data from the buffer.
        Some(result)
    } else {
        panic!("failed to run compute on gpu!")
    }
}

fn main() {
    pollster::block_on(run());
}

#[cfg_attr(target_arch = "wasm32", wasm_bindgen(start))]
pub async fn run() {
    let camera = Camera::new(
        Vec3::new(0.0, 5.0, 10.0),
        f32::to_radians(-90.0),
        f32::to_radians(-20.0),
    );

    // State::new uses async code, so we're going to wait for it to finish.
    let (mut state, _event_loop) = State::new(true, env!("OUT_DIR"), camera, 5.0, 2.0).await;

    // NOTE: the generic type parameter was lost in the original source;
    // `QueueResource` is an assumed name here -- substitute the queue resource
    // type that vertix actually stores in the world.
    let queue = state.world.get_resource_mut::<QueueResource>().unwrap();

    let steps = compute_shader(&state.device, &queue.queue, &[1, 2, 3, 4])
        .await
        .unwrap();

    for num in steps {
        println!("{},", num);
    }
}
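
// The shader loaded above via `include_str!("compute.wgsl")` is not shown in
// this file. Below is a minimal sketch of what it might contain, assuming the
// Collatz kernel from the upstream wgpu `hello-compute` example (the debug
// marker above names "collatz iterations"); the actual shader in this repo
// may differ.
//
// ```wgsl
// @group(0) @binding(0)
// var<storage, read_write> v_indices: array<u32>; // used as both input and output
//
// // Returns how many Collatz steps are needed to reach 1 from `n_base`.
// fn collatz_iterations(n_base: u32) -> u32 {
//     var n: u32 = n_base;
//     var i: u32 = 0u;
//     loop {
//         if (n <= 1u) { break; }
//         if (n % 2u == 0u) {
//             n = n / 2u;
//         } else {
//             // Bail out if 3n + 1 would overflow a u32.
//             if (n >= 1431655765u) { return 4294967295u; }
//             n = 3u * n + 1u;
//         }
//         i = i + 1u;
//     }
//     return i;
// }
//
// @compute @workgroup_size(1)
// fn main(@builtin(global_invocation_id) global_id: vec3<u32>) {
//     v_indices[global_id.x] = collatz_iterations(v_indices[global_id.x]);
// }
// ```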