From 089d2b86491a6fd3801d6759a38e02098d0e9d50 Mon Sep 17 00:00:00 2001 From: druskus20 Date: Tue, 17 Dec 2024 23:24:59 +0100 Subject: [PATCH] feat: heavy refactor, compute_base to be shared across demos feat: heavy refactor, compute_base to be shared across demos --- src/camera.rs | 89 ++------- src/context.rs | 269 --------------------------- src/demos/compute_base.rs | 162 ++++++++++++++++ src/demos/mod.rs | 11 +- src/demos/simple/compute_pipeline.rs | 51 ++--- src/demos/simple/mod.rs | 139 ++++---------- src/demos/simple/volume.rs | 26 +-- src/event_loop.rs | 33 +++- src/main.rs | 21 ++- src/render_pipeline.rs | 115 ++++++++++-- src/rendering_context.rs | 79 ++++++++ src/state.rs | 78 ++++++++ 12 files changed, 549 insertions(+), 524 deletions(-) delete mode 100644 src/context.rs create mode 100644 src/demos/compute_base.rs create mode 100644 src/rendering_context.rs create mode 100644 src/state.rs diff --git a/src/camera.rs b/src/camera.rs index 3739204..8485e5c 100644 --- a/src/camera.rs +++ b/src/camera.rs @@ -1,8 +1,5 @@ -use std::time::Duration; -use bytemuck::{Pod, Zeroable}; use cgmath::{perspective, Deg, EuclideanSpace, Matrix4, Point3, Vector3}; -use wgpu::{util::DeviceExt, Buffer, Device, Queue}; use winit::{dpi::PhysicalPosition, event::MouseScrollDelta}; #[derive(Debug)] @@ -18,36 +15,10 @@ pub struct Camera { pub horizontal_angle: f32, pub vertical_angle: f32, pub distance: f32, - uniforms: CameraUniforms, - pub buffer: Buffer, - pub bind_group: wgpu::BindGroup, -} - -#[repr(C, align(16))] -#[derive(Debug, Copy, Clone, Pod, Zeroable)] -struct CameraUniforms { - view_matrix: [[f32; 4]; 4], - projection_matrix: [[f32; 4]; 4], - camera_position: [f32; 3], - _padding: f32, } impl Camera { - pub const DESC: wgpu::BindGroupLayoutDescriptor<'static> = wgpu::BindGroupLayoutDescriptor { - label: Some("Camera layout"), - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::COMPUTE, - ty: wgpu::BindingType::Buffer { - ty: wgpu::BufferBindingType::Uniform, - has_dynamic_offset: false, - min_binding_size: None, - }, - count: None, - }], - }; - - pub fn new(aspect: f32, device: &Device) -> Self { + pub fn new(aspect: f32) -> Self { let position = Vector3::new(0.5, 0.5, 0.5); let target = Vector3::new(0.5, 0.5, 0.5); let up = Vector3::new(0.0, 1.0, 0.0); @@ -56,28 +27,6 @@ impl Camera { let znear: f32 = 0.001; let zfar: f32 = 1000000.0; - let uniforms = CameraUniforms { - view_matrix: view_matrix(position, target, up).into(), - projection_matrix: projection_matrix(fovy, aspect, znear, zfar).into(), - camera_position: position.into(), - _padding: 0.0, - }; - - let buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor { - label: Some("Camera Buffer"), - contents: bytemuck::cast_slice(&[uniforms]), - usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, - }); - - let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - layout: &device.create_bind_group_layout(&Self::DESC), - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: buffer.as_entire_binding(), - }], - label: Some("camera_bind_group"), - }); - Self { position, aspect, @@ -89,9 +38,6 @@ impl Camera { distance: 2.0, target, up, - uniforms, - buffer, - bind_group, } } @@ -112,33 +58,22 @@ impl Camera { } pub fn view_matrix(&self) -> Matrix4 { - view_matrix(self.position, self.target, self.up) + { + Matrix4::look_at_rh( + Point3::from_vec(self.position), + Point3::from_vec(self.target), + self.up, + ) + } } pub fn projection_matrix(&self) -> Matrix4 { 
- projection_matrix(self.fovy, self.aspect, self.znear, self.zfar) - } - - pub fn update_buffer(&self, queue: &Queue, buffer: &Buffer) { - let uniforms = CameraUniforms { - view_matrix: self.view_matrix().into(), - projection_matrix: self.projection_matrix().into(), - camera_position: self.position.into(), - _padding: 0.0, - }; - - queue.write_buffer(buffer, 0, bytemuck::cast_slice(&[uniforms])); + { + perspective(Deg(self.fovy), self.aspect, self.znear, self.zfar) + } } } -pub fn view_matrix(position: Vector3, target: Vector3, up: Vector3) -> Matrix4 { - Matrix4::look_at_rh(Point3::from_vec(position), Point3::from_vec(target), up) -} - -pub fn projection_matrix(fovy: f32, aspect: f32, znear: f32, zfar: f32) -> Matrix4 { - perspective(Deg(fovy), aspect, znear, zfar) -} - #[derive(Debug)] pub struct CameraController { rotate_horizontal: f32, @@ -173,7 +108,7 @@ impl CameraController { }; } - pub fn update_camera(&mut self, camera: &mut Camera, dt: Duration) { + pub fn update_camera(&mut self, camera: &mut Camera) { camera.orbit(self.rotate_horizontal, self.rotate_vertical, self.scroll); self.rotate_horizontal = 0.0; diff --git a/src/context.rs b/src/context.rs deleted file mode 100644 index 24cea7e..0000000 --- a/src/context.rs +++ /dev/null @@ -1,269 +0,0 @@ -/// Rendering context -use std::path::Path; - -// lib.rs -use crate::{camera::Camera, render_pipeline}; -use tracing::{debug, info}; -use winit::{ - event::{ElementState, MouseButton, WindowEvent}, - window::Window, -}; - -use crate::Result; - -#[derive(Debug)] -pub struct Context<'a> { - pub surface: wgpu::Surface<'a>, - pub device: wgpu::Device, - pub queue: wgpu::Queue, - pub config: wgpu::SurfaceConfiguration, - pub size: winit::dpi::PhysicalSize, - - _texture: wgpu::Texture, - pub computed_texture_view: wgpu::TextureView, - - window: &'a Window, - - render_pipeline: render_pipeline::RenderPipeline, - render_bind_group: wgpu::BindGroup, - - pub camera: Camera, - pub camera_controller: crate::camera::CameraController, - - mouse_pressed: bool, - last_mouse_position: Option<(f64, f64)>, -} - -impl<'a> Context<'a> { - // Creating some of the wgpu types requires async code - pub async fn new(window: &'a Window) -> Result> { - let instance = wgpu::Instance::default(); - let surface = instance.create_surface(window).unwrap(); - let adapter = instance - .request_adapter(&wgpu::RequestAdapterOptions { - power_preference: wgpu::PowerPreference::default(), - compatible_surface: Some(&surface), - force_fallback_adapter: false, - }) - .await - .unwrap(); - - let (device, queue) = adapter - .request_device(&wgpu::DeviceDescriptor::default(), None) - .await - .unwrap(); - - let surface_caps = surface.get_capabilities(&adapter); - let surface_format = surface_caps - .formats - .iter() - .find(|f| f.is_srgb()) - .copied() - .unwrap_or(surface_caps.formats[0]); - - let size = window.inner_size(); - let config = wgpu::SurfaceConfiguration { - usage: wgpu::TextureUsages::RENDER_ATTACHMENT, - format: surface_format, - width: size.width, - height: size.height, - present_mode: surface_caps.present_modes[0], - alpha_mode: surface_caps.alpha_modes[0], - view_formats: vec![], - desired_maximum_frame_latency: 2, - }; - - let render_path = format!("{}/shaders/render.wgsl", env!("CARGO_MANIFEST_DIR")); - let render_pipeline = crate::context::render_pipeline::RenderPipeline::new( - &device, - Path::new(&render_path), - &config, - )?; - - // TODO: maybe handle resizing? 
- let texture = device.create_texture(&wgpu::TextureDescriptor { - label: Some("Compute Output Texture"), - size: wgpu::Extent3d { - width: config.width, - height: config.height, - depth_or_array_layers: 1, - }, - mip_level_count: 1, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format: wgpu::TextureFormat::Rgba8Unorm, - usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING, - view_formats: &[], // TODO - }); - let sampler = device.create_sampler(&wgpu::SamplerDescriptor { - label: Some("Sampler"), - address_mode_u: wgpu::AddressMode::ClampToEdge, - address_mode_v: wgpu::AddressMode::ClampToEdge, - address_mode_w: wgpu::AddressMode::ClampToEdge, - mag_filter: wgpu::FilterMode::Linear, - min_filter: wgpu::FilterMode::Linear, - mipmap_filter: wgpu::FilterMode::Linear, - ..Default::default() - }); - let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default()); - let render_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("Render Bind Group"), - layout: &render_pipeline.bind_group_layout, - entries: &[ - wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::Sampler(&sampler), - }, - wgpu::BindGroupEntry { - binding: 1, - resource: wgpu::BindingResource::TextureView(&texture_view), - }, - ], - }); - - let aspect = config.width as f32 / config.height as f32; - let camera = Camera::new(aspect, &device); - - let camera_controller = crate::camera::CameraController::new(0.2, 0.2); - - Ok(Self { - window, - surface, - device, - queue, - config, - size, - _texture: texture, - computed_texture_view: texture_view, - render_pipeline, - render_bind_group, - camera, - camera_controller, - mouse_pressed: false, - last_mouse_position: None, - }) - } - - pub fn window(&self) -> &Window { - self.window - } - - pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize) { - if new_size.width > 0 && new_size.height > 0 { - self.size = new_size; - self.config.width = new_size.width; - self.config.height = new_size.height; - self.surface.configure(&self.device, &self.config); - } - } - - pub fn input(&mut self, event: &WindowEvent) -> bool { - let r = match event { - //WindowEvent::KeyboardInput { - // event: - // KeyEvent { - // physical_key: PhysicalKey::Code(key), - // state, - // .. - // }, - // .. - //} => self.camera_controller.process_keyboard(*key, *state), - WindowEvent::CursorMoved { position, .. } => { - if self.mouse_pressed { - let current_pos = (position.x, position.y); - - // Calculate delta movement when mouse is pressed - if let Some(last_pos) = self.last_mouse_position { - let dx = current_pos.0 - last_pos.0; - let dy = current_pos.1 - last_pos.1; - - // Use the existing process_mouse method - self.camera_controller.process_mouse(dx, dy); - } - - // Update last mouse position - self.last_mouse_position = Some(current_pos); - } - true - } - WindowEvent::MouseWheel { delta, .. } => { - self.camera_controller.process_scroll(delta); - true - } - WindowEvent::MouseInput { - button: MouseButton::Left, - state, - .. 
- } => { - self.mouse_pressed = *state == ElementState::Pressed; - true - } - _ => false, - }; - - if r { - debug!(target = "input", "Processed event: {:?}", event); - } - r - } - - pub fn update(&mut self, dt: std::time::Duration) { - self.camera_controller.update_camera(&mut self.camera, dt); - self.camera.update_buffer(&self.queue, &self.camera.buffer); - } - - #[tracing::instrument(skip(self))] - pub fn render(&mut self) -> std::result::Result<(), wgpu::SurfaceError> { - debug!("Camera Position: {:?}", self.camera.position); - debug!("Camera Target: {:?}", self.camera.target); - debug!("Horizontal Angle: {}", self.camera.horizontal_angle); - debug!("Vertical Angle: {}", self.camera.vertical_angle); - let output = self.surface.get_current_texture()?; - let view = output - .texture - .create_view(&wgpu::TextureViewDescriptor::default()); - let mut encoder = self - .device - .create_command_encoder(&wgpu::CommandEncoderDescriptor { - label: Some("Render Encoder"), - }); - - // render pass - { - let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { - label: Some("Render Pass"), - color_attachments: &[ - // This is what @location(0) in the fragment shader targets - Some(wgpu::RenderPassColorAttachment { - view: &view, - resolve_target: None, - ops: wgpu::Operations { - load: wgpu::LoadOp::Clear(wgpu::Color::default()), - store: wgpu::StoreOp::Store, - }, - }), - ], - depth_stencil_attachment: None, - timestamp_writes: None, - occlusion_query_set: None, - }); - - render_pass.set_pipeline(self.render_pipeline.as_ref()); - render_pass.set_bind_group(0, &self.render_bind_group, &[]); - debug!(target = "render_pass", "Render bind group set"); - render_pass.draw(0..6, 0..1); // Draw a quad (2*3 vertices) - debug!(target = "render_pass", "Draw done"); - } - - self.queue.submit(Some(encoder.finish())); - - // Before presenting to the screen we need to let the compositor know - This effectively - // syncs us to the monitor refresh rate. 
- // https://docs.rs/winit/latest/winit/window/struct.Window.html#platform-specific-2 - self.window.pre_present_notify(); - - output.present(); - - Ok(()) - } -} diff --git a/src/demos/compute_base.rs b/src/demos/compute_base.rs new file mode 100644 index 0000000..84aebff --- /dev/null +++ b/src/demos/compute_base.rs @@ -0,0 +1,162 @@ +use bytemuck::{Pod, Zeroable}; +use wgpu::util::DeviceExt; + +use crate::{camera::Camera, rendering_context::Context, state::State}; + +/// Base struct for every compute pipeline +#[derive(Debug)] +pub struct ComputeBase { + // Layouts are needed to create the pipeline + pub output_texture_layout: wgpu::BindGroupLayout, + pub camera_layout: wgpu::BindGroupLayout, + pub debug_matrix_layout: wgpu::BindGroupLayout, + + // Groups are passed to the pipeline + pub debug_matrix_group: wgpu::BindGroup, + pub camera_group: wgpu::BindGroup, + pub output_texture_group: wgpu::BindGroup, + + camera_buffer: wgpu::Buffer, +} + +pub const DESC_OUTPUT_TEXTURE: wgpu::BindGroupLayoutDescriptor<'static> = + wgpu::BindGroupLayoutDescriptor { + label: Some("Storage Texture Layour"), + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::StorageTexture { + access: wgpu::StorageTextureAccess::WriteOnly, + format: wgpu::TextureFormat::Rgba8Unorm, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }], + }; + +pub const DESC_DEBUG_MATRIX: wgpu::BindGroupLayoutDescriptor<'static> = + wgpu::BindGroupLayoutDescriptor { + label: Some("Storage Texture Layour"), + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::StorageTexture { + access: wgpu::StorageTextureAccess::WriteOnly, + format: wgpu::TextureFormat::Rgba8Unorm, + view_dimension: wgpu::TextureViewDimension::D2, + }, + count: None, + }], + }; + +pub const DESC_CAMERA_UNIFORMS: wgpu::BindGroupLayoutDescriptor<'static> = + wgpu::BindGroupLayoutDescriptor { + label: Some("Camera layout"), + entries: &[wgpu::BindGroupLayoutEntry { + binding: 0, + visibility: wgpu::ShaderStages::COMPUTE, + ty: wgpu::BindingType::Buffer { + ty: wgpu::BufferBindingType::Uniform, + has_dynamic_offset: false, + min_binding_size: None, + }, + count: None, + }], + }; + +impl ComputeBase { + pub fn new(ctx: &Context, state: &State, output_texture_view: &wgpu::TextureView) -> Self { + let camera_layout = ctx.device.create_bind_group_layout(&DESC_CAMERA_UNIFORMS); + let debug_matrix_layout = ctx.device.create_bind_group_layout(&DESC_DEBUG_MATRIX); + let output_texture_layout = ctx.device.create_bind_group_layout(&DESC_OUTPUT_TEXTURE); + + let uniforms: CameraUniforms = CameraUniforms::from(&state.camera); + + let camera_buffer = ctx + .device + .create_buffer_init(&wgpu::util::BufferInitDescriptor { + label: Some("Camera Buffer"), + contents: bytemuck::cast_slice(&[uniforms]), + usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, + }); + + let camera_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + layout: &camera_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: camera_buffer.as_entire_binding(), + }], + label: Some("camera_bind_group"), + }); + + let debug_matrix_texture = ctx.device.create_texture(&wgpu::TextureDescriptor { + label: Some("Debug Matrix Texture"), + size: wgpu::Extent3d { + width: ctx.surface_config.width, + height: ctx.surface_config.height, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: 
wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba8Unorm, + usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING, + view_formats: &[], + }); + + let debug_matrix_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: Some("Debug Matrix Bind Group"), + layout: &debug_matrix_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView( + &debug_matrix_texture.create_view(&wgpu::TextureViewDescriptor::default()), + ), + }], + }); + + let output_texture_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { + label: Some("Compute Output Texture Bind Group"), + layout: &output_texture_layout, + entries: &[wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::TextureView(output_texture_view), + }], + }); + Self { + camera_layout, + debug_matrix_group, + output_texture_layout, + camera_group, + debug_matrix_layout, + output_texture_group, + camera_buffer, + } + } + + pub fn update(&self, ctx: &Context, state: &State) { + let uniforms = CameraUniforms::from(&state.camera); + ctx.queue + .write_buffer(&self.camera_buffer, 0, bytemuck::cast_slice(&[uniforms])); + } +} + +#[repr(C, align(16))] +#[derive(Debug, Copy, Clone, Pod, Zeroable)] +pub struct CameraUniforms { + view_matrix: [[f32; 4]; 4], + projection_matrix: [[f32; 4]; 4], + camera_position: [f32; 3], + _padding: f32, +} + +impl From<&Camera> for CameraUniforms { + fn from(camera: &Camera) -> Self { + CameraUniforms { + view_matrix: camera.view_matrix().into(), + projection_matrix: camera.projection_matrix().into(), + camera_position: camera.position.into(), + _padding: 0.0, + } + } +} diff --git a/src/demos/mod.rs b/src/demos/mod.rs index b4e99e5..7dab66a 100644 --- a/src/demos/mod.rs +++ b/src/demos/mod.rs @@ -1,9 +1,12 @@ -use crate::context; +use crate::rendering_context::Context; +use crate::state::State; use crate::Result; +pub mod compute_base; pub mod simple; -pub trait RenderingDemo: Sized { - fn init(ctx: &mut context::Context) -> Result; - fn compute(&self, ctx: &mut context::Context) -> Result<()>; +pub trait Demo: Sized { + fn init(ctx: &Context, state: &State, output_texture_view: &wgpu::TextureView) -> Result; + fn update_gpu_state(&self, ctx: &Context, state: &State) -> Result<()>; + fn compute_pass(&self, ctx: &Context) -> Result<()>; } diff --git a/src/demos/simple/compute_pipeline.rs b/src/demos/simple/compute_pipeline.rs index 657cfae..6902208 100644 --- a/src/demos/simple/compute_pipeline.rs +++ b/src/demos/simple/compute_pipeline.rs @@ -3,53 +3,43 @@ use std::path::Path; use tracing::info; -use crate::Result; +use crate::{demos::compute_base, rendering_context::Context, state::State, Result}; #[derive(Debug)] pub struct ComputePipeline { pub pipeline: wgpu::ComputePipeline, - pub bind_group_layout: wgpu::BindGroupLayout, + pub base: compute_base::ComputeBase, } -pub const DESC_OUTPUT_TEXTURE: wgpu::BindGroupLayoutDescriptor<'static> = - wgpu::BindGroupLayoutDescriptor { - label: Some("Storage Texture Layour"), - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::COMPUTE, - ty: wgpu::BindingType::StorageTexture { - access: wgpu::StorageTextureAccess::WriteOnly, - format: wgpu::TextureFormat::Rgba8Unorm, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }], - }; - impl ComputePipeline { pub fn new( - device: &wgpu::Device, - shader_path: &Path, - input_texture_layout: &wgpu::BindGroupLayout, - camera_layout: 
&wgpu::BindGroupLayout, - debug_matrix_layout: &wgpu::BindGroupLayout, + ctx: &Context, + state: &State, + output_texture_view: &wgpu::TextureView, + input_volume_layout: &wgpu::BindGroupLayout, ) -> Result { - let shader_contents = std::fs::read_to_string(shader_path)?; + let device = &ctx.device; + let base = compute_base::ComputeBase::new(ctx, state, output_texture_view); + + let shader_path = + Path::new(&(format!("{}/shaders/simple_compute.wgsl", env!("CARGO_MANIFEST_DIR")))) + .to_path_buf(); + let shader_contents = std::fs::read_to_string(&shader_path)?; let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { label: Some(shader_path.to_str().unwrap()), source: wgpu::ShaderSource::Wgsl(shader_contents.into()), }); info!("Creating compute pipeline"); - let output_texture_layout = device.create_bind_group_layout(&DESC_OUTPUT_TEXTURE); + let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor { label: Some("Compute Pipeline Layout"), bind_group_layouts: &[ - input_texture_layout, - &output_texture_layout, - camera_layout, - debug_matrix_layout, + input_volume_layout, + &base.output_texture_layout, + &base.camera_layout, + &base.debug_matrix_layout, ], push_constant_ranges: &[], }); @@ -63,10 +53,7 @@ impl ComputePipeline { cache: Default::default(), }); - Ok(ComputePipeline { - pipeline, - bind_group_layout: output_texture_layout, - }) + Ok(ComputePipeline { pipeline, base }) } } diff --git a/src/demos/simple/mod.rs b/src/demos/simple/mod.rs index 56fa128..1f03fca 100644 --- a/src/demos/simple/mod.rs +++ b/src/demos/simple/mod.rs @@ -1,10 +1,8 @@ -use std::path::Path; - use tracing::{debug, info}; -use crate::context; +use crate::{rendering_context::Context, state::State}; -use super::RenderingDemo; +use super::Demo; use crate::Result; pub mod compute_pipeline; @@ -12,95 +10,21 @@ pub mod volume; #[derive(Debug)] pub struct Simple { - volume: volume::Volume, // contains the bindgroup - pipeline: compute_pipeline::ComputePipeline, // contains the bindgrouplayout - compute_bind_group: wgpu::BindGroup, - debug_matrxix_group: wgpu::BindGroup, + volume: volume::Volume, // contains the bindgroup + compute_pipeline: compute_pipeline::ComputePipeline, } -pub const DESC_DEBUG_MATRIX: wgpu::BindGroupLayoutDescriptor<'static> = - wgpu::BindGroupLayoutDescriptor { - label: Some("Storage Texture Layour"), - entries: &[wgpu::BindGroupLayoutEntry { - binding: 0, - visibility: wgpu::ShaderStages::COMPUTE, - ty: wgpu::BindingType::StorageTexture { - access: wgpu::StorageTextureAccess::WriteOnly, - format: wgpu::TextureFormat::Rgba8Unorm, - view_dimension: wgpu::TextureViewDimension::D2, - }, - count: None, - }], - }; - -impl RenderingDemo for Simple { - #[tracing::instrument(skip(ctx))] - fn init(ctx: &mut context::Context) -> Result { +impl Demo for Simple { + #[tracing::instrument()] + fn init(ctx: &Context, state: &State, output_texture_view: &wgpu::TextureView) -> Result { info!("Initializing Simple Demo"); - let compute_path = format!("{}/shaders/simple_compute.wgsl", env!("CARGO_MANIFEST_DIR")); - - // Move? 
- let input_texture_layout = ctx - .device - .create_bind_group_layout(&crate::volume::Volume::DESC); - let camera_layout = ctx - .device - .create_bind_group_layout(&crate::camera::Camera::DESC); - let debug_matrix_layout = ctx.device.create_bind_group_layout(&DESC_DEBUG_MATRIX); - - let pipeline = crate::compute_pipeline::ComputePipeline::new( - &ctx.device, - Path::new(&compute_path), - &input_texture_layout, - &camera_layout, - &debug_matrix_layout, - )?; - - // TODO move this to the compute pipeline - // Abstract the compute pipeline away from the demo - let compute_bind_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("Compute Bind Group"), - layout: &pipeline.bind_group_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::TextureView(&ctx.computed_texture_view), - }], - }); - - let debug_matrix_texture = ctx.device.create_texture(&wgpu::TextureDescriptor { - label: Some("Debug Matrix Texture"), - size: wgpu::Extent3d { - width: ctx.size.width, - height: ctx.size.height, - depth_or_array_layers: 1, - }, - mip_level_count: 1, - sample_count: 1, - dimension: wgpu::TextureDimension::D2, - format: wgpu::TextureFormat::Rgba8Unorm, - usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::STORAGE_BINDING, - view_formats: &[], - }); - - let debug_matrxix_group = ctx.device.create_bind_group(&wgpu::BindGroupDescriptor { - label: Some("Debug Matrix Bind Group"), - layout: &debug_matrix_layout, - entries: &[wgpu::BindGroupEntry { - binding: 0, - resource: wgpu::BindingResource::TextureView( - &debug_matrix_texture.create_view(&wgpu::TextureViewDescriptor::default()), - ), - }], - }); - - info!("Compute shader: {:?}", compute_path); let volume_path = &(format!( "{}/assets/bonsai_256x256x256_uint8.raw", env!("CARGO_MANIFEST_DIR") )); - let volume = volume::Volume::new( + let (volume, input_volume_layout) = volume::Volume::init( volume_path.as_ref(), volume::FlipMode::None, &ctx.device, @@ -108,17 +32,26 @@ impl RenderingDemo for Simple { )?; info!("Volume loaded: {:?}", volume_path); + // TODO maybe move this to the context. 
And make it an argument of compute() + let compute_pipeline = compute_pipeline::ComputePipeline::new( + ctx, + state, + output_texture_view, + &input_volume_layout, + )?; + Ok(Simple { volume, - pipeline, - compute_bind_group, - debug_matrxix_group, + compute_pipeline, }) } - #[tracing::instrument(skip(self, ctx))] - fn compute(&self, ctx: &mut context::Context) -> Result<()> { - let size = ctx.size; + fn update_gpu_state(&self, ctx: &Context, state: &State) -> Result<()> { + self.compute_pipeline.base.update(ctx, state); + Ok(()) + } + + fn compute_pass(&self, ctx: &Context) -> Result<()> { let mut encoder = ctx .device .create_command_encoder(&wgpu::CommandEncoderDescriptor { @@ -131,33 +64,39 @@ impl RenderingDemo for Simple { label: Some("Compute Pass"), timestamp_writes: None, }); + compute_pass.set_pipeline(self.compute_pipeline.as_ref()); - compute_pass.set_pipeline(self.pipeline.as_ref()); - + let base = &self.compute_pipeline.base; // Get the volume inputs + // TODO: consider moving the bind_group to the compute pipeline or something compute_pass.set_bind_group(0, &self.volume.bind_group, &[]); debug!(target = "compute_pass", "Volume inputs bind_group set"); // Get the pipeline inputs - compute_pass.set_bind_group(1, &self.compute_bind_group, &[]); - debug!(target = "compute_pass", "Pipeline inputs bind_group set"); + compute_pass.set_bind_group(1, &base.output_texture_group, &[]); + debug!(target = "compute_pass", "Output texture bind_group set"); - compute_pass.set_bind_group(2, &ctx.camera.bind_group, &[]); + compute_pass.set_bind_group(2, &base.camera_group, &[]); debug!(target = "compute_pass", "Camera bind_group set"); - compute_pass.set_bind_group(3, &self.debug_matrxix_group, &[]); + compute_pass.set_bind_group(3, &base.debug_matrix_group, &[]); debug!(target = "compute_pass", "Debug matrix bind_group set"); // size.width + 15 ensures that any leftover pixels (less than a full workgroup 16x16) - // still require an additional workgroup. - compute_pass.dispatch_workgroups((size.width + 15) / 16, (size.height + 15) / 16, 1); + // still require an additional workgro + compute_pass.dispatch_workgroups( + (ctx.size.width + 15) / 16, + (ctx.size.height + 15) / 16, + 1, + ); debug!( target = "compute_pass", "dispatch_workgroups: {}, {}, {}", - (size.width + 15) / 16, - (size.height + 15) / 16, + (ctx.size.width + 15) / 16, + (ctx.size.height + 15) / 16, 1 ); } + ctx.queue.submit(Some(encoder.finish())); debug!( target = "compute_pass", diff --git a/src/demos/simple/volume.rs b/src/demos/simple/volume.rs index 99e1381..3ec13c1 100644 --- a/src/demos/simple/volume.rs +++ b/src/demos/simple/volume.rs @@ -1,13 +1,14 @@ use tracing::info; +use wgpu::BindGroupLayout; + +// TODO abstract this away. with a trait that returns an array of bind groups maybe? 
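Editorial note, not part of this patch: a minimal sketch of the trait the TODO above gestures at. The `GpuResource` name and method signatures are hypothetical; the idea is that a resource like `Volume` would carry its own bind group layout and bind groups instead of `Volume::init` returning the layout separately.

// Hypothetical sketch only — names and signatures are assumptions, not in the patch.
pub trait GpuResource {
    /// Bind group layouts needed when building a pipeline that consumes this resource.
    fn layouts(&self) -> Vec<&wgpu::BindGroupLayout>;
    /// Bind groups to set on the pass, in the same order as `layouts()`.
    fn bind_groups(&self) -> Vec<&wgpu::BindGroup>;
}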
use crate::Result; use std::path::Path; #[derive(Debug)] pub struct Volume { - _texture: wgpu::Texture, pub bind_group: wgpu::BindGroup, - _sampler: wgpu::Sampler, } impl Volume { @@ -34,12 +35,12 @@ impl Volume { }; #[tracing::instrument(skip(device, queue))] - pub fn new( + pub fn init( path: &Path, flip_mode: FlipMode, device: &wgpu::Device, queue: &wgpu::Queue, - ) -> Result { + ) -> Result<(Self, BindGroupLayout)> { info!("Loading volume from {:?}", path); let data = { let mut data = std::fs::read(path)?; @@ -84,10 +85,10 @@ impl Volume { ..Default::default() }); - let bind_group_layout = device.create_bind_group_layout(&Self::DESC); - let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + let volume_layout = device.create_bind_group_layout(&Self::DESC); + let volume_group = device.create_bind_group(&wgpu::BindGroupDescriptor { label: Some("Volume Bind Group"), - layout: &bind_group_layout, + layout: &volume_layout, entries: &[ wgpu::BindGroupEntry { binding: 0, @@ -100,11 +101,12 @@ impl Volume { ], }); - Ok(Volume { - _texture: texture, - bind_group, - _sampler: sampler, - }) + Ok(( + Volume { + bind_group: volume_group, + }, + volume_layout, + )) } } diff --git a/src/event_loop.rs b/src/event_loop.rs index d1dc478..5e4a201 100644 --- a/src/event_loop.rs +++ b/src/event_loop.rs @@ -7,13 +7,17 @@ use winit::{ keyboard::{KeyCode, PhysicalKey}, }; -use crate::{context::Context, demos::RenderingDemo, Result}; +use crate::{ + demos::Demo, render_pipeline::RenderPipeline, rendering_context::Context, state::State, Result, +}; -#[tracing::instrument(skip(event_loop, ctx, rendering_algorithm))] +#[tracing::instrument(skip(event_loop, ctx, demo))] pub fn run( event_loop: EventLoop, - ctx: &mut Context, - rendering_algorithm: impl RenderingDemo, + mut ctx: Context, + state: &mut State, + render_pipeline: RenderPipeline, + demo: &impl Demo, ) -> Result<()> { let mut last_update = Instant::now(); let mut frame_count = 0; @@ -25,7 +29,7 @@ pub fn run( ref event, window_id, } if window_id == ctx.window().id() => { - if !ctx.input(event) { + if !state.process_input(event) { match event { WindowEvent::CloseRequested | WindowEvent::KeyboardInput { @@ -41,13 +45,22 @@ pub fn run( ctx.resize(*physical_size); } WindowEvent::RedrawRequested => { - debug!("Redraw requested"); - let duration = last_update.elapsed(); + // queue another redraw ctx.window().request_redraw(); - ctx.update(duration); - rendering_algorithm.compute(ctx).unwrap(); - match ctx.render() { + // update the state + { + state.update(); + demo.update_gpu_state(&ctx, state).unwrap(); + } + + // compute and render + let render_result = { + demo.compute_pass(&ctx).unwrap(); + render_pipeline.render_pass(&ctx) + }; + + match render_result { Ok(_) => { frame_count += 1; let now = Instant::now(); diff --git a/src/main.rs b/src/main.rs index 9a55be1..302ee79 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,14 +7,13 @@ use winit::{event_loop::EventLoop, window::WindowBuilder}; mod camera; mod cli; -mod context; mod demos; mod event_loop; mod render_pipeline; +mod rendering_context; +mod state; // Demos -use demos::simple::compute_pipeline; -use demos::simple::volume; use demos::simple::Simple; pub(crate) type Result = color_eyre::eyre::Result; @@ -30,19 +29,23 @@ fn main() -> Result<()> { } } -fn run() -> Result<()> { +fn run() -> Result<()> { // Setup event loop and window. 
let event_loop = EventLoop::new()?; let window = WindowBuilder::new() .with_title("Volym") .build(&event_loop)?; - // Create a rendering context - let mut ctx = pollster::block_on(context::Context::new(&window))?; + let ctx = pollster::block_on(rendering_context::Context::new(&window))?; + let aspect = ctx.surface_config.width as f32 / ctx.surface_config.height as f32; + let mut state = state::State::new(aspect); + let render_pipeline = render_pipeline::RenderPipeline::new(&ctx.device, &ctx.surface_config)?; + let output_texture_view = render_pipeline + .input_texture + .create_view(&wgpu::TextureViewDescriptor::default()); - // Init and run the rendering demo - let rendering_algorithm = Demo::init(&mut ctx)?; - event_loop::run(event_loop, &mut ctx, rendering_algorithm)?; + let demo = Demo::init(&ctx, &state, &output_texture_view)?; + event_loop::run(event_loop, ctx, &mut state, render_pipeline, &demo)?; Ok(()) } diff --git a/src/render_pipeline.rs b/src/render_pipeline.rs index c43ffcc..f68b180 100644 --- a/src/render_pipeline.rs +++ b/src/render_pipeline.rs @@ -1,11 +1,14 @@ +use crate::rendering_context::Context; +use tracing::debug; + /// Render pipeline that displays the texture on the screen use crate::Result; -use std::path::Path; #[derive(Debug)] pub struct RenderPipeline { pub pipeline: wgpu::RenderPipeline, - pub bind_group_layout: wgpu::BindGroupLayout, + pub input_texture_group: wgpu::BindGroup, + pub input_texture: wgpu::Texture, } pub const DESC_RENDER: wgpu::BindGroupLayoutDescriptor<'static> = wgpu::BindGroupLayoutDescriptor { @@ -30,14 +33,11 @@ pub const DESC_RENDER: wgpu::BindGroupLayoutDescriptor<'static> = wgpu::BindGrou ], }; impl RenderPipeline { - pub fn new( - device: &wgpu::Device, - shader_path: &Path, - config: &wgpu::SurfaceConfiguration, - ) -> Result { - let shader_contents = std::fs::read_to_string(shader_path)?; + pub fn new(device: &wgpu::Device, surface_config: &wgpu::SurfaceConfiguration) -> Result { + let shader_path = format!("{}/shaders/render.wgsl", env!("CARGO_MANIFEST_DIR")); + let shader_contents = std::fs::read_to_string(&shader_path)?; let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor { - label: Some(shader_path.to_str().unwrap()), + label: Some(shader_path.as_str()), source: wgpu::ShaderSource::Wgsl(shader_contents.into()), }); let storage_texture_layout = device.create_bind_group_layout(&DESC_RENDER); @@ -62,7 +62,7 @@ impl RenderPipeline { module: &shader, entry_point: Some("fs_main"), targets: &[Some(wgpu::ColorTargetState { - format: config.format, + format: surface_config.format, blend: Some(wgpu::BlendState::REPLACE), write_mask: wgpu::ColorWrites::ALL, })], @@ -78,11 +78,104 @@ impl RenderPipeline { cache: None, }); + // TODO: maybe handle resizing? 
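Editorial aside on the "maybe handle resizing?" TODO above, not part of this patch: resizing would mean recreating the storage texture and its bind group at the new surface size and pointing the compute pass at the new view. A sketch of a hypothetical helper, mirroring the creation code that follows:

// Hypothetical helper — not in the patch; mirrors the texture/bind group creation below.
fn recreate_input_texture(
    device: &wgpu::Device,
    surface_config: &wgpu::SurfaceConfiguration,
    layout: &wgpu::BindGroupLayout,
    sampler: &wgpu::Sampler,
) -> (wgpu::Texture, wgpu::BindGroup) {
    // Same descriptor as the initial texture, but at the current surface size.
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Compute Output Texture"),
        size: wgpu::Extent3d {
            width: surface_config.width,
            height: surface_config.height,
            depth_or_array_layers: 1,
        },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Rgba8Unorm,
        usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING,
        view_formats: &[],
    });
    let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
    // Rebuild the render-side bind group against the new view.
    let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
        label: Some("Render Bind Group"),
        layout,
        entries: &[
            wgpu::BindGroupEntry {
                binding: 0,
                resource: wgpu::BindingResource::Sampler(sampler),
            },
            wgpu::BindGroupEntry {
                binding: 1,
                resource: wgpu::BindingResource::TextureView(&view),
            },
        ],
    });
    (texture, bind_group)
}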
+ let input_texture = device.create_texture(&wgpu::TextureDescriptor { + label: Some("Compute Output Texture"), + size: wgpu::Extent3d { + width: surface_config.width, + height: surface_config.height, + depth_or_array_layers: 1, + }, + mip_level_count: 1, + sample_count: 1, + dimension: wgpu::TextureDimension::D2, + format: wgpu::TextureFormat::Rgba8Unorm, + usage: wgpu::TextureUsages::STORAGE_BINDING | wgpu::TextureUsages::TEXTURE_BINDING, + view_formats: &[], + }); + let sampler = device.create_sampler(&wgpu::SamplerDescriptor { + label: Some("Sampler"), + address_mode_u: wgpu::AddressMode::ClampToEdge, + address_mode_v: wgpu::AddressMode::ClampToEdge, + address_mode_w: wgpu::AddressMode::ClampToEdge, + mag_filter: wgpu::FilterMode::Linear, + min_filter: wgpu::FilterMode::Linear, + mipmap_filter: wgpu::FilterMode::Linear, + ..Default::default() + }); + let texture_view = input_texture.create_view(&wgpu::TextureViewDescriptor::default()); + let input_texture_group = device.create_bind_group(&wgpu::BindGroupDescriptor { + label: Some("Render Bind Group"), + layout: &storage_texture_layout, + entries: &[ + wgpu::BindGroupEntry { + binding: 0, + resource: wgpu::BindingResource::Sampler(&sampler), + }, + wgpu::BindGroupEntry { + binding: 1, + resource: wgpu::BindingResource::TextureView(&texture_view), + }, + ], + }); + Ok(Self { pipeline, - bind_group_layout: storage_texture_layout, + input_texture_group, + input_texture, }) } + + #[tracing::instrument(skip(self))] + pub fn render_pass(&self, ctx: &Context) -> std::result::Result<(), wgpu::SurfaceError> { + let output = ctx.surface.get_current_texture()?; + let view = output + .texture + .create_view(&wgpu::TextureViewDescriptor::default()); + let mut encoder = ctx + .device + .create_command_encoder(&wgpu::CommandEncoderDescriptor { + label: Some("Render Encoder"), + }); + + // render pass + { + let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor { + label: Some("Render Pass"), + color_attachments: &[ + // This is what @location(0) in the fragment shader targets + Some(wgpu::RenderPassColorAttachment { + view: &view, + resolve_target: None, + ops: wgpu::Operations { + load: wgpu::LoadOp::Clear(wgpu::Color::default()), + store: wgpu::StoreOp::Store, + }, + }), + ], + depth_stencil_attachment: None, + timestamp_writes: None, + occlusion_query_set: None, + }); + + render_pass.set_pipeline(&self.pipeline); + render_pass.set_bind_group(0, &self.input_texture_group, &[]); + debug!(target = "render_pass", "Render bind group set"); + render_pass.draw(0..6, 0..1); // Draw a quad (2*3 vertices) + debug!(target = "render_pass", "Draw done"); + } + + ctx.queue.submit(Some(encoder.finish())); + + // Before presenting to the screen we need to let the compositor know - This effectively + // syncs us to the monitor refresh rate. 
+        // https://docs.rs/winit/latest/winit/window/struct.Window.html#platform-specific-2
+        ctx.window.pre_present_notify();
+
+        output.present();
+
+        Ok(())
+    }
 }
 
 impl AsRef<wgpu::RenderPipeline> for RenderPipeline {
diff --git a/src/rendering_context.rs b/src/rendering_context.rs
new file mode 100644
index 0000000..97b49ba
--- /dev/null
+++ b/src/rendering_context.rs
@@ -0,0 +1,79 @@
+// lib.rs
+use winit::window::Window;
+
+use crate::Result;
+
+#[derive(Debug)]
+pub struct Context<'a> {
+    pub surface: wgpu::Surface<'a>,
+    pub device: wgpu::Device,
+    pub queue: wgpu::Queue,
+    pub surface_config: wgpu::SurfaceConfiguration,
+    pub size: winit::dpi::PhysicalSize<u32>,
+    pub window: &'a Window,
+}
+
+impl<'a> Context<'a> {
+    // Creating some of the wgpu types requires async code
+    pub async fn new(window: &'a Window) -> Result<Context<'a>> {
+        let instance = wgpu::Instance::default();
+        let surface = instance.create_surface(window).unwrap();
+        let adapter = instance
+            .request_adapter(&wgpu::RequestAdapterOptions {
+                power_preference: wgpu::PowerPreference::default(),
+                compatible_surface: Some(&surface),
+                force_fallback_adapter: false,
+            })
+            .await
+            .unwrap();
+
+        let (device, queue) = adapter
+            .request_device(&wgpu::DeviceDescriptor::default(), None)
+            .await
+            .unwrap();
+
+        let surface_caps = surface.get_capabilities(&adapter);
+        let surface_format = surface_caps
+            .formats
+            .iter()
+            .find(|f| f.is_srgb())
+            .copied()
+            .unwrap_or(surface_caps.formats[0]);
+
+        let size = window.inner_size();
+        let config = wgpu::SurfaceConfiguration {
+            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
+            format: surface_format,
+            width: size.width,
+            height: size.height,
+            present_mode: surface_caps.present_modes[0],
+            alpha_mode: surface_caps.alpha_modes[0],
+            view_formats: vec![],
+            desired_maximum_frame_latency: 2,
+        };
+
+        //let pipelines = Pipelines::new_from_demo(&simple::Simple, &device, &config)?;
+
+        Ok(Self {
+            window,
+            surface,
+            device,
+            queue,
+            surface_config: config,
+            size,
+        })
+    }
+
+    pub fn window(&self) -> &Window {
+        self.window
+    }
+
+    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
+        if new_size.width > 0 && new_size.height > 0 {
+            self.size = new_size;
+            self.surface_config.width = new_size.width;
+            self.surface_config.height = new_size.height;
+            self.surface.configure(&self.device, &self.surface_config);
+        }
+    }
+}
diff --git a/src/state.rs b/src/state.rs
new file mode 100644
index 0000000..efba756
--- /dev/null
+++ b/src/state.rs
@@ -0,0 +1,78 @@
+use tracing::debug;
+use winit::event::{ElementState, MouseButton, WindowEvent};
+
+use crate::camera::{Camera, CameraController};
+
+#[derive(Debug)]
+pub struct State {
+    pub camera: Camera,
+    pub camera_controller: CameraController,
+    mouse_pressed: bool,
+    last_mouse_position: Option<(f64, f64)>,
+}
+
+impl State {
+    pub fn new(aspect: f32) -> Self {
+        let camera = crate::camera::Camera::new(aspect);
+        Self {
+            camera,
+            camera_controller: CameraController::new(0.2, 0.2),
+            mouse_pressed: false,
+            last_mouse_position: None,
+        }
+    }
+
+    pub fn process_input(&mut self, event: &WindowEvent) -> bool {
+        let r = match event {
+            //WindowEvent::KeyboardInput {
+            //    event:
+            //        KeyEvent {
+            //            physical_key: PhysicalKey::Code(key),
+            //            state,
+            //            ..
+            //        },
+            //    ..
+            //} => self.camera_controller.process_keyboard(*key, *state),
+            WindowEvent::CursorMoved { position, .. } => {
+                if self.mouse_pressed {
+                    let current_pos = (position.x, position.y);
+
+                    // Calculate delta movement when mouse is pressed
+                    if let Some(last_pos) = self.last_mouse_position {
+                        let dx = current_pos.0 - last_pos.0;
+                        let dy = current_pos.1 - last_pos.1;
+
+                        // Use the existing process_mouse method
+                        self.camera_controller.process_mouse(dx, dy);
+                    }
+
+                    // Update last mouse position
+                    self.last_mouse_position = Some(current_pos);
+                }
+                true
+            }
+            WindowEvent::MouseWheel { delta, .. } => {
+                self.camera_controller.process_scroll(delta);
+                true
+            }
+            WindowEvent::MouseInput {
+                button: MouseButton::Left,
+                state,
+                ..
+            } => {
+                self.mouse_pressed = *state == ElementState::Pressed;
+                true
+            }
+            _ => false,
+        };
+
+        if r {
+            debug!(target = "input", "Processed event: {:?}", event);
+        }
+        r
+    }
+
+    pub fn update(&mut self) {
+        self.camera_controller.update_camera(&mut self.camera);
+    }
+}
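Editorial note on `compute_base::CameraUniforms`, not part of this patch: the `_padding: f32` field plus `#[repr(C, align(16))]` keeps the Rust struct matching WGSL uniform layout rules, where a `vec3<f32>` member occupies 12 bytes but the struct size rounds up to its 16-byte alignment. A small layout test along these lines (hypothetical, added for illustration) would pin that down:

// Hypothetical test for src/demos/compute_base.rs — not in the patch.
// 2 x [[f32; 4]; 4] (64 B each) + [f32; 3] (12 B) + f32 padding (4 B) = 144 B,
// a multiple of the declared 16-byte alignment, matching the WGSL uniform struct.
#[cfg(test)]
mod layout_tests {
    use super::CameraUniforms;

    #[test]
    fn camera_uniforms_match_wgsl_layout() {
        assert_eq!(std::mem::size_of::<CameraUniforms>(), 144);
        assert_eq!(std::mem::align_of::<CameraUniforms>(), 16);
    }
}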