// wgpu-tutorial/src/main.rs
// Adrian Hedqvist, commit e7e251d29d: tutorial10-lighting
// (also switches back to cgmath from nalgebra-glm)
// 2020-10-09 23:32:31 +02:00
use model::{DrawLight, DrawModel, Model, Vertex};
use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::{Window, WindowBuilder},
};
use cgmath::prelude::*;
mod camera;
mod light;
mod model;
mod texture;
type Vec3 = cgmath::Vector3<f32>;
type Mat4 = cgmath::Matrix4<f32>;
const NUM_INSTANCES_PER_ROW: u32 = 11;
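// main() builds the window, blocks on the async GPU setup in State::new, and then
// drives the winit event loop: input is forwarded to State, resizes rebuild the
// swap chain, and MainEventsCleared triggers update() plus a redraw request.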
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("wgpu tutorial")
.build(&event_loop)
.expect("Failed to create window");
let mut state = futures::executor::block_on(State::new(&window));
event_loop.run(move |event, _, control_flow| match event {
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => {
if !state.input(event) {
match event {
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::KeyboardInput { input, .. } => match input {
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
} => *control_flow = ControlFlow::Exit,
_ => {}
},
WindowEvent::Resized(physical_size) => {
state.resize(*physical_size);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
state.resize(**new_inner_size);
}
_ => {}
}
}
}
Event::RedrawRequested(_) => {
state.render();
}
Event::MainEventsCleared => {
state.update();
window.request_redraw();
}
_ => {}
});
}
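// Per-frame uniform data shared with the shaders: the camera position and its
// combined view-projection matrix. #[repr(C)] plus the manual bytemuck impls
// below allow casting it to bytes for upload; cgmath types do not implement
// Pod, so the derive macros cannot be used here.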
#[repr(C)]
#[derive(Debug, Copy, Clone)]
struct Uniforms {
view_position: cgmath::Vector4<f32>,
view_proj: cgmath::Matrix4<f32>,
}
impl Uniforms {
fn new() -> Self {
Self {
view_position: Zero::zero(),
view_proj: cgmath::Matrix4::identity(),
}
}
fn update_view_proj(&mut self, camera: &camera::Camera) {
self.view_position = camera.eye.to_homogeneous();
self.view_proj = camera.build_view_projection_matrix();
}
}
unsafe impl bytemuck::Pod for Uniforms {}
unsafe impl bytemuck::Zeroable for Uniforms {}
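// One Instance per rendered cube: position and rotation on the CPU side.
// InstanceRaw is the flattened model matrix that is actually uploaded to the
// GPU as a read-only storage buffer (bound in uniform_bind_group below).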
struct Instance {
position: cgmath::Vector3<f32>,
rotation: cgmath::Quaternion<f32>,
}
impl Instance {
fn to_raw(&self) -> InstanceRaw {
InstanceRaw {
model: cgmath::Matrix4::from_translation(self.position)
* cgmath::Matrix4::from(self.rotation),
}
}
}
#[repr(C)]
#[derive(Copy, Clone)]
struct InstanceRaw {
model: cgmath::Matrix4<f32>,
}
unsafe impl bytemuck::Pod for InstanceRaw {}
unsafe impl bytemuck::Zeroable for InstanceRaw {}
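// All long-lived GPU state: device and queue, swap chain, depth texture, camera,
// the uniform / instance / light buffers with their bind groups, and the two
// render pipelines (one for the model instances, one for drawing the light).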
struct State {
instance: wgpu::Instance,
surface: wgpu::Surface,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
sc_desc: wgpu::SwapChainDescriptor,
swap_chain: wgpu::SwapChain,
depth_texture: texture::Texture,
camera: camera::Camera,
camera_controller: camera::CameraController,
uniforms: Uniforms,
uniform_buffer: wgpu::Buffer,
uniform_bind_group: wgpu::BindGroup,
obj_model: Model,
instances: Vec<Instance>,
instance_buffer: wgpu::Buffer,
light: light::Light,
light_buffer: wgpu::Buffer,
light_bind_group: wgpu::BindGroup,
light_render_pipeline: wgpu::RenderPipeline,
render_pipeline: wgpu::RenderPipeline,
size: winit::dpi::PhysicalSize<u32>,
}
impl State {
async fn new(window: &Window) -> Self {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::BackendBit::PRIMARY);
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
})
.await
.expect("Could not get surface");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::default(),
limits: Default::default(),
shader_validation: true,
},
None,
)
.await
.expect("Failed to get device");
let sc_desc = wgpu::SwapChainDescriptor {
usage: wgpu::TextureUsage::OUTPUT_ATTACHMENT,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
let swap_chain = device.create_swap_chain(&surface, &sc_desc);
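// Layout for the texture bind group: a sampled diffuse texture at binding 0 and
// its sampler at binding 1, both visible to the fragment stage. The layout is
// handed to Model::load, which is expected to build the actual bind groups from it.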
let texture_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("texture_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::SampledTexture {
dimension: wgpu::TextureViewDimension::D2,
// The diffuse texture is a float-sampled format (e.g. Rgba8UnormSrgb),
// so the component type should be Float rather than Uint.
component_type: wgpu::TextureComponentType::Float,
multisampled: false,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::Sampler { comparison: false },
count: None,
},
],
});
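// Load the cube model. The res/ directory is expected to have been copied into
// OUT_DIR at build time (typically by a build script).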
let res_dir = std::path::Path::new(env!("OUT_DIR")).join("res");
let obj_model = Model::load(
&device,
&queue,
&texture_bind_group_layout,
res_dir.join("cube.obj"),
)
.unwrap();
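// Lay the instances out on an N x N grid (N = NUM_INSTANCES_PER_ROW) centered on
// the origin, SPACE_BETWEEN units apart. Each cube is rotated 45 degrees around
// the axis pointing away from the center; the cube at the origin gets an explicit
// zero rotation because its position vector cannot be normalized.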
const SPACE_BETWEEN: f32 = 3.0;
let instances: Vec<Instance> = (0..NUM_INSTANCES_PER_ROW)
.flat_map(|z| {
(0..NUM_INSTANCES_PER_ROW).map(move |x| {
let x = SPACE_BETWEEN * (x as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let z = SPACE_BETWEEN * (z as f32 - NUM_INSTANCES_PER_ROW as f32 / 2.0);
let position: Vec3 = (x, 0.0, z).into();
let rotation = if Vec3::zero() == position {
cgmath::Quaternion::from_axis_angle(Vec3::unit_z(), cgmath::Deg(0.0))
} else {
cgmath::Quaternion::from_axis_angle(position.normalize(), cgmath::Deg(45.0))
};
Instance { position, rotation }
})
})
.collect();
let instance_data: Vec<_> = instances.iter().map(Instance::to_raw).collect();
let instance_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("instance_buffer"),
contents: bytemuck::cast_slice(&instance_data),
usage: wgpu::BufferUsage::STORAGE,
});
let camera = camera::Camera {
eye: (0.0, 1.0, 2.0).into(),
target: (0.0, 0.0, 0.0).into(),
up: (0.0, 1.0, 0.0).into(),
aspect: sc_desc.width as f32 / sc_desc.height as f32,
fovy: 45.0,
znear: 0.1,
zfar: 100.0,
};
let camera_controller = camera::CameraController::new(0.2);
let mut uniforms = Uniforms::new();
uniforms.update_view_proj(&camera);
let uniform_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("uniform_buffer"),
contents: bytemuck::cast_slice(&[uniforms]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
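// Layout for the per-frame bind group: binding 0 is the camera uniform buffer
// (vertex and fragment stages), binding 1 is the instance-matrix storage buffer
// (read-only, vertex stage only).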
let uniform_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: Some("uniform_bind_group_layout"),
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStage::VERTEX,
ty: wgpu::BindingType::StorageBuffer {
dynamic: false,
min_binding_size: None,
readonly: true,
},
count: None,
},
],
});
let uniform_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: Some("uniform_bind_group"),
layout: &uniform_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(uniform_buffer.slice(..)),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Buffer(instance_buffer.slice(..)),
},
],
});
// Lights: a single light (position + color) in its own uniform buffer, bound to
// both render pipelines.
let light = light::Light::new((2.0, 2.0, 2.0).into(), (1.0, 1.0, 1.0).into());
let light_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Light VB"),
contents: bytemuck::cast_slice(&[light]),
usage: wgpu::BufferUsage::UNIFORM | wgpu::BufferUsage::COPY_DST,
});
let light_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStage::VERTEX | wgpu::ShaderStage::FRAGMENT,
ty: wgpu::BindingType::UniformBuffer {
dynamic: false,
min_binding_size: None,
},
count: None,
}],
label: None,
});
let light_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &light_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(light_buffer.slice(..)),
}],
label: None,
});
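// Two pipelines share the helper at the bottom of the file: the main pipeline
// shades the textured, lit model instances, while the light pipeline uses
// light.vert/light.frag, which in this tutorial render a marker for the light
// source itself.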
let render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("render_pipeline_layout"),
bind_group_layouts: &[
&texture_bind_group_layout,
&uniform_bind_group_layout,
&light_bind_group_layout,
],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
&layout,
sc_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc()],
wgpu::include_spirv!("shader.vert.spv"),
wgpu::include_spirv!("shader.frag.spv"),
)
};
let light_render_pipeline = {
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("light_render_pipeline_layout"),
bind_group_layouts: &[&uniform_bind_group_layout, &light_bind_group_layout],
push_constant_ranges: &[],
});
create_render_pipeline(
&device,
&layout,
sc_desc.format,
Some(texture::Texture::DEPTH_FORMAT),
&[model::ModelVertex::desc()],
wgpu::include_spirv!("light.vert.spv"),
wgpu::include_spirv!("light.frag.spv"),
)
};
let depth_texture =
texture::Texture::create_depth_texture(&device, &sc_desc, "depth_texture");
Self {
instance,
surface,
adapter,
device,
queue,
sc_desc,
swap_chain,
camera,
camera_controller,
depth_texture,
uniforms,
uniform_buffer,
uniform_bind_group,
obj_model,
light,
light_buffer,
light_bind_group,
light_render_pipeline,
instances,
instance_buffer,
render_pipeline,
size,
}
}
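// On resize: store the new size, update the swap chain descriptor, rebuild the
// depth texture and swap chain to match, and fix up the camera's aspect ratio.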
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
self.size = new_size;
self.sc_desc.width = new_size.width;
self.sc_desc.height = new_size.height;
self.depth_texture =
texture::Texture::create_depth_texture(&self.device, &self.sc_desc, "depth_texture");
self.camera.aspect = new_size.width as f32 / new_size.height as f32;
self.swap_chain = self.device.create_swap_chain(&self.surface, &self.sc_desc);
}
fn input(&mut self, event: &WindowEvent) -> bool {
self.camera_controller.process_events(event)
}
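// Per-frame update: advance the camera from controller input, rewrite the camera
// uniforms via a staging-buffer copy, rotate the light one degree around the Y
// axis, and push its new state with Queue::write_buffer before submitting.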
fn update(&mut self) {
self.camera_controller.update_camera(&mut self.camera);
self.uniforms.update_view_proj(&self.camera);
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("update encoder"),
});
let staging_buffer = self
.device
.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("staging_buffer"),
contents: bytemuck::cast_slice(&[self.uniforms]),
usage: wgpu::BufferUsage::COPY_SRC,
});
encoder.copy_buffer_to_buffer(
&staging_buffer,
0,
&self.uniform_buffer,
0,
std::mem::size_of::<Uniforms>() as wgpu::BufferAddress,
);
self.light.position =
cgmath::Quaternion::from_axis_angle((0.0, 1.0, 0.0).into(), cgmath::Deg(1.0))
* self.light.position;
self.queue
.write_buffer(&self.light_buffer, 0, bytemuck::cast_slice(&[self.light]));
self.queue.submit(Some(encoder.finish()));
}
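// Render a single pass: clear color and depth, draw the light marker with the
// light pipeline, then draw every cube instance with the main pipeline. The
// render pass borrows the encoder, so it must be dropped before encoder.finish().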
fn render(&mut self) {
let frame = self
.swap_chain
.get_current_frame()
.expect("Failed getting frame");
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
color_attachments: &[wgpu::RenderPassColorAttachmentDescriptor {
attachment: &frame.output.view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachmentDescriptor {
attachment: &self.depth_texture.view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: true,
}),
stencil_ops: None,
}),
});
render_pass.set_pipeline(&self.light_render_pipeline);
render_pass.draw_light_model_instanced(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
0..self.instances.len() as u32,
);
render_pass.set_pipeline(&self.render_pipeline);
render_pass.draw_model_instanced(
&self.obj_model,
&self.uniform_bind_group,
&self.light_bind_group,
0..self.instances.len() as u32,
);
drop(render_pass);
self.queue.submit(Some(encoder.finish()));
}
}
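// Shared pipeline builder: compiles the given SPIR-V vertex and fragment modules
// and creates a triangle-list pipeline with back-face culling, REPLACE blending,
// optional depth testing (Less, writes enabled), and u32 indices, targeting the
// given color format.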
fn create_render_pipeline(
device: &wgpu::Device,
layout: &wgpu::PipelineLayout,
color_format: wgpu::TextureFormat,
depth_format: Option<wgpu::TextureFormat>,
vertex_descs: &[wgpu::VertexBufferDescriptor],
vs_src: wgpu::ShaderModuleSource,
fs_src: wgpu::ShaderModuleSource,
) -> wgpu::RenderPipeline {
let vs_module = device.create_shader_module(vs_src);
let fs_module = device.create_shader_module(fs_src);
device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("render_pipeline"),
layout: Some(layout),
vertex_stage: wgpu::ProgrammableStageDescriptor {
module: &vs_module,
entry_point: "main",
},
fragment_stage: Some(wgpu::ProgrammableStageDescriptor {
module: &fs_module,
entry_point: "main",
}),
rasterization_state: Some(wgpu::RasterizationStateDescriptor {
front_face: wgpu::FrontFace::Ccw,
cull_mode: wgpu::CullMode::Back,
clamp_depth: false,
depth_bias: 0,
depth_bias_slope_scale: 0.0,
depth_bias_clamp: 0.0,
}),
primitive_topology: wgpu::PrimitiveTopology::TriangleList,
color_states: &[wgpu::ColorStateDescriptor {
format: color_format,
alpha_blend: wgpu::BlendDescriptor::REPLACE,
color_blend: wgpu::BlendDescriptor::REPLACE,
write_mask: wgpu::ColorWrite::ALL,
}],
depth_stencil_state: depth_format.map(|format| wgpu::DepthStencilStateDescriptor {
format,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilStateDescriptor::default(),
}),
vertex_state: wgpu::VertexStateDescriptor {
index_format: wgpu::IndexFormat::Uint32,
vertex_buffers: vertex_descs,
},
sample_count: 1,
sample_mask: !0,
alpha_to_coverage_enabled: false,
})
}