Implement a Render Graph #16

Merged
SeanOMik merged 20 commits from feature/render-graph into main 2024-06-15 22:54:47 +00:00
9 changed files with 606 additions and 108 deletions
Showing only changes of commit c846d52b0d

View File

@@ -125,7 +125,7 @@ fn setup_scene_plugin(game: &mut Game) {
     world.spawn((
         cube_mesh.clone(),
         WorldTransform::default(),
-        Transform::from_xyz(0.0, -5.0, -2.0),
+        Transform::from_xyz(0.0, 0.0, -2.0),
     ));
     {

View File

@@ -205,7 +205,7 @@ impl RenderGraph {
     #[instrument(skip(self, world))]
     pub fn prepare(&mut self, world: &mut World) {
         // prepare all passes
-        let mut context = RenderGraphContext::new(&self.queue, None);
+        let mut context = RenderGraphContext::new(&self.device, &self.queue, None);
         for (_, pass) in &mut self.passes {
             let mut inner = pass.inner.borrow_mut();
             inner.prepare(world, &mut context);
@@ -266,8 +266,10 @@
             None
         };
-        let queue = self.queue.clone(); // clone is required to appease the borrow checker
-        let mut context = RenderGraphContext::new(&queue, encoder);
+        // clone of the Rc's is required to appease the borrow checker
+        let device = self.device.clone();
+        let queue = self.queue.clone();
+        let mut context = RenderGraphContext::new(&device, &queue, encoder);

         // all encoders need to be submitted before a presenter node is executed.
         if pass_desc.pass_type == RenderPassType::Presenter {
@@ -407,15 +409,17 @@ pub(crate) struct GraphBufferWrite {
 pub struct RenderGraphContext<'a> {
     /// Becomes None when the encoder is submitted
     pub(crate) encoder: Option<wgpu::CommandEncoder>,
+    pub(crate) device: &'a wgpu::Device,
     pub(crate) queue: &'a wgpu::Queue,
     pub(crate) buffer_writes: VecDeque<GraphBufferWrite>,
     renderpass_desc: Vec<wgpu::RenderPassDescriptor<'a, 'a>>,
 }

 impl<'a> RenderGraphContext<'a> {
-    pub fn new(queue: &'a wgpu::Queue, encoder: Option<wgpu::CommandEncoder>) -> Self {
+    pub(crate) fn new(device: &'a wgpu::Device, queue: &'a wgpu::Queue, encoder: Option<wgpu::CommandEncoder>) -> Self {
         Self {
             encoder,
+            device,
             queue,
             buffer_writes: Default::default(),
             renderpass_desc: vec![],
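With the device now carried on the context next to the queue, a pass can allocate GPU resources from inside `prepare` without holding its own `Rc<wgpu::Device>`. A minimal sketch of that usage; the pass method body below is hypothetical, and only the `RenderGraphContext` fields and the wgpu calls come from this diff:

```rust
// Hypothetical pass method illustrating the new `context.device` field.
fn prepare(&mut self, _world: &mut World, context: &mut RenderGraphContext) {
    // Allocate a small uniform buffer through the device that now rides on the context.
    let buffer = context.device.create_buffer(&wgpu::BufferDescriptor {
        label: Some("example_uniforms"), // hypothetical buffer, not part of this commit
        size: 256,
        usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        mapped_at_creation: false,
    });
    // The queue is still available for writes, exactly as before this change.
    context.queue.write_buffer(&buffer, 0, &[0u8; 256]);
}
```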

View File

@@ -90,7 +90,7 @@ impl RenderGraphPass for BasePass {
         let mut desc = RenderGraphPassDesc::new(
             graph.next_id(),
             "base",
-            RenderPassType::Render,
+            RenderPassType::Node,
             None,
             vec![
                 ("depth_texture", depth_texture_bg, Some(depth_texture_bgl)),
@@ -117,7 +117,7 @@
             Some(SlotValue::Lazy),
         );
         desc.add_texture_view_slot(
-            self.window_tv_id,
+            graph.next_id(),
             "depth_texture_view",
             SlotAttribute::Output,
             Some(SlotValue::TextureView(depth_texture_view)),

View File

@@ -0,0 +1,560 @@
use std::{collections::{HashSet, VecDeque}, rc::Rc};
use glam::Vec3;
use itertools::izip;
use lyra_ecs::{query::{filter::{Has, Not, Or}, Entities, Res, TickOf}, relation::{ChildOf, RelationOriginComponent}, Component, Entity};
use lyra_math::Transform;
use lyra_resource::{gltf::Mesh, ResHandle};
use lyra_scene::{SceneGraph, WorldTransform};
use rustc_hash::FxHashMap;
use tracing::{debug, instrument, warn};
use uuid::Uuid;
use wgpu::util::DeviceExt;
use crate::{
render::{
desc_buf_lay::DescVertexBufferLayout, graph::{
RenderGraphContext, RenderGraphPass, RenderGraphPassDesc,
RenderPassType,
}, material::{Material, MaterialUniform}, render_buffer::{BufferStorage, BufferWrapper}, render_job::RenderJob, resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState}, texture::RenderTexture, transform_buffer_storage::{TransformBuffers, TransformGroup}, vertex::Vertex
},
DeltaTime,
};
type MeshHandle = ResHandle<Mesh>;
type SceneHandle = ResHandle<SceneGraph>;
struct MeshBufferStorage {
buffer_vertex: BufferStorage,
buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,
//#[allow(dead_code)]
//render_texture: Option<RenderTexture>,
material: Option<Rc<Material>>,
// The index of the transform for this entity.
// The tuple is structured like this: (transform index, index of transform inside the buffer)
//transform_index: TransformBufferIndices,
}
#[derive(Clone, Debug, Component)]
struct InterpTransform {
last_transform: Transform,
alpha: f32,
}
#[derive(Default)]
pub struct MeshPass {
transforms: Option<TransformBuffers>,
mesh_buffers: FxHashMap<uuid::Uuid, MeshBufferStorage>,
render_jobs: VecDeque<RenderJob>,
texture_bind_group_layout: Option<Rc<wgpu::BindGroupLayout>>,
material_buffer: Option<wgpu::Buffer>,
material_buffers: FxHashMap<uuid::Uuid, Rc<Material>>,
entity_meshes: FxHashMap<Entity, uuid::Uuid>,
default_texture: Option<RenderTexture>,
}
impl MeshPass {
pub fn new() -> Self {
Self::default()
}
/// Checks if the mesh buffers in the GPU need to be updated.
#[instrument(skip(self, device, queue, mesh_han))]
fn check_mesh_buffers(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, mesh_han: &ResHandle<Mesh>) {
let mesh_uuid = mesh_han.uuid();
if let (Some(mesh), Some(buffers)) = (mesh_han.data_ref(), self.mesh_buffers.get_mut(&mesh_uuid)) {
// check if the buffer sizes don't match; if they don't, completely remake the buffers
let vertices = mesh.position().unwrap();
if buffers.buffer_vertex.count() != vertices.len() {
debug!("Recreating buffers for mesh {}", mesh_uuid.to_string());
let (vert, idx) = self.create_vertex_index_buffers(device, &mesh);
// have to re-get buffers because of borrow checker
let buffers = self.mesh_buffers.get_mut(&mesh_uuid).unwrap();
buffers.buffer_indices = idx;
buffers.buffer_vertex = vert;
return;
}
// update vertices
let vertex_buffer = buffers.buffer_vertex.buffer();
let vertices = vertices.as_slice();
// align the vertices to 4 bytes (u32 is 4 bytes, which is wgpu::COPY_BUFFER_ALIGNMENT)
let (_, vertices, _) = bytemuck::pod_align_to::<Vec3, u32>(vertices);
queue.write_buffer(vertex_buffer, 0, bytemuck::cast_slice(vertices));
// update the indices if they're given
if let Some(index_buffer) = buffers.buffer_indices.as_ref() {
let aligned_indices = match mesh.indices.as_ref().unwrap() {
// U16 indices need to be aligned to u32 for wgpu, which is 4 bytes in size.
lyra_resource::gltf::MeshIndices::U16(v) => bytemuck::pod_align_to::<u16, u32>(v).1,
lyra_resource::gltf::MeshIndices::U32(v) => bytemuck::pod_align_to::<u32, u32>(v).1,
};
let index_buffer = index_buffer.1.buffer();
queue.write_buffer(index_buffer, 0, bytemuck::cast_slice(aligned_indices));
}
}
}
#[instrument(skip(self, device, mesh))]
fn create_vertex_index_buffers(&mut self, device: &wgpu::Device, mesh: &Mesh) -> (BufferStorage, Option<(wgpu::IndexFormat, BufferStorage)>) {
let positions = mesh.position().unwrap();
let tex_coords: Vec<glam::Vec2> = mesh.tex_coords().cloned()
.unwrap_or_else(|| vec![glam::Vec2::new(0.0, 0.0); positions.len()]);
let normals = mesh.normals().unwrap();
assert!(positions.len() == tex_coords.len() && positions.len() == normals.len());
let mut vertex_inputs = vec![];
for (v, t, n) in izip!(positions.iter(), tex_coords.iter(), normals.iter()) {
vertex_inputs.push(Vertex::new(*v, *t, *n));
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(vertex_inputs.as_slice()),//vertex_combined.as_slice(),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
}
);
let vertex_buffer = BufferStorage::new(vertex_buffer, 0, vertex_inputs.len());
let indices = match mesh.indices.as_ref() {
Some(indices) => {
let (idx_type, len, contents) = match indices {
lyra_resource::gltf::MeshIndices::U16(v) => (wgpu::IndexFormat::Uint16, v.len(), bytemuck::cast_slice(v)),
lyra_resource::gltf::MeshIndices::U32(v) => (wgpu::IndexFormat::Uint32, v.len(), bytemuck::cast_slice(v)),
};
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents,
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
}
);
let buffer_indices = BufferStorage::new(index_buffer, 0, len);
Some((idx_type, buffer_indices))
},
None => {
None
}
};
( vertex_buffer, indices )
}
#[instrument(skip(self, device, queue, mesh))]
fn create_mesh_buffers(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, mesh: &Mesh) -> MeshBufferStorage {
let (vertex_buffer, buffer_indices) = self.create_vertex_index_buffers(device, mesh);
let material = mesh.material.as_ref()
.expect("Material resource not loaded yet");
let material_ref = material.data_ref()
.unwrap();
let material = self.material_buffers.entry(material.uuid())
.or_insert_with(|| {
debug!(uuid=material.uuid().to_string(), "Sending material to gpu");
Rc::new(Material::from_resource(&device, &queue, self.texture_bind_group_layout.clone().unwrap(), &material_ref))
});
// TODO: support material uniforms from multiple uniforms
let uni = MaterialUniform::from(&**material);
queue.write_buffer(&self.material_buffer.as_ref().unwrap(), 0, bytemuck::bytes_of(&uni));
MeshBufferStorage {
buffer_vertex: vertex_buffer,
buffer_indices,
material: Some(material.clone()),
}
}
/// Processes the mesh for the renderer, storing and creating buffers as needed. Returns true if a new mesh was processed.
#[instrument(skip(self, device, queue, transform, mesh, entity))]
fn process_mesh(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, entity: Entity, transform: Transform, mesh: &Mesh, mesh_uuid: Uuid) -> bool {
let _ = transform;
/* if self.transform_buffers.should_expand() {
self.transform_buffers.expand_buffers(&self.device);
}
self.transform_buffers.update_or_insert(&self.queue, &self.render_limits,
entity, || ( transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation) )); */
#[allow(clippy::map_entry)]
if !self.mesh_buffers.contains_key(&mesh_uuid) {
// create the mesh's buffers
let buffers = self.create_mesh_buffers(device, queue, mesh);
self.mesh_buffers.insert(mesh_uuid, buffers);
self.entity_meshes.insert(entity, mesh_uuid);
true
} else { false }
}
}
impl RenderGraphPass for MeshPass {
fn desc(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
) -> crate::render::graph::RenderGraphPassDesc {
let device = graph.device();
let transforms = TransformBuffers::new(device);
let transform_bgl = transforms.bindgroup_layout.clone();
self.transforms = Some(transforms);
let texture_bind_group_layout = Rc::new(RenderTexture::create_layout(&device));
self.texture_bind_group_layout = Some(texture_bind_group_layout.clone());
let (material_bgl, material_bg, material_buf, _) = BufferWrapper::builder()
.label_prefix("material")
.visibility(wgpu::ShaderStages::FRAGMENT)
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
.contents(&[MaterialUniform::default()])
.finish_parts(device);
let material_bgl = Rc::new(material_bgl);
let material_bg = Rc::new(material_bg);
self.material_buffer = Some(material_buf);
// load the default texture
let bytes = include_bytes!("../../default_texture.png");
self.default_texture = Some(RenderTexture::from_bytes(&device, &graph.queue, texture_bind_group_layout.clone(), bytes, "default_texture").unwrap());
// get surface config format
let main_rt = graph.slot_id("main_render_target")
.and_then(|s| graph.slot_value(s))
.and_then(|s| s.as_render_target())
.expect("missing main render target");
let surface_config_format = main_rt.surface_config.format;
drop(main_rt);
// get the id here to make borrow checker happy
let pass_id = graph.next_id();
let camera_bgl = graph.bind_group_layout(graph.bind_group_id("camera").unwrap());
let lights_bgl = graph.bind_group_layout(graph.bind_group_id("light_buffers").unwrap());
let light_grid_bgl = graph
.bind_group_layout(graph.bind_group_id("light_indices_grid")
.expect("Missing light grid bind group"));
let shader = Rc::new(Shader {
label: Some("base_shader".into()),
source: include_str!("../../shaders/base.wgsl").to_string(),
});
let desc = RenderGraphPassDesc::new(
pass_id,
"meshes",
RenderPassType::Render,
Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
label: Some("meshes".into()),
layouts: vec![
texture_bind_group_layout.clone(),
transform_bgl,
camera_bgl.clone(),
lights_bgl.clone(),
material_bgl.clone(),
texture_bind_group_layout,
light_grid_bgl.clone(),
],
push_constant_ranges: vec![],
vertex: VertexState {
module: shader.clone(),
entry_point: "vs_main".into(),
buffers: vec![
Vertex::desc().into(),
],
},
fragment: Some(FragmentState {
module: shader,
entry_point: "fs_main".into(),
targets: vec![Some(wgpu::ColorTargetState {
format: surface_config_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
depth_stencil: Some(wgpu::DepthStencilState {
format: RenderTexture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(), // TODO: stencil buffer
bias: wgpu::DepthBiasState::default(),
}),
primitive: wgpu::PrimitiveState::default(),
multisample: wgpu::MultisampleState::default(),
multiview: None,
})),
vec![
("material", material_bg, Some(material_bgl)),
],
);
desc
}
#[instrument(skip(self, world, context))]
fn prepare(&mut self, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
let device = context.device;
let queue = context.queue;
let render_limits = device.limits();
let last_epoch = world.current_tick();
let mut alive_entities = HashSet::new();
let view = world.view_iter::<(
Entities,
&Transform,
TickOf<Transform>,
Or<
(&MeshHandle, TickOf<MeshHandle>),
(&SceneHandle, TickOf<SceneHandle>)
>,
Option<&mut InterpTransform>,
Res<DeltaTime>,
)>();
// used to store InterpTransform components to add to entities later
let mut component_queue: Vec<(Entity, InterpTransform)> = vec![];
for (
entity,
transform,
_transform_epoch,
(
mesh_pair,
scene_pair
),
interp_tran,
delta_time,
) in view
{
alive_entities.insert(entity);
let interp_transform = match interp_tran {
Some(mut interp_transform) => {
// found in https://youtu.be/YJB1QnEmlTs?t=472
interp_transform.alpha = 1.0 - interp_transform.alpha.powf(**delta_time);
interp_transform.last_transform = interp_transform.last_transform.lerp(*transform, interp_transform.alpha);
interp_transform.last_transform
},
None => {
let interp = InterpTransform {
last_transform: *transform,
alpha: 0.5,
};
component_queue.push((entity, interp));
*transform
}
};
if let Some((mesh_han, mesh_epoch)) = mesh_pair {
if let Some(mesh) = mesh_han.data_ref() {
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// don't need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, interp_transform, &*mesh, mesh_han.uuid())
&& mesh_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
if transforms.needs_expand() {
debug!("Expanding transform buffers");
transforms.expand_buffers(device);
}
let group = TransformGroup::EntityRes(entity, mesh_han.uuid());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
group, interp_transform.calculate_mat4(), glam::Mat3::from_quat(interp_transform.rotation));
let material = mesh.material.as_ref().unwrap()
.data_ref().unwrap();
let shader = material.shader_uuid.unwrap_or(0);
let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
self.render_jobs.push_back(job);
}
}
if let Some((scene_han, scene_epoch)) = scene_pair {
if let Some(scene) = scene_han.data_ref() {
if scene_epoch == last_epoch {
let view = scene.world().view::<(Entities, &mut WorldTransform, &Transform, Not<Has<RelationOriginComponent<ChildOf>>>)>();
lyra_scene::system_update_world_transforms(scene.world(), view).unwrap();
}
for (mesh_han, pos) in scene.world().view_iter::<(&MeshHandle, &WorldTransform)>() {
if let Some(mesh) = mesh_han.data_ref() {
let mesh_interpo = interp_transform + **pos;
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// don't need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, mesh_interpo, &*mesh, mesh_han.uuid())
&& scene_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
if transforms.needs_expand() {
debug!("Expanding transform buffers");
transforms.expand_buffers(device);
}
let scene_mesh_group = TransformGroup::Res(scene_han.uuid(), mesh_han.uuid());
let group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
group, mesh_interpo.calculate_mat4(), glam::Mat3::from_quat(mesh_interpo.rotation) );
let material = mesh.material.as_ref().unwrap()
.data_ref().unwrap();
let shader = material.shader_uuid.unwrap_or(0);
let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
self.render_jobs.push_back(job);
}
}
}
}
}
for (en, interp) in component_queue {
world.insert(en, interp);
}
let transforms = self.transforms.as_mut().unwrap();
transforms.send_to_gpu(queue);
}
fn execute(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
desc: &crate::render::graph::RenderGraphPassDesc,
context: &mut crate::render::graph::RenderGraphContext,
) {
let encoder = context.encoder.as_mut().unwrap();
let view = graph
.slot_value(graph.slot_id("window_texture_view").unwrap())
.unwrap()
.as_texture_view();
let depth_view = graph
.slot_value(graph.slot_id("depth_texture_view").unwrap())
.unwrap()
.as_texture_view();
let camera_bg = graph
.bind_group(graph.bind_group_id("camera")
.expect("Missing camera bind group"));
let lights_bg = graph
.bind_group(graph.bind_group_id("light_buffers")
.expect("Missing lights bind group"));
let light_grid_bg = graph
.bind_group(graph.bind_group_id("light_indices_grid")
.expect("Missing light grid bind group"));
let material_bg = graph
.bind_group(graph.bind_group_id("material")
.expect("Missing material bind group"));
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
})],
// enable depth buffer
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: true,
}),
stencil_ops: None,
}),
});
let pipeline = graph.pipeline(desc.id);
pass.set_pipeline(&pipeline.as_render());
//let material_buffer_bg = self.material_buffer.as_ref().unwrap().bindgroup();
let default_texture = self.default_texture.as_ref().unwrap();
let transforms = self.transforms.as_mut().unwrap();
while let Some(job) = self.render_jobs.pop_front() {
// get the mesh (containing vertices) and the buffers from storage
let buffers = self.mesh_buffers.get(&job.mesh_uuid);
if buffers.is_none() {
warn!("Skipping job since its mesh is missing {:?}", job.mesh_uuid);
continue;
}
let buffers = buffers.unwrap();
// Bind the optional texture
if let Some(tex) = buffers.material.as_ref()
.and_then(|m| m.diffuse_texture.as_ref()) {
pass.set_bind_group(0, tex.bind_group(), &[]);
} else {
pass.set_bind_group(0, default_texture.bind_group(), &[]);
}
if let Some(tex) = buffers.material.as_ref()
.and_then(|m| m.specular.as_ref())
.and_then(|s| s.texture.as_ref().or(s.color_texture.as_ref())) {
pass.set_bind_group(5, tex.bind_group(), &[]);
} else {
pass.set_bind_group(5, default_texture.bind_group(), &[]);
}
// Get the bindgroup for job's transform and bind to it using an offset.
let bindgroup = transforms.bind_group(job.transform_id);
let offset = transforms.buffer_offset(job.transform_id);
pass.set_bind_group(1, bindgroup, &[ offset, ]);
pass.set_bind_group(2, &camera_bg, &[]);
pass.set_bind_group(3, &lights_bg, &[]);
pass.set_bind_group(4, &material_bg, &[]);
pass.set_bind_group(6, &light_grid_bg, &[]);
// if this mesh uses indices, use them to draw the mesh
if let Some((idx_type, indices)) = buffers.buffer_indices.as_ref() {
let indices_len = indices.count() as u32;
pass.set_vertex_buffer(buffers.buffer_vertex.slot(), buffers.buffer_vertex.buffer().slice(..));
pass.set_index_buffer(indices.buffer().slice(..), *idx_type);
pass.draw_indexed(0..indices_len, 0, 0..1);
} else {
let vertex_count = buffers.buffer_vertex.count();
pass.set_vertex_buffer(buffers.buffer_vertex.slot(), buffers.buffer_vertex.buffer().slice(..));
pass.draw(0..vertex_count as u32, 0..1);
}
}
}
}
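One detail from `prepare` above worth isolating: the interpolation alpha of an `InterpTransform` starts at 0.5 and is recomputed each frame from its previous value and the frame's delta time (the formula comes from the video linked in the code), and the stored transform is then lerped toward the current transform by that alpha. A standalone sketch of just that update, using made-up delta times:

```rust
// Mirrors the alpha update in MeshPass::prepare; the delta times are example values.
fn update_alpha(alpha: f32, delta_time: f32) -> f32 {
    1.0 - alpha.powf(delta_time)
}

fn main() {
    let mut alpha = 0.5; // initial alpha given to new InterpTransform components
    for dt in [0.016_f32, 0.016, 0.033] {
        alpha = update_alpha(alpha, dt);
        println!("dt = {dt:.3} -> alpha = {alpha:.3}");
        // In the pass, `last_transform = last_transform.lerp(current, alpha)` follows.
    }
}
```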

View File

@@ -10,6 +10,9 @@ pub use simple_phong::*; */
 mod base;
 pub use base::*;
+mod meshes;
+pub use meshes::*;
+
 mod light_base;
 pub use light_base::*;

View File

@@ -1,35 +1,16 @@
-use std::cell::RefCell;
 use std::collections::VecDeque;
 use std::ops::{Deref, DerefMut};
 use std::rc::Rc;
 use std::sync::Arc;
-use std::borrow::Cow;

-use lyra_ecs::Component;
 use lyra_ecs::World;
-use lyra_scene::SceneGraph;
 use tracing::{debug, instrument, warn};
-use wgpu::Limits;
 use winit::window::Window;

-use crate::math::Transform;
-use crate::render::graph::{BasePass, LightBasePass, LightCullComputePass, PresentPass, TrianglePass};
-use crate::render::material::MaterialUniform;
-use crate::render::render_buffer::BufferWrapperBuilder;
+use crate::render::graph::{BasePass, LightBasePass, LightCullComputePass, MeshPass, PresentPass};

-use super::camera::CameraUniform;
 use super::graph::RenderGraph;
-use super::light::LightUniformBuffers;
-use super::material::Material;
-use super::render_buffer::BufferWrapper;
-use super::texture::RenderTexture;
-use super::transform_buffer_storage::TransformBuffers;
-use super::{resource::RenderPipeline, render_buffer::BufferStorage, render_job::RenderJob};
-use lyra_resource::{gltf::Mesh, ResHandle};
+use super::{resource::RenderPipeline, render_job::RenderJob};

-type MeshHandle = ResHandle<Mesh>;
-type SceneHandle = ResHandle<SceneGraph>;

 #[derive(Clone, Copy, Debug)]
 pub struct ScreenSize(glam::UVec2);
@@ -63,25 +44,6 @@ pub trait RenderPass {
     fn on_resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>);
 }

-struct MeshBufferStorage {
-    buffer_vertex: BufferStorage,
-    buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,
-
-    //#[allow(dead_code)]
-    //render_texture: Option<RenderTexture>,
-    material: Option<Rc<Material>>,
-
-    // The index of the transform for this entity.
-    // The tuple is structured like this: (transform index, index of transform inside the buffer)
-    //transform_index: TransformBufferIndices,
-}
-
-#[derive(Clone, Debug, Component)]
-pub struct InterpTransform {
-    last_transform: Transform,
-    alpha: f32,
-}
-
 pub struct BasicRenderer {
     pub device: Rc<wgpu::Device>, // device does not need to be mutable, no need for refcell
     pub queue: Rc<wgpu::Queue>,
@@ -93,24 +55,6 @@ pub struct BasicRenderer {
     pub render_pipelines: rustc_hash::FxHashMap<u64, Arc<RenderPipeline>>,
     pub render_jobs: VecDeque<RenderJob>,

-    //mesh_buffers: rustc_hash::FxHashMap<uuid::Uuid, MeshBufferStorage>, // TODO: clean up left over buffers from deleted entities/components
-    //material_buffers: rustc_hash::FxHashMap<uuid::Uuid, Rc<Material>>,
-    //entity_meshes: rustc_hash::FxHashMap<Entity, uuid::Uuid>,
-    //transform_buffers: TransformBuffers,
-    render_limits: Limits,
-
-    //inuse_camera: RenderCamera,
-    //camera_buffer: BufferWrapper,
-    //bgl_texture: Rc<BindGroupLayout>,
-    //default_texture: RenderTexture,
-    //depth_buffer_texture: RenderTexture,
-    //material_buffer: BufferWrapper,
-    //light_buffers: LightUniformBuffers,
-    //light_cull_compute: LightCullCompute,
-
     graph: RenderGraph,
 }

@@ -156,11 +100,8 @@ impl BasicRenderer {
             None,
         ).await.unwrap();

-        let render_limits = device.limits();
-
         let surface_caps = surface.get_capabilities(&adapter);
         let present_mode = surface_caps.present_modes[0];
         debug!("present mode: {:?}", present_mode);

         let surface_format = surface_caps.formats.iter()
@@ -172,43 +113,12 @@
             format: surface_format,
             width: size.width,
             height: size.height,
-            present_mode: wgpu::PresentMode::Immediate,
+            present_mode: wgpu::PresentMode::default(), //wgpu::PresentMode::Mailbox, // "Fast Vsync"
             alpha_mode: surface_caps.alpha_modes[0],
             view_formats: vec![],
         };
         surface.configure(&device, &config);

-        let bgl_texture = Rc::new(RenderTexture::create_layout(&device));
-
-        let shader_src = include_str!("shaders/base.wgsl");
-        let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-            label: Some("Shader"),
-            source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(shader_src)),
-        });
-
-        let transform_buffers = TransformBuffers::new(&device);
-
-        let camera_buffer = BufferWrapper::builder()
-            .buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
-            .contents(&[CameraUniform::default()])
-            .label_prefix("Camera")
-            .visibility(wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT | wgpu::ShaderStages::COMPUTE)
-            .buffer_dynamic_offset(false)
-            .finish(&device);
-
-        let mut depth_texture = RenderTexture::create_depth_texture(&device, &config, "Tex_Depth");
-
-        // load the default texture
-        let bytes = include_bytes!("default_texture.png");
-        let default_texture = RenderTexture::from_bytes(&device, &queue, bgl_texture.clone(), bytes, "default_texture").unwrap();
-
-        let light_uniform_buffers = LightUniformBuffers::new(&device);
-
-        let mat_buffer = BufferWrapperBuilder::new()
-            .buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
-            .visibility(wgpu::ShaderStages::FRAGMENT)
-            .contents(&[MaterialUniform::default()])
-            .finish(&device);
-
         let device = Rc::new(device);
         let queue = Rc::new(queue);

@@ -220,15 +130,23 @@
         g.add_pass(LightBasePass::new());
         debug!("Adding light cull compute pass");
         g.add_pass(LightCullComputePass::new(size));
-        debug!("Adding triangle pass");
-        g.add_pass(TrianglePass::new());
+        //debug!("Adding triangle pass");
+        //g.add_pass(TrianglePass::new());
+        debug!("Adding mesh pass");
+        g.add_pass(MeshPass::new());
         debug!("Adding present pass");
         g.add_pass(PresentPass::new("main_render_target"));

         g.add_edge("base", "light_base");
         g.add_edge("light_base", "light_cull_compute");
-        g.add_edge("base", "triangle");
+        g.add_edge("base", "meshes");

+        // make sure that present runs last
         g.add_edge("base", "present_main_render_target");
+        g.add_edge("light_cull_compute", "present_main_render_target");
+        g.add_edge("meshes", "present_main_render_target");

         g.setup(&device);

@@ -246,7 +164,6 @@
             render_pipelines: Default::default(),
             render_jobs: Default::default(),
-            render_limits,

             graph: g,
         }
     }

View File

@@ -7,6 +7,16 @@ pub struct VertexBufferLayout {
     pub attributes: Vec<wgpu::VertexAttribute>,
 }

+impl<'a> From<wgpu::VertexBufferLayout<'a>> for VertexBufferLayout {
+    fn from(value: wgpu::VertexBufferLayout) -> Self {
+        Self {
+            array_stride: value.array_stride,
+            step_mode: value.step_mode,
+            attributes: value.attributes.to_vec(),
+        }
+    }
+}
+
 /// Describes the vertex stage in a render pipeline.
 #[derive(Debug, Clone)]
 pub struct VertexState {
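A small usage note: with this conversion, the borrowed layout returned by helpers such as `Vertex::desc()` can be handed straight to the owned descriptor type, which is how the mesh pass above fills the `buffers` field of its pipeline descriptor. A sketch mirroring that usage:

```rust
// `Vertex::desc()` yields a borrowed wgpu::VertexBufferLayout; `.into()` copies it
// into the owned VertexBufferLayout stored in the pipeline descriptor.
let vertex = VertexState {
    module: shader.clone(),
    entry_point: "vs_main".into(),
    buffers: vec![Vertex::desc().into()],
};
```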

View File

@@ -273,7 +273,10 @@ impl RenderTexture {
         };
         let texture = device.create_texture(&desc);
-        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+        let view = texture.create_view(&wgpu::TextureViewDescriptor {
+            format: Some(wgpu::TextureFormat::Depth32Float),
+            ..Default::default()
+        });

         let sampler = device.create_sampler(
             &wgpu::SamplerDescriptor { // 4.
                 address_mode_u: wgpu::AddressMode::ClampToEdge,

View File

@@ -1,4 +1,4 @@
-use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64};
+use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64, rc::Rc};

 use lyra_ecs::Entity;
 use tracing::instrument;
@@ -162,7 +162,7 @@ impl<K: Hash + Eq + PartialEq + Clone, V: Clone, S: BuildHasher> CachedValMap<K,
 /// [`TransformGroup`]s are used to represent entries in the buffer. They are used to insert,
 /// update, and retrieve the transforms.
 pub struct TransformBuffers {
-    pub bindgroup_layout: wgpu::BindGroupLayout,
+    pub bindgroup_layout: Rc<wgpu::BindGroupLayout>,
     //groups: CachedValMap<TransformGroupId, TransformIndex>,
     //groups: SlotMap<TransformGroupId, TransformIndex>,
     entries: Vec<BufferEntry>,
@@ -192,7 +192,7 @@ impl TransformBuffers {
         });

         let mut s = Self {
-            bindgroup_layout,
+            bindgroup_layout: Rc::new(bindgroup_layout),
             entries: Default::default(),
             max_transform_count: (limits.max_uniform_buffer_binding_size) as usize / (limits.min_uniform_buffer_offset_alignment as usize), //(mem::size_of::<glam::Mat4>()),
             limits,
@@ -209,6 +209,7 @@
     ///
     /// This uses [`wgpu::Queue::write_buffer`], so the write is not immediately submitted,
     /// and instead enqueued internally to happen at the start of the next submit() call.
+    #[instrument(skip(self, queue))]
     pub fn send_to_gpu(&mut self, queue: &wgpu::Queue) {
         self.next_index = 0;