Compare commits

3 Commits

23 changed files with 1309 additions and 513 deletions

Cargo.lock (generated, 17 changed lines)

@@ -959,6 +959,12 @@ dependencies = [
  "tracing",
 ]
 
+[[package]]
+name = "fixedbitset"
+version = "0.4.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+
 [[package]]
 name = "flate2"
 version = "1.0.28"
@@ -1865,6 +1871,7 @@ dependencies = [
  "lyra-reflect",
  "lyra-resource",
  "lyra-scene",
+ "petgraph",
  "quote",
  "rustc-hash",
  "syn 2.0.51",
@@ -2507,6 +2514,16 @@ version = "2.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e"
 
+[[package]]
+name = "petgraph"
+version = "0.6.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db"
+dependencies = [
+ "fixedbitset",
+ "indexmap 2.1.0",
+]
+
 [[package]]
 name = "pin-project-lite"
 version = "0.2.13"


@@ -125,7 +125,7 @@ fn setup_scene_plugin(game: &mut Game) {
     world.spawn((
         cube_mesh.clone(),
         WorldTransform::default(),
-        Transform::from_xyz(0.0, -5.0, -2.0),
+        Transform::from_xyz(0.0, 0.0, -2.0),
     ));
 
     {


@@ -35,6 +35,7 @@ itertools = "0.11.0"
 thiserror = "1.0.56"
 unique = "0.9.1"
 rustc-hash = "1.1.0"
+petgraph = { version = "0.6.5", features = ["matrix_graph"] }
 
 [features]
 tracy = ["dep:tracing-tracy"]


@@ -1,89 +0,0 @@
use std::collections::{HashMap, VecDeque};
use rustc_hash::{FxHashMap, FxHashSet};
use super::RenderGraphPassDesc;
pub struct GraphExecutionPath {
/// Queue of the path, top is the first to be executed.
/// Each element is the handle of a pass.
pub queue: VecDeque<u64>,
}
impl GraphExecutionPath {
pub fn new(pass_descriptions: Vec<&RenderGraphPassDesc>) -> Self {
// collect all the output slots
let mut total_outputs = HashMap::new();
total_outputs.reserve(pass_descriptions.len());
for desc in pass_descriptions.iter() {
for slot in desc.output_slots() {
total_outputs.insert(slot.name.clone(), SlotOwnerPair {
pass: desc.id,
slot: slot.id,
});
}
}
let mut nodes = FxHashMap::<u64, Node>::default();
for desc in pass_descriptions.iter() {
// find the node inputs
let mut inputs = vec![];
for slot in desc.input_slots() {
let inp = total_outputs.get(&slot.name)
.expect(&format!("failed to find slot: '{}', ensure that there is a pass outputting it", slot.name));
inputs.push(*inp);
}
let node = Node {
id: desc.id,
desc: (*desc),
slot_inputs: inputs
};
nodes.insert(node.id, node);
}
// sort the graph
let mut stack = VecDeque::new();
let mut visited = FxHashSet::default();
for (_, no) in nodes.iter() {
Self::topological_sort(&nodes, &mut stack, &mut visited, no);
}
Self {
queue: stack,
}
}
fn topological_sort(graph: &FxHashMap<u64, Node>, stack: &mut VecDeque<u64>, visited: &mut FxHashSet<u64>, node: &Node) {
if !visited.contains(&node.id) {
visited.insert(node.id);
for depend in &node.slot_inputs {
let depend_node = graph.get(&depend.pass)
.expect("could not find dependent node");
if !visited.contains(&depend.pass) {
Self::topological_sort(graph, stack, visited, depend_node);
}
}
stack.push_back(node.id);
}
}
}
#[allow(dead_code)]
#[derive(Debug, Clone, Copy)]
struct SlotOwnerPair {
pass: u64,
slot: u64,
}
#[allow(dead_code)]
struct Node<'a> {
id: u64,
desc: &'a RenderGraphPassDesc,
slot_inputs: Vec<SlotOwnerPair>,
}
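The module deleted above hand-rolled a depth-first topological sort over pass dependencies. This branch replaces it with a petgraph `DiMatrix` stored on `RenderGraph` plus `petgraph::algo::toposort` (see `RenderGraph::render` further down). A minimal standalone sketch of that ordering approach, assuming petgraph 0.6 with the `matrix_graph` feature enabled as in the Cargo.toml change above; the pass ids and edges are made up for illustration:

```rust
use petgraph::matrix_graph::DiMatrix;

fn main() {
    // Directed matrix graph keyed by pass id (u64), mirroring the
    // `execution_graph` field added to RenderGraph in this diff.
    let mut graph: DiMatrix<u64, (), Option<()>, usize> = Default::default();

    // Hypothetical pass ids, for illustration only.
    let base = graph.add_node(1);
    let light_cull = graph.add_node(2);
    let present = graph.add_node(3);

    // "base" must run before "light_cull", which must run before "present".
    graph.add_edge(base, light_cull, ());
    graph.add_edge(light_cull, present, ());

    // The same call RenderGraph::render now makes to order the passes.
    let order: Vec<u64> = petgraph::algo::toposort(&graph, None)
        .expect("render graph has a cycle")
        .into_iter()
        .map(|idx| graph[idx])
        .collect();

    assert_eq!(order, vec![1, 2, 3]);
}
```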


@ -6,6 +6,7 @@ use std::{
sync::Arc, sync::Arc,
}; };
use itertools::Itertools;
use lyra_ecs::World; use lyra_ecs::World;
pub use pass::*; pub use pass::*;
@ -15,19 +16,17 @@ pub use passes::*;
mod slot_desc; mod slot_desc;
pub use slot_desc::*; pub use slot_desc::*;
mod execution_path;
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use tracing::{debug_span, instrument, trace, warn}; use tracing::{debug_span, instrument, trace, warn};
use wgpu::ComputePass;
use self::execution_path::GraphExecutionPath; use super::resource::{ComputePipeline, Pipeline, RenderPipeline};
use super::resource::{Pipeline, RenderPipeline};
//#[derive(Clone)]
struct PassEntry { struct PassEntry {
inner: Arc<RefCell<dyn RenderGraphPass>>, inner: Arc<RefCell<dyn RenderGraphPass>>,
desc: Arc<RenderGraphPassDesc>, desc: Arc<RenderGraphPassDesc>,
/// The index of the pass in the execution graph
graph_index: petgraph::matrix_graph::NodeIndex<usize>,
} }
pub struct BindGroupEntry { pub struct BindGroupEntry {
@ -41,7 +40,6 @@ pub struct BindGroupEntry {
#[allow(dead_code)] #[allow(dead_code)]
struct ResourcedSlot { struct ResourcedSlot {
name: String, name: String,
//slot: RenderPassSlot,
ty: SlotType, ty: SlotType,
value: SlotValue, value: SlotValue,
} }
@ -70,13 +68,13 @@ pub struct RenderGraph {
passes: FxHashMap<u64, PassEntry>, passes: FxHashMap<u64, PassEntry>,
// TODO: Use a SlotMap // TODO: Use a SlotMap
bind_groups: FxHashMap<u64, BindGroupEntry>, bind_groups: FxHashMap<u64, BindGroupEntry>,
bind_group_names: FxHashMap<String, u64>, bind_group_names: HashMap<String, u64>,
// TODO: make pipelines a `type` parameter in RenderPasses, // TODO: make pipelines a `type` parameter in RenderPasses,
// then the pipelines can be retrieved via TypeId to the pass. // then the pipelines can be retrieved via TypeId to the pass.
///
pipelines: FxHashMap<u64, PipelineResource>, pipelines: FxHashMap<u64, PipelineResource>,
current_id: u64, current_id: u64,
exec_path: Option<GraphExecutionPath>, /// A directed graph describing the execution path of the RenderGraph
execution_graph: petgraph::matrix_graph::DiMatrix<u64, (), Option<()>, usize>,
} }
impl RenderGraph { impl RenderGraph {
@ -91,7 +89,7 @@ impl RenderGraph {
bind_group_names: Default::default(), bind_group_names: Default::default(),
pipelines: Default::default(), pipelines: Default::default(),
current_id: 1, current_id: 1,
exec_path: None, execution_graph: Default::default(),
} }
} }
@ -127,7 +125,8 @@ impl RenderGraph {
trace!( trace!(
"Found existing slot for {}, changing id to {}", "Found existing slot for {}, changing id to {}",
slot.name, id slot.name,
id
); );
// if there is a slot of the same name // if there is a slot of the same name
@ -158,11 +157,14 @@ impl RenderGraph {
self.bind_group_names.insert(name.clone(), bg_id); self.bind_group_names.insert(name.clone(), bg_id);
} }
let index = self.execution_graph.add_node(desc.id);
self.passes.insert( self.passes.insert(
desc.id, desc.id,
PassEntry { PassEntry {
inner: Arc::new(RefCell::new(pass)), inner: Arc::new(RefCell::new(pass)),
desc: Arc::new(desc), desc: Arc::new(desc),
graph_index: index,
}, },
); );
} }
@ -172,12 +174,23 @@ impl RenderGraph {
pub fn setup(&mut self, device: &wgpu::Device) { pub fn setup(&mut self, device: &wgpu::Device) {
// For all passes, create their pipelines // For all passes, create their pipelines
for pass in self.passes.values() { for pass in self.passes.values() {
if let Some(pipei) = &pass.desc.pipeline_desc { if let Some(pipeline_desc) = &pass.desc.pipeline_desc {
let pipeline = match pass.desc.pass_type { let pipeline = match pass.desc.pass_type {
RenderPassType::Render => { RenderPassType::Render => Pipeline::Render(RenderPipeline::create(
Pipeline::Render(RenderPipeline::create(device, pipei)) device,
pipeline_desc
.as_render_pipeline_descriptor()
.expect("got compute pipeline descriptor in a render pass"),
)),
RenderPassType::Compute => Pipeline::Compute(ComputePipeline::create(
device,
pipeline_desc
.as_compute_pipeline_descriptor()
.expect("got render pipeline descriptor in a compute pass"),
)),
RenderPassType::Presenter | RenderPassType::Node => {
panic!("Present or Node RenderGraph passes should not have a pipeline descriptor!");
} }
_ => todo!(),
}; };
let res = PipelineResource { let res = PipelineResource {
@ -192,7 +205,7 @@ impl RenderGraph {
#[instrument(skip(self, world))] #[instrument(skip(self, world))]
pub fn prepare(&mut self, world: &mut World) { pub fn prepare(&mut self, world: &mut World) {
// prepare all passes // prepare all passes
let mut context = RenderGraphContext::new(&self.queue, None); let mut context = RenderGraphContext::new(&self.device, &self.queue, None);
for (_, pass) in &mut self.passes { for (_, pass) in &mut self.passes {
let mut inner = pass.inner.borrow_mut(); let mut inner = pass.inner.borrow_mut();
inner.prepare(world, &mut context); inner.prepare(world, &mut context);
@ -219,49 +232,52 @@ impl RenderGraph {
self.queue.write_buffer(buf, bufwr.offset, &bufwr.bytes); self.queue.write_buffer(buf, bufwr.offset, &bufwr.bytes);
} }
} }
// create the execution path for the graph. This will be executed in `RenderGraph::render`
let descs = self.passes.values().map(|p| &*p.desc).collect();
let path = GraphExecutionPath::new(descs);
trace!(
"Found {} steps in the rendergraph to execute",
path.queue.len()
);
self.exec_path = Some(path);
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub fn render(&mut self) { pub fn render(&mut self) {
let mut path = self.exec_path.take().unwrap(); let mut sorted: VecDeque<u64> = petgraph::algo::toposort(&self.execution_graph, None)
.expect("RenderGraph had cycled!")
.iter()
.map(|i| self.execution_graph[i.clone()])
.collect();
let path_names = sorted
.iter()
.map(|i| self.pass(*i).unwrap().name.clone())
.collect_vec();
trace!("Render graph execution order: {:?}", path_names);
let mut encoders = Vec::with_capacity(self.passes.len() / 2); let mut encoders = Vec::with_capacity(self.passes.len() / 2);
while let Some(pass_id) = path.queue.pop_front() { while let Some(pass_id) = sorted.pop_front() {
let pass = self.passes.get(&pass_id).unwrap(); let pass = self.passes.get(&pass_id).unwrap();
let pass_inn = pass.inner.clone(); let pass_inn = pass.inner.clone();
let pass_desc = pass.desc.clone(); let pass_desc = pass.desc.clone();
let label = format!("{} Encoder", pass_desc.name); let label = format!("{} Encoder", pass_desc.name);
// encoders are not needed for presenter nodes. // encoders are not needed for presenter nodes.
let encoder = if pass_desc.pass_type == RenderPassType::Presenter { let encoder = if pass_desc.pass_type.should_have_pipeline() {
None
} else {
Some( Some(
self.device self.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor { .create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some(&label), label: Some(&label),
}), }),
) )
} else {
None
}; };
let queue = self.queue.clone(); // clone is required to appease the borrow checker // clone of the Rc's is required to appease the borrow checker
let mut context = RenderGraphContext::new(&queue, encoder); let device = self.device.clone();
let queue = self.queue.clone();
let mut context = RenderGraphContext::new(&device, &queue, encoder);
// all encoders need to be submitted before a presenter node is executed. // all encoders need to be submitted before a presenter node is executed.
if pass_desc.pass_type == RenderPassType::Presenter { if pass_desc.pass_type == RenderPassType::Presenter {
trace!("Submitting {} encoders before presenting", encoders.len());
self.queue.submit(encoders.drain(..)); self.queue.submit(encoders.drain(..));
} }
trace!("Executing {}", pass_desc.name);
let mut inner = pass_inn.borrow_mut(); let mut inner = pass_inn.borrow_mut();
inner.execute(self, &*pass_desc, &mut context); inner.execute(self, &*pass_desc, &mut context);
@ -271,9 +287,12 @@ impl RenderGraph {
} }
if !encoders.is_empty() { if !encoders.is_empty() {
warn!("{} encoders were not submitted in the same render cycle they were created. \ warn!(
"{} encoders were not submitted in the same render cycle they were created. \
Make sure there is a presenting pass at the end. You may still see something, \ Make sure there is a presenting pass at the end. You may still see something, \
however it will be delayed a render cycle.", encoders.len()); however it will be delayed a render cycle.",
encoders.len()
);
self.queue.submit(encoders.into_iter()); self.queue.submit(encoders.into_iter());
} }
} }
@ -320,6 +339,62 @@ impl RenderGraph {
pub fn bind_group_id(&self, name: &str) -> Option<u64> { pub fn bind_group_id(&self, name: &str) -> Option<u64> {
self.bind_group_names.get(name).copied() self.bind_group_names.get(name).copied()
} }
pub fn add_edge(&mut self, from: &str, to: &str) {
let from_idx = self
.passes
.iter()
.find(|p| p.1.desc.name == from)
.map(|p| p.1.graph_index)
.expect("Failed to find from pass");
let to_idx = self
.passes
.iter()
.find(|p| p.1.desc.name == to)
.map(|p| p.1.graph_index)
.expect("Failed to find to pass");
self.execution_graph.add_edge(from_idx, to_idx, ());
}
/// Utility method for setting the bind groups for a pass.
///
/// The parameter `bind_groups` can be used to specify the labels of a bind group, and the
/// index of the bind group in the pipeline for the pass. If a bind group of the provided
/// name is not found in the graph, a panic will occur.
///
/// # Example:
/// ```rust,nobuild
/// graph.set_bind_groups(
/// &mut pass,
/// &[
/// // retrieves the "depth_texture" bind group and sets the index 0 in the
/// // pass to it.
/// ("depth_texture", 0),
/// ("camera", 1),
/// ("light_buffers", 2),
/// ("light_indices_grid", 3),
/// ("screen_size", 4),
/// ],
/// );
/// ```
///
/// # Panics
/// Panics if a bind group of a provided name is not found.
pub fn set_bind_groups<'a>(
&'a self,
pass: &mut ComputePass<'a>,
bind_groups: &[(&str, u32)],
) {
for (name, index) in bind_groups {
let bg = self
.bind_group_id(name)
.map(|bgi| self.bind_group(bgi))
.expect(&format!("Could not find bind group '{}'", name));
pass.set_bind_group(*index, bg, &[]);
}
}
} }
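With the execution graph in place, pass ordering is declared by name through `add_edge` rather than inferred from slot inputs. A hypothetical wiring sketch (not taken from this diff: the pass names match the descriptors defined in the passes below, but the exact edges the renderer registers are an assumption):

```rust,nobuild
// Hypothetical: ensure the base and light passes run before light culling,
// and light culling runs before the mesh pass.
graph.add_edge("base", "light_cull_compute");
graph.add_edge("light_base", "light_cull_compute");
graph.add_edge("light_cull_compute", "meshes");
```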
/// A queued write to a GPU buffer targeting a graph slot. /// A queued write to a GPU buffer targeting a graph slot.
@ -334,15 +409,17 @@ pub(crate) struct GraphBufferWrite {
pub struct RenderGraphContext<'a> { pub struct RenderGraphContext<'a> {
/// Becomes None when the encoder is submitted /// Becomes None when the encoder is submitted
pub(crate) encoder: Option<wgpu::CommandEncoder>, pub(crate) encoder: Option<wgpu::CommandEncoder>,
pub(crate) device: &'a wgpu::Device,
pub(crate) queue: &'a wgpu::Queue, pub(crate) queue: &'a wgpu::Queue,
pub(crate) buffer_writes: VecDeque<GraphBufferWrite>, pub(crate) buffer_writes: VecDeque<GraphBufferWrite>,
renderpass_desc: Vec<wgpu::RenderPassDescriptor<'a, 'a>>, renderpass_desc: Vec<wgpu::RenderPassDescriptor<'a, 'a>>,
} }
impl<'a> RenderGraphContext<'a> { impl<'a> RenderGraphContext<'a> {
pub fn new(queue: &'a wgpu::Queue, encoder: Option<wgpu::CommandEncoder>) -> Self { pub(crate) fn new(device: &'a wgpu::Device, queue: &'a wgpu::Queue, encoder: Option<wgpu::CommandEncoder>) -> Self {
Self { Self {
encoder, encoder,
device,
queue, queue,
buffer_writes: Default::default(), buffer_writes: Default::default(),
renderpass_desc: vec![], renderpass_desc: vec![],


@@ -2,18 +2,31 @@ use std::{cell::{Ref, RefCell, RefMut}, collections::HashMap, num::NonZeroU32, r
 
 use lyra_ecs::World;
 
-use crate::render::resource::RenderPipelineDescriptor;
+use crate::render::resource::PipelineDescriptor;
 
 use super::{RenderGraph, RenderGraphContext, RenderTarget};
 
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
 pub enum RenderPassType {
+    /// A node doesn't render, compute, or present anything. This likely means it injects data into the graph.
+    Node,
     Compute,
     #[default]
     Render,
     Presenter,
 }
 
+impl RenderPassType {
+    pub fn should_have_pipeline(&self) -> bool {
+        match self {
+            RenderPassType::Node => false,
+            RenderPassType::Compute => true,
+            RenderPassType::Render => true,
+            RenderPassType::Presenter => false,
+        }
+    }
+}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
 pub enum SlotType {
     TextureView,
@@ -150,7 +163,7 @@ pub struct RenderGraphPassDesc {
     pub pass_type: RenderPassType,
     pub slots: Vec<RenderPassSlot>,
     slot_names: HashMap<String, u64>,
-    pub pipeline_desc: Option<RenderPipelineDescriptor>,
+    pub pipeline_desc: Option<PipelineDescriptor>,
     pub bind_groups: Vec<(
         String,
         Rc<wgpu::BindGroup>,
@@ -163,7 +176,7 @@ impl RenderGraphPassDesc {
         id: u64,
         name: &str,
         pass_type: RenderPassType,
-        pipeline_desc: Option<RenderPipelineDescriptor>,
+        pipeline_desc: Option<PipelineDescriptor>,
         bind_groups: Vec<(&str, Rc<wgpu::BindGroup>, Option<Rc<wgpu::BindGroupLayout>>)>,
     ) -> Self {
         Self {


@@ -1,68 +1,114 @@
use std::{cell::RefCell, rc::Rc}; use std::{cell::RefCell, rc::Rc};
use crate::render::graph::{RenderGraphContext, RenderGraphPass, RenderGraphPassDesc, RenderPassSlot, RenderPassType, RenderTarget, SlotAttribute, SlotType, SlotValue}; use glam::UVec2;
use tracing::warn;
use winit::dpi::PhysicalSize;
use crate::{
render::{
camera::{CameraUniform, RenderCamera},
graph::{
RenderGraphContext, RenderGraphPass, RenderGraphPassDesc, RenderPassSlot,
RenderPassType, RenderTarget, SlotAttribute, SlotType, SlotValue,
},
render_buffer::BufferWrapper, texture::RenderTexture,
},
scene::CameraComponent,
};
/// Supplies some basic things other passes needs. /// Supplies some basic things other passes needs.
/// ///
/// screen size buffer, camera buffer, /// screen size buffer, camera buffer,
#[derive(Default)] #[derive(Default)]
pub struct BasePass { pub struct BasePass {
/// Temporary storage for the main render target /// Temporary storage for the main render target
/// ///
/// This should be Some when the pass is first created then after its added to /// This should be Some when the pass is first created then after its added to
/// the render graph it will be None and stay None. /// the render graph it will be None and stay None.
temp_render_target: Option<RenderTarget>, temp_render_target: Option<RenderTarget>,
main_rt_id: u64, main_rt_id: u64,
window_tv_id: u64, window_tv_id: u64,
screen_size: glam::UVec2,
} }
impl BasePass { impl BasePass {
pub fn new(surface: wgpu::Surface, surface_config: wgpu::SurfaceConfiguration) -> Self { pub fn new(surface: wgpu::Surface, surface_config: wgpu::SurfaceConfiguration) -> Self {
let size = glam::UVec2::new(surface_config.width, surface_config.height);
Self { Self {
temp_render_target: Some(RenderTarget { temp_render_target: Some(RenderTarget {
surface, surface,
surface_config, surface_config,
current_texture: None, current_texture: None,
}), }),
main_rt_id: 0, screen_size: size,
window_tv_id: 0, ..Default::default()
} }
} }
} }
impl RenderGraphPass for BasePass { impl RenderGraphPass for BasePass {
fn desc(&mut self, graph: &mut crate::render::graph::RenderGraph) -> crate::render::graph::RenderGraphPassDesc { fn desc(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
) -> crate::render::graph::RenderGraphPassDesc {
let render_target = self.temp_render_target.take().unwrap();
self.screen_size = UVec2::new(
render_target.surface_config.width,
render_target.surface_config.height,
);
let (screen_size_bgl, screen_size_bg, screen_size_buf, _) = BufferWrapper::builder()
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
.label_prefix("ScreenSize")
.visibility(wgpu::ShaderStages::COMPUTE)
.buffer_dynamic_offset(false)
.contents(&[self.screen_size])
.finish_parts(&graph.device());
let screen_size_bgl = Rc::new(screen_size_bgl);
let screen_size_bg = Rc::new(screen_size_bg);
let (camera_bgl, camera_bg, camera_buf, _) = BufferWrapper::builder()
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
.label_prefix("camera")
.visibility(wgpu::ShaderStages::all())
.buffer_dynamic_offset(false)
.contents(&[CameraUniform::default()])
.finish_parts(&graph.device());
let camera_bgl = Rc::new(camera_bgl);
let camera_bg = Rc::new(camera_bg);
// create the depth texture using the utility struct, then take all the required fields
let mut depth_texture = RenderTexture::create_depth_texture(&graph.device(), &render_target.surface_config, "depth_texture");
depth_texture.create_bind_group(&graph.device);
let dt_bg_pair = depth_texture.bindgroup_pair.unwrap();
let depth_texture_bg = Rc::new(dt_bg_pair.bindgroup);
let depth_texture_bgl = dt_bg_pair.layout;
let depth_texture_view = Rc::new(depth_texture.view);
let mut desc = RenderGraphPassDesc::new( let mut desc = RenderGraphPassDesc::new(
graph.next_id(), graph.next_id(),
"base", "base",
RenderPassType::Render, RenderPassType::Node,
None, None,
vec![], vec![
("depth_texture", depth_texture_bg, Some(depth_texture_bgl)),
("screen_size", screen_size_bg, Some(screen_size_bgl)),
("camera", camera_bg, Some(camera_bgl)),
],
); );
/* desc.add_buffer_slot(*id, "screen_size_buffer", SlotAttribute::Output, Some(SlotDescriptor::BufferInit(BufferInitDescriptor {
label: Some("B_ScreenSize".to_string()),
contents: bytemuck::bytes_of(&UVec2::new(800, 600)).to_vec(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
})));
desc.add_buffer_slot(*id, "camera_buffer", SlotAttribute::Output, Some(SlotDescriptor::BufferInit(BufferInitDescriptor {
label: Some("B_Camera".to_string()),
contents: bytemuck::bytes_of(&CameraUniform::default()).to_vec(),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
})));
*id += 1; */
self.main_rt_id = graph.next_id(); self.main_rt_id = graph.next_id();
let render_target = self.temp_render_target.take().unwrap(); desc.add_slot(RenderPassSlot {
desc.add_slot( ty: SlotType::RenderTarget,
RenderPassSlot { attribute: SlotAttribute::Output,
ty: SlotType::RenderTarget, id: self.main_rt_id,
attribute: SlotAttribute::Output, name: "main_render_target".into(),
id: self.main_rt_id, value: Some(SlotValue::RenderTarget(Rc::new(RefCell::new(
name: "main_render_target".into(), render_target,
value: Some(SlotValue::RenderTarget(Rc::new(RefCell::new(render_target)))), )))),
} });
);
self.window_tv_id = graph.next_id(); self.window_tv_id = graph.next_id();
desc.add_texture_view_slot( desc.add_texture_view_slot(
self.window_tv_id, self.window_tv_id,
@ -70,31 +116,75 @@ impl RenderGraphPass for BasePass {
SlotAttribute::Output, SlotAttribute::Output,
Some(SlotValue::Lazy), Some(SlotValue::Lazy),
); );
desc.add_texture_view_slot(
graph.next_id(),
"depth_texture_view",
SlotAttribute::Output,
Some(SlotValue::TextureView(depth_texture_view)),
);
desc.add_buffer_slot(
graph.next_id(),
"screen_size_buffer",
SlotAttribute::Output,
Some(SlotValue::Buffer(Rc::new(screen_size_buf))),
);
desc.add_buffer_slot(
graph.next_id(),
"camera_buffer",
SlotAttribute::Output,
Some(SlotValue::Buffer(Rc::new(camera_buf))),
);
desc desc
} }
fn prepare(&mut self, _world: &mut lyra_ecs::World, _context: &mut RenderGraphContext) { fn prepare(&mut self, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
if let Some(camera) = world.view_iter::<&mut CameraComponent>().next() {
let mut render_cam =
RenderCamera::new(PhysicalSize::new(self.screen_size.x, self.screen_size.y));
let uniform = render_cam.calc_view_projection(&camera);
context.queue_buffer_write_with("camera_buffer", 0, uniform)
} else {
warn!("Missing camera!");
}
} }
fn execute(&mut self, graph: &mut crate::render::graph::RenderGraph, _desc: &crate::render::graph::RenderGraphPassDesc, _context: &mut crate::render::graph::RenderGraphContext) { fn execute(
let tv_slot = graph.slot_value_mut(self.main_rt_id) &mut self,
graph: &mut crate::render::graph::RenderGraph,
_desc: &crate::render::graph::RenderGraphPassDesc,
context: &mut crate::render::graph::RenderGraphContext,
) {
let tv_slot = graph
.slot_value_mut(self.main_rt_id)
.expect("somehow the main render target slot is missing"); .expect("somehow the main render target slot is missing");
let mut rt = tv_slot.as_render_target_mut().unwrap(); let mut rt = tv_slot.as_render_target_mut().unwrap();
debug_assert!(!rt.current_texture.is_some(), "main render target surface was not presented!"); debug_assert!(
!rt.current_texture.is_some(),
"main render target surface was not presented!"
);
// update the screen size buffer if the size changed.
if rt.surface_config.width != self.screen_size.x
|| rt.surface_config.height != self.screen_size.y
{
self.screen_size = UVec2::new(rt.surface_config.width, rt.surface_config.height);
context.queue_buffer_write_with("screen_size_buffer", 0, self.screen_size)
}
let surface_tex = rt.surface.get_current_texture().unwrap(); let surface_tex = rt.surface.get_current_texture().unwrap();
let view = surface_tex.texture.create_view(&wgpu::TextureViewDescriptor::default()); let view = surface_tex
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
rt.current_texture = Some(surface_tex); rt.current_texture = Some(surface_tex);
drop(rt); // must be manually dropped for borrow checker when getting texture view slot drop(rt); // must be manually dropped for borrow checker when getting texture view slot
// store the surface texture to the slot // store the surface texture to the slot
let tv_slot = graph.slot_value_mut(self.window_tv_id) let tv_slot = graph
.slot_value_mut(self.window_tv_id)
.expect("somehow the window texture view slot is missing"); .expect("somehow the window texture view slot is missing");
*tv_slot = SlotValue::TextureView(Rc::new(view)); *tv_slot = SlotValue::TextureView(Rc::new(view));
} }
} }


@@ -0,0 +1,67 @@
use crate::render::{
graph::{
RenderGraphContext, RenderGraphPass, RenderGraphPassDesc, RenderPassType, SlotAttribute,
SlotValue,
},
light::LightUniformBuffers,
};
/// Supplies the light uniform buffers that other passes need.
#[derive(Default)]
pub struct LightBasePass {
light_buffers: Option<LightUniformBuffers>,
}
impl LightBasePass {
pub fn new() -> Self {
Self::default()
}
}
impl RenderGraphPass for LightBasePass {
fn desc(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
) -> crate::render::graph::RenderGraphPassDesc {
let device = &graph.device;
self.light_buffers = Some(LightUniformBuffers::new(device));
let light_buffers = self.light_buffers.as_ref().unwrap();
let mut desc = RenderGraphPassDesc::new(
graph.next_id(),
"light_base",
RenderPassType::Node,
None,
vec![(
"light_buffers",
light_buffers.bind_group.clone(),
Some(light_buffers.bind_group_layout.clone()),
)],
);
desc.add_buffer_slot(
graph.next_id(),
"light_buffers",
SlotAttribute::Output,
Some(SlotValue::Buffer(light_buffers.buffer.clone())),
);
desc
}
fn prepare(&mut self, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
let tick = world.current_tick();
let lights = self.light_buffers.as_mut().unwrap();
lights.update_lights(context.queue, tick, world);
}
fn execute(
&mut self,
_graph: &mut crate::render::graph::RenderGraph,
_desc: &crate::render::graph::RenderGraphPassDesc,
_context: &mut crate::render::graph::RenderGraphContext,
) {
}
}


@@ -1,10 +1,14 @@
use std::mem; use std::{mem, rc::Rc};
use lyra_ecs::World; use lyra_ecs::World;
use wgpu::util::DeviceExt;
use crate::render::graph::{ use crate::render::{
BufferInitDescriptor, RenderGraphContext, RenderGraphPass, RenderGraphPassDesc, RenderPassType, graph::{
SlotAttribute, SlotDescriptor, TextureDescriptor, TextureViewDescriptor, RenderGraphContext, RenderGraphPass, RenderGraphPassDesc, RenderPassType, SlotAttribute,
SlotValue,
},
resource::{ComputePipelineDescriptor, PipelineDescriptor, Shader},
}; };
pub struct LightCullComputePass { pub struct LightCullComputePass {
@ -21,124 +25,227 @@ impl LightCullComputePass {
impl RenderGraphPass for LightCullComputePass { impl RenderGraphPass for LightCullComputePass {
fn desc( fn desc(
&self, &mut self,
graph: &mut crate::render::graph::RenderGraph, graph: &mut crate::render::graph::RenderGraph,
id: &mut u64,
) -> crate::render::graph::RenderGraphPassDesc { ) -> crate::render::graph::RenderGraphPassDesc {
let mut desc = RenderGraphPassDesc::new(*id, "LightCullCompute", RenderPassType::Compute); let shader = Rc::new(Shader {
*id += 1; label: Some("light_cull_comp_shader".into()),
source: include_str!("../../shaders/light_cull.comp.wgsl").to_string(),
});
desc.add_buffer_slot(*id, "screen_size_buffer", SlotAttribute::Input, None); // get the size of the work group for the grid
*id += 1; let main_rt = graph
desc.add_buffer_slot(*id, "camera_buffer", SlotAttribute::Input, None); .slot_id("main_render_target")
*id += 1; .and_then(|s| graph.slot_value(s))
.and_then(|s| s.as_render_target())
.expect("missing main render target");
self.workgroup_size =
glam::UVec2::new(main_rt.surface_config.width, main_rt.surface_config.height);
// initialize some buffers with empty data
let mut contents = Vec::<u8>::new(); let mut contents = Vec::<u8>::new();
let contents_len = let contents_len =
self.workgroup_size.x * self.workgroup_size.y * 200 * mem::size_of::<u32>() as u32; self.workgroup_size.x * self.workgroup_size.y * mem::size_of::<u32>() as u32;
contents.resize(contents_len as _, 0); contents.resize(contents_len as _, 0);
desc.add_buffer_slot(
*id, let device = graph.device();
"light_indices", let light_indices_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
SlotAttribute::Output, label: Some("light_indices_buffer"),
Some(SlotDescriptor::BufferInit(BufferInitDescriptor { contents: &contents,
label: Some("B_LightIndices".to_string()), usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
contents, });
let light_index_counter_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("light_index_counter_buffer"),
contents: &bytemuck::cast_slice(&[0]),
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST, usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
})), });
);
*id += 1; let light_indices_bg_layout = Rc::new(device.create_bind_group_layout(
&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::StorageTexture {
access: wgpu::StorageTextureAccess::ReadWrite,
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
view_dimension: wgpu::TextureViewDimension::D2,
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 2,
visibility: wgpu::ShaderStages::COMPUTE,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: false },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
},
],
label: Some("light_indices_grid_bgl"),
},
));
let size = wgpu::Extent3d { let size = wgpu::Extent3d {
width: self.workgroup_size.x, width: self.workgroup_size.x,
height: self.workgroup_size.y, height: self.workgroup_size.y,
depth_or_array_layers: 1, depth_or_array_layers: 1,
}; };
desc.add_texture_slot( let grid_texture = device.create_texture(&wgpu::TextureDescriptor {
*id, label: Some("light_grid_tex"),
"lightgrid_texture", size,
SlotAttribute::Output, mip_level_count: 1,
Some(SlotDescriptor::Texture(TextureDescriptor { sample_count: 1,
size, dimension: wgpu::TextureDimension::D2,
mip_level_count: 1, format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
sample_count: 1, usage: wgpu::TextureUsages::STORAGE_BINDING,
dimension: wgpu::TextureDimension::D2, view_formats: &[],
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint> });
usage: wgpu::TextureUsages::STORAGE_BINDING,
view_formats: vec![], let grid_texture_view = grid_texture.create_view(&wgpu::TextureViewDescriptor {
label: Some("light_grid_texview"),
format: Some(wgpu::TextureFormat::Rg32Uint), // vec2<uint>
dimension: Some(wgpu::TextureViewDimension::D2),
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
mip_level_count: None,
base_array_layer: 0,
array_layer_count: None,
});
let light_indices_bg = Rc::new(device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &light_indices_bg_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: &light_indices_buffer,
offset: 0,
size: None, // the entire light buffer is needed
}),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&grid_texture_view),
},
wgpu::BindGroupEntry {
binding: 2,
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
buffer: &light_index_counter_buffer,
offset: 0,
size: None, // the entire light buffer is needed
}),
},
],
label: Some("light_indices_grid_bind_group"),
}));
drop(main_rt);
let pass_id = graph.next_id();
let depth_tex_bgl = graph.bind_group_layout(graph.bind_group_id("depth_texture").unwrap());
let camera_bgl = graph.bind_group_layout(graph.bind_group_id("camera").unwrap());
let lights_bgl = graph.bind_group_layout(graph.bind_group_id("light_buffers").unwrap());
let screen_size_bgl = graph.bind_group_layout(graph.bind_group_id("screen_size").unwrap());
let mut desc = RenderGraphPassDesc::new(
pass_id,
"light_cull_compute",
RenderPassType::Compute,
Some(PipelineDescriptor::Compute(ComputePipelineDescriptor {
label: Some("light_cull_pipeline".into()),
push_constant_ranges: vec![],
layouts: vec![
depth_tex_bgl.clone(),
camera_bgl.clone(),
lights_bgl.clone(),
light_indices_bg_layout.clone(),
screen_size_bgl.clone(),
],
shader,
shader_entry_point: "cs_main".into(),
})), })),
vec![(
"light_indices_grid",
light_indices_bg,
Some(light_indices_bg_layout),
)],
); );
*id += 1;
desc.add_texture_view_slot( desc.add_texture_view_slot(
*id, graph.next_id(),
"lightgrid_texture_view", "window_texture_view",
SlotAttribute::Output, SlotAttribute::Input,
Some(SlotDescriptor::TextureView(TextureViewDescriptor { None,
texture_label: "lightgrid_texture".to_string(), );
format: Some(wgpu::TextureFormat::Rg32Uint), // vec2<uint> desc.add_buffer_slot(
dimension: Some(wgpu::TextureViewDimension::D2), graph.next_id(),
aspect: wgpu::TextureAspect::All, "screen_size_buffer",
base_mip_level: 0, SlotAttribute::Input,
mip_level_count: None, None,
base_array_layer: 0, );
array_layer_count: None, desc.add_buffer_slot(graph.next_id(), "camera_buffer", SlotAttribute::Input, None);
})), desc.add_buffer_slot(
graph.next_id(),
"index_counter_buffer",
SlotAttribute::Output,
Some(SlotValue::Buffer(Rc::new(light_index_counter_buffer))),
); );
*id += 1;
desc desc
} }
fn prepare(&mut self, world: &mut World) { fn prepare(&mut self, _world: &mut World, _context: &mut RenderGraphContext) {}
let _ = world;
todo!()
}
fn execute( fn execute(
&mut self, &mut self,
graph: &mut crate::render::graph::RenderGraph, graph: &mut crate::render::graph::RenderGraph,
_: &crate::render::graph::RenderGraphPassDesc, desc: &crate::render::graph::RenderGraphPassDesc,
context: &mut RenderGraphContext, context: &mut RenderGraphContext,
) { ) {
let mut pass = context.begin_compute_pass(&wgpu::ComputePassDescriptor { let mut pass = context.begin_compute_pass(&wgpu::ComputePassDescriptor {
label: Some("Pass_lightCull"), label: Some("light_cull_pass"),
}); });
let pipeline = graph.compute_pipeline("main"); let pipeline = graph.pipeline(desc.id);
pass.set_pipeline(pipeline); pass.set_pipeline(pipeline.as_compute());
let depth_tex = graph.slot_bind_group( /* let depth_tex_bg = graph.bind_group(graph.bind_group_id("depth_texture").unwrap());
graph let camera_bg = graph.bind_group(graph.bind_group_id("camera").unwrap());
.slot_id("depth_texture") let lights_bg = graph.bind_group(graph.bind_group_id("light_buffers").unwrap());
.expect("Could not find depth texture slot"), let grid_bg = graph.bind_group(graph.bind_group_id("light_indices_grid").unwrap());
); let screen_size_bg = graph.bind_group(graph.bind_group_id("screen_size").unwrap());
let camera_bg = graph.slot_bind_group(
graph
.slot_id("camera_buffer")
.expect("Could not find camera buffers"),
);
let screen_size_bg = graph.slot_bind_group(
graph
.slot_id("screen_size_buffer")
.expect("Could not find screen size buffer slot"),
);
let indices_bg = graph.slot_bind_group(
graph
.slot_id("light_indices")
.expect("Could not find light index buffer slot"),
);
let light_grid_bg = graph.slot_bind_group(
graph
.slot_id("grid_texture")
.expect("Could not find light grid buffer slot"),
);
pass.set_bind_group(0, depth_tex, &[]); pass.set_bind_group(0, depth_tex_bg, &[]);
pass.set_bind_group(1, camera_bg, &[]); pass.set_bind_group(1, camera_bg, &[]);
pass.set_bind_group(2, indices_bg, &[]); pass.set_bind_group(2, lights_bg, &[]);
pass.set_bind_group(3, light_grid_bg, &[]); pass.set_bind_group(3, grid_bg, &[]);
pass.set_bind_group(4, screen_size_bg, &[]); pass.set_bind_group(4, screen_size_bg, &[]); */
graph.set_bind_groups(
&mut pass,
&[
("depth_texture", 0),
("camera", 1),
("light_buffers", 2),
("light_indices_grid", 3),
("screen_size", 4),
],
);
pass.dispatch_workgroups(self.workgroup_size.x, self.workgroup_size.y, 1); pass.dispatch_workgroups(self.workgroup_size.x, self.workgroup_size.y, 1);
} }


@@ -0,0 +1,560 @@
use std::{collections::{HashSet, VecDeque}, rc::Rc};
use glam::Vec3;
use itertools::izip;
use lyra_ecs::{query::{filter::{Has, Not, Or}, Entities, Res, TickOf}, relation::{ChildOf, RelationOriginComponent}, Component, Entity};
use lyra_math::Transform;
use lyra_resource::{gltf::Mesh, ResHandle};
use lyra_scene::{SceneGraph, WorldTransform};
use rustc_hash::FxHashMap;
use tracing::{debug, instrument, warn};
use uuid::Uuid;
use wgpu::util::DeviceExt;
use crate::{
render::{
desc_buf_lay::DescVertexBufferLayout, graph::{
RenderGraphContext, RenderGraphPass, RenderGraphPassDesc,
RenderPassType,
}, material::{Material, MaterialUniform}, render_buffer::{BufferStorage, BufferWrapper}, render_job::RenderJob, resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState}, texture::RenderTexture, transform_buffer_storage::{TransformBuffers, TransformGroup}, vertex::Vertex
},
DeltaTime,
};
type MeshHandle = ResHandle<Mesh>;
type SceneHandle = ResHandle<SceneGraph>;
struct MeshBufferStorage {
buffer_vertex: BufferStorage,
buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,
//#[allow(dead_code)]
//render_texture: Option<RenderTexture>,
material: Option<Rc<Material>>,
// The index of the transform for this entity.
// The tuple is structured like this: (transform index, index of transform inside the buffer)
//transform_index: TransformBufferIndices,
}
#[derive(Clone, Debug, Component)]
struct InterpTransform {
last_transform: Transform,
alpha: f32,
}
#[derive(Default)]
pub struct MeshPass {
transforms: Option<TransformBuffers>,
mesh_buffers: FxHashMap<uuid::Uuid, MeshBufferStorage>,
render_jobs: VecDeque<RenderJob>,
texture_bind_group_layout: Option<Rc<wgpu::BindGroupLayout>>,
material_buffer: Option<wgpu::Buffer>,
material_buffers: FxHashMap<uuid::Uuid, Rc<Material>>,
entity_meshes: FxHashMap<Entity, uuid::Uuid>,
default_texture: Option<RenderTexture>,
}
impl MeshPass {
pub fn new() -> Self {
Self::default()
}
/// Checks if the mesh buffers in the GPU need to be updated.
#[instrument(skip(self, device, queue, mesh_han))]
fn check_mesh_buffers(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, mesh_han: &ResHandle<Mesh>) {
let mesh_uuid = mesh_han.uuid();
if let (Some(mesh), Some(buffers)) = (mesh_han.data_ref(), self.mesh_buffers.get_mut(&mesh_uuid)) {
// check if the buffer sizes dont match. If they dont, completely remake the buffers
let vertices = mesh.position().unwrap();
if buffers.buffer_vertex.count() != vertices.len() {
debug!("Recreating buffers for mesh {}", mesh_uuid.to_string());
let (vert, idx) = self.create_vertex_index_buffers(device, &mesh);
// have to re-get buffers because of borrow checker
let buffers = self.mesh_buffers.get_mut(&mesh_uuid).unwrap();
buffers.buffer_indices = idx;
buffers.buffer_vertex = vert;
return;
}
// update vertices
let vertex_buffer = buffers.buffer_vertex.buffer();
let vertices = vertices.as_slice();
// align the vertices to 4 bytes (u32 is 4 bytes, which is wgpu::COPY_BUFFER_ALIGNMENT)
let (_, vertices, _) = bytemuck::pod_align_to::<Vec3, u32>(vertices);
queue.write_buffer(vertex_buffer, 0, bytemuck::cast_slice(vertices));
// update the indices if they're given
if let Some(index_buffer) = buffers.buffer_indices.as_ref() {
let aligned_indices = match mesh.indices.as_ref().unwrap() {
// U16 indices need to be aligned to u32, for wpgu, which are 4-bytes in size.
lyra_resource::gltf::MeshIndices::U16(v) => bytemuck::pod_align_to::<u16, u32>(v).1,
lyra_resource::gltf::MeshIndices::U32(v) => bytemuck::pod_align_to::<u32, u32>(v).1,
};
let index_buffer = index_buffer.1.buffer();
queue.write_buffer(index_buffer, 0, bytemuck::cast_slice(aligned_indices));
}
}
}
#[instrument(skip(self, device, mesh))]
fn create_vertex_index_buffers(&mut self, device: &wgpu::Device, mesh: &Mesh) -> (BufferStorage, Option<(wgpu::IndexFormat, BufferStorage)>) {
let positions = mesh.position().unwrap();
let tex_coords: Vec<glam::Vec2> = mesh.tex_coords().cloned()
.unwrap_or_else(|| vec![glam::Vec2::new(0.0, 0.0); positions.len()]);
let normals = mesh.normals().unwrap();
assert!(positions.len() == tex_coords.len() && positions.len() == normals.len());
let mut vertex_inputs = vec![];
for (v, t, n) in izip!(positions.iter(), tex_coords.iter(), normals.iter()) {
vertex_inputs.push(Vertex::new(*v, *t, *n));
}
let vertex_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(vertex_inputs.as_slice()),//vertex_combined.as_slice(),
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages:: COPY_DST,
}
);
let vertex_buffer = BufferStorage::new(vertex_buffer, 0, vertex_inputs.len());
let indices = match mesh.indices.as_ref() {
Some(indices) => {
let (idx_type, len, contents) = match indices {
lyra_resource::gltf::MeshIndices::U16(v) => (wgpu::IndexFormat::Uint16, v.len(), bytemuck::cast_slice(v)),
lyra_resource::gltf::MeshIndices::U32(v) => (wgpu::IndexFormat::Uint32, v.len(), bytemuck::cast_slice(v)),
};
let index_buffer = device.create_buffer_init(
&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents,
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages:: COPY_DST,
}
);
let buffer_indices = BufferStorage::new(index_buffer, 0, len);
Some((idx_type, buffer_indices))
},
None => {
None
}
};
( vertex_buffer, indices )
}
#[instrument(skip(self, device, queue, mesh))]
fn create_mesh_buffers(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, mesh: &Mesh) -> MeshBufferStorage {
let (vertex_buffer, buffer_indices) = self.create_vertex_index_buffers(device, mesh);
let material = mesh.material.as_ref()
.expect("Material resource not loaded yet");
let material_ref = material.data_ref()
.unwrap();
let material = self.material_buffers.entry(material.uuid())
.or_insert_with(|| {
debug!(uuid=material.uuid().to_string(), "Sending material to gpu");
Rc::new(Material::from_resource(&device, &queue, self.texture_bind_group_layout.clone().unwrap(), &material_ref))
});
// TODO: support material uniforms from multiple uniforms
let uni = MaterialUniform::from(&**material);
queue.write_buffer(&self.material_buffer.as_ref().unwrap(), 0, bytemuck::bytes_of(&uni));
MeshBufferStorage {
buffer_vertex: vertex_buffer,
buffer_indices,
material: Some(material.clone()),
}
}
/// Processes the mesh for the renderer, storing and creating buffers as needed. Returns true if a new mesh was processed.
#[instrument(skip(self, device, queue, transform, mesh, entity))]
fn process_mesh(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, entity: Entity, transform: Transform, mesh: &Mesh, mesh_uuid: Uuid) -> bool {
let _ = transform;
/* if self.transform_buffers.should_expand() {
self.transform_buffers.expand_buffers(&self.device);
}
self.transform_buffers.update_or_insert(&self.queue, &self.render_limits,
entity, || ( transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation) )); */
#[allow(clippy::map_entry)]
if !self.mesh_buffers.contains_key(&mesh_uuid) {
// create the mesh's buffers
let buffers = self.create_mesh_buffers(device, queue, mesh);
self.mesh_buffers.insert(mesh_uuid, buffers);
self.entity_meshes.insert(entity, mesh_uuid);
true
} else { false }
}
}
impl RenderGraphPass for MeshPass {
fn desc(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
) -> crate::render::graph::RenderGraphPassDesc {
let device = graph.device();
let transforms = TransformBuffers::new(device);
let transform_bgl = transforms.bindgroup_layout.clone();
self.transforms = Some(transforms);
let texture_bind_group_layout = Rc::new(RenderTexture::create_layout(&device));
self.texture_bind_group_layout = Some(texture_bind_group_layout.clone());
let (material_bgl, material_bg, material_buf, _) = BufferWrapper::builder()
.label_prefix("material")
.visibility(wgpu::ShaderStages::FRAGMENT)
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
.contents(&[MaterialUniform::default()])
.finish_parts(device);
let material_bgl = Rc::new(material_bgl);
let material_bg = Rc::new(material_bg);
self.material_buffer = Some(material_buf);
// load the default texture
let bytes = include_bytes!("../../default_texture.png");
self.default_texture = Some(RenderTexture::from_bytes(&device, &graph.queue, texture_bind_group_layout.clone(), bytes, "default_texture").unwrap());
// get surface config format
let main_rt = graph.slot_id("main_render_target")
.and_then(|s| graph.slot_value(s))
.and_then(|s| s.as_render_target())
.expect("missing main render target");
let surface_config_format = main_rt.surface_config.format;
drop(main_rt);
// get the id here to make borrow checker happy
let pass_id = graph.next_id();
let camera_bgl = graph.bind_group_layout(graph.bind_group_id("camera").unwrap());
let lights_bgl = graph.bind_group_layout(graph.bind_group_id("light_buffers").unwrap());
let light_grid_bgl = graph
.bind_group_layout(graph.bind_group_id("light_indices_grid")
.expect("Missing light grid bind group"));
let shader = Rc::new(Shader {
label: Some("base_shader".into()),
source: include_str!("../../shaders/base.wgsl").to_string(),
});
let desc = RenderGraphPassDesc::new(
pass_id,
"meshes",
RenderPassType::Render,
Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
label: Some("meshes".into()),
layouts: vec![
texture_bind_group_layout.clone(),
transform_bgl,
camera_bgl.clone(),
lights_bgl.clone(),
material_bgl.clone(),
texture_bind_group_layout,
light_grid_bgl.clone(),
],
push_constant_ranges: vec![],
vertex: VertexState {
module: shader.clone(),
entry_point: "vs_main".into(),
buffers: vec![
Vertex::desc().into(),
],
},
fragment: Some(FragmentState {
module: shader,
entry_point: "fs_main".into(),
targets: vec![Some(wgpu::ColorTargetState {
format: surface_config_format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
depth_stencil: Some(wgpu::DepthStencilState {
format: RenderTexture::DEPTH_FORMAT,
depth_write_enabled: true,
depth_compare: wgpu::CompareFunction::Less,
stencil: wgpu::StencilState::default(), // TODO: stencil buffer
bias: wgpu::DepthBiasState::default(),
}),
primitive: wgpu::PrimitiveState::default(),
multisample: wgpu::MultisampleState::default(),
multiview: None,
})),
vec![
("material", material_bg, Some(material_bgl)),
],
);
desc
}
#[instrument(skip(self, world, context))]
fn prepare(&mut self, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
let device = context.device;
let queue = context.queue;
let render_limits = device.limits();
let last_epoch = world.current_tick();
let mut alive_entities = HashSet::new();
let view = world.view_iter::<(
Entities,
&Transform,
TickOf<Transform>,
Or<
(&MeshHandle, TickOf<MeshHandle>),
(&SceneHandle, TickOf<SceneHandle>)
>,
Option<&mut InterpTransform>,
Res<DeltaTime>,
)>();
// used to store InterpTransform components to add to entities later
let mut component_queue: Vec<(Entity, InterpTransform)> = vec![];
for (
entity,
transform,
_transform_epoch,
(
mesh_pair,
scene_pair
),
interp_tran,
delta_time,
) in view
{
alive_entities.insert(entity);
let interp_transform = match interp_tran {
Some(mut interp_transform) => {
// found in https://youtu.be/YJB1QnEmlTs?t=472
interp_transform.alpha = 1.0 - interp_transform.alpha.powf(**delta_time);
interp_transform.last_transform = interp_transform.last_transform.lerp(*transform, interp_transform.alpha);
interp_transform.last_transform
},
None => {
let interp = InterpTransform {
last_transform: *transform,
alpha: 0.5,
};
component_queue.push((entity, interp));
*transform
}
};
if let Some((mesh_han, mesh_epoch)) = mesh_pair {
if let Some(mesh) = mesh_han.data_ref() {
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// dont need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, interp_transform, &*mesh, mesh_han.uuid())
&& mesh_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
if transforms.needs_expand() {
debug!("Expanding transform buffers");
transforms.expand_buffers(device);
}
let group = TransformGroup::EntityRes(entity, mesh_han.uuid());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
group, interp_transform.calculate_mat4(), glam::Mat3::from_quat(interp_transform.rotation));
let material = mesh.material.as_ref().unwrap()
.data_ref().unwrap();
let shader = material.shader_uuid.unwrap_or(0);
let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
self.render_jobs.push_back(job);
}
}
if let Some((scene_han, scene_epoch)) = scene_pair {
if let Some(scene) = scene_han.data_ref() {
if scene_epoch == last_epoch {
let view = scene.world().view::<(Entities, &mut WorldTransform, &Transform, Not<Has<RelationOriginComponent<ChildOf>>>)>();
lyra_scene::system_update_world_transforms(scene.world(), view).unwrap();
}
for (mesh_han, pos) in scene.world().view_iter::<(&MeshHandle, &WorldTransform)>() {
if let Some(mesh) = mesh_han.data_ref() {
let mesh_interpo = interp_transform + **pos;
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// dont need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, mesh_interpo, &*mesh, mesh_han.uuid())
&& scene_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
if transforms.needs_expand() {
debug!("Expanding transform buffers");
transforms.expand_buffers(device);
}
let scene_mesh_group = TransformGroup::Res(scene_han.uuid(), mesh_han.uuid());
let group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
group, mesh_interpo.calculate_mat4(), glam::Mat3::from_quat(mesh_interpo.rotation) );
let material = mesh.material.as_ref().unwrap()
.data_ref().unwrap();
let shader = material.shader_uuid.unwrap_or(0);
let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
self.render_jobs.push_back(job);
}
}
}
}
}
for (en, interp) in component_queue {
world.insert(en, interp);
}
let transforms = self.transforms.as_mut().unwrap();
transforms.send_to_gpu(queue);
}
fn execute(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
desc: &crate::render::graph::RenderGraphPassDesc,
context: &mut crate::render::graph::RenderGraphContext,
) {
let encoder = context.encoder.as_mut().unwrap();
let view = graph
.slot_value(graph.slot_id("window_texture_view").unwrap())
.unwrap()
.as_texture_view();
let depth_view = graph
.slot_value(graph.slot_id("depth_texture_view").unwrap())
.unwrap()
.as_texture_view();
let camera_bg = graph
.bind_group(graph.bind_group_id("camera")
.expect("Missing camera bind group"));
let lights_bg = graph
.bind_group(graph.bind_group_id("light_buffers")
.expect("Missing lights bind group"));
let light_grid_bg = graph
.bind_group(graph.bind_group_id("light_indices_grid")
.expect("Missing light grid bind group"));
let material_bg = graph
.bind_group(graph.bind_group_id("material")
.expect("Missing material bind group"));
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
})],
// enable depth buffer
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: true,
}),
stencil_ops: None,
}),
});
let pipeline = graph.pipeline(desc.id);
pass.set_pipeline(&pipeline.as_render());
//let material_buffer_bg = self.material_buffer.as_ref().unwrap().bindgroup();
let default_texture = self.default_texture.as_ref().unwrap();
let transforms = self.transforms.as_mut().unwrap();
while let Some(job) = self.render_jobs.pop_front() {
// get the mesh (containing vertices) and the buffers from storage
let buffers = self.mesh_buffers.get(&job.mesh_uuid);
if buffers.is_none() {
warn!("Skipping job since its mesh is missing {:?}", job.mesh_uuid);
continue;
}
let buffers = buffers.unwrap();
// Bind the optional texture
if let Some(tex) = buffers.material.as_ref()
.and_then(|m| m.diffuse_texture.as_ref()) {
pass.set_bind_group(0, tex.bind_group(), &[]);
} else {
pass.set_bind_group(0, default_texture.bind_group(), &[]);
}
if let Some(tex) = buffers.material.as_ref()
.and_then(|m| m.specular.as_ref())
.and_then(|s| s.texture.as_ref().or(s.color_texture.as_ref())) {
pass.set_bind_group(5, tex.bind_group(), &[]);
} else {
pass.set_bind_group(5, default_texture.bind_group(), &[]);
}
// Get the bindgroup for job's transform and bind to it using an offset.
let bindgroup = transforms.bind_group(job.transform_id);
let offset = transforms.buffer_offset(job.transform_id);
pass.set_bind_group(1, bindgroup, &[ offset, ]);
pass.set_bind_group(2, &camera_bg, &[]);
pass.set_bind_group(3, &lights_bg, &[]);
pass.set_bind_group(4, &material_bg, &[]);
pass.set_bind_group(6, &light_grid_bg, &[]);
// if this mesh uses indices, use them to draw the mesh
if let Some((idx_type, indices)) = buffers.buffer_indices.as_ref() {
let indices_len = indices.count() as u32;
pass.set_vertex_buffer(buffers.buffer_vertex.slot(), buffers.buffer_vertex.buffer().slice(..));
pass.set_index_buffer(indices.buffer().slice(..), *idx_type);
pass.draw_indexed(0..indices_len, 0, 0..1);
} else {
let vertex_count = buffers.buffer_vertex.count();
pass.set_vertex_buffer(buffers.buffer_vertex.slot(), buffers.buffer_vertex.buffer().slice(..));
pass.draw(0..vertex_count as u32, 0..1);
}
}
}
}
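The `InterpTransform` handling in `MeshPass::prepare` above smooths entity transforms with `alpha = 1.0 - alpha.powf(delta_time)` followed by a lerp, per the linked video. A standalone sketch of that update rule on plain `f32` values, with made-up numbers, just to show how the smoothing factor and the interpolated value evolve per frame:

```rust
// Sketch of the smoothing used by InterpTransform in MeshPass::prepare:
// alpha is re-derived from the frame delta time, then used to lerp the
// stored value toward the target. The numbers are illustrative only.
fn lerp(a: f32, b: f32, t: f32) -> f32 {
    a + (b - a) * t
}

fn main() {
    let target = 10.0_f32;      // e.g. the entity's current transform
    let mut smoothed = 0.0_f32; // e.g. the stored last_transform
    let mut alpha = 0.5_f32;    // initial alpha, as in the new InterpTransform

    for frame in 0..5 {
        let delta_time = 0.016_f32; // ~60 FPS
        alpha = 1.0 - alpha.powf(delta_time);
        smoothed = lerp(smoothed, target, alpha);
        println!("frame {frame}: alpha = {alpha:.3}, smoothed = {smoothed:.3}");
    }
}
```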


@@ -1,9 +1,7 @@
-/* mod light_cull_compute;
+mod light_cull_compute;
 pub use light_cull_compute::*;
 
-mod depth_prepass;
+/*mod depth_prepass;
 pub use depth_prepass::*; */
 
 /* mod simple_phong;
@@ -12,6 +10,12 @@ pub use simple_phong::*; */
 mod base;
 pub use base::*;
 
+mod meshes;
+pub use meshes::*;
+
+mod light_base;
+pub use light_base::*;
+
 mod present_pass;
 pub use present_pass::*;


@@ -7,7 +7,7 @@ use crate::{
             SlotAttribute, SlotValue,
         },
         render_buffer::BufferWrapper,
-        resource::{FragmentState, RenderPipelineDescriptor, Shader, VertexState},
+        resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState},
     },
     DeltaTime,
 };
@@ -53,9 +53,9 @@ impl RenderGraphPass for TrianglePass {
         let mut desc = RenderGraphPassDesc::new(
             graph.next_id(),
-            "TrianglePass",
+            "triangle",
             RenderPassType::Render,
-            Some(RenderPipelineDescriptor {
+            Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
                 label: Some("triangle_pipeline".into()),
                 layouts: vec![color_bgl.clone()],
                 push_constant_ranges: vec![],
@@ -77,7 +77,7 @@ impl RenderGraphPass for TrianglePass {
                 primitive: wgpu::PrimitiveState::default(),
                 multisample: wgpu::MultisampleState::default(),
                 multiview: None,
-            }),
+            })),
             vec![("color_bg", color_bg, Some(color_bgl))],
         );
@@ -125,7 +125,7 @@ impl RenderGraphPass for TrianglePass {
         let encoder = context.encoder.as_mut().unwrap();
         let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
-            label: Some("TrianglePass"),
+            label: Some("triangle_pass"),
             color_attachments: &[
                 // This is what @location(0) in the fragment shader targets
                 Some(wgpu::RenderPassColorAttachment {

View File

@ -6,7 +6,7 @@ use lyra_ecs::{Entity, Tick, World, query::{Entities, TickOf}};
pub use point::*;
pub use spotlight::*;
-use std::{collections::{HashMap, VecDeque}, marker::PhantomData, mem};
+use std::{collections::{HashMap, VecDeque}, marker::PhantomData, mem, rc::Rc};
use crate::math::Transform;
@ -100,8 +100,10 @@ impl<U: Default + bytemuck::Pod + bytemuck::Zeroable> LightBuffer<U> {
}
pub(crate) struct LightUniformBuffers {
-pub buffer: wgpu::Buffer,
-pub bind_group_pair: BindGroupPair,
+pub buffer: Rc<wgpu::Buffer>,
+//pub bind_group_pair: BindGroupPair,
+pub bind_group: Rc<wgpu::BindGroup>,
+pub bind_group_layout: Rc<wgpu::BindGroupLayout>,
pub light_indexes: HashMap<Entity, u32>,
dead_indices: VecDeque<u32>,
pub current_light_idx: u32,
@ -158,8 +160,9 @@ impl LightUniformBuffers {
});
Self {
-buffer,
-bind_group_pair: BindGroupPair::new(bindgroup, bindgroup_layout),
+buffer: Rc::new(buffer),
+bind_group: Rc::new(bindgroup),
+bind_group_layout: Rc::new(bindgroup_layout),
light_indexes: Default::default(),
current_light_idx: 0,
dead_indices: VecDeque::new(),

View File

@ -12,6 +12,6 @@ pub mod camera;
pub mod window;
pub mod transform_buffer_storage;
pub mod light;
-pub mod light_cull_compute;
+//pub mod light_cull_compute;
pub mod avec;
pub mod graph;

View File

@ -1,35 +1,16 @@
-use std::cell::RefCell;
use std::collections::VecDeque;
use std::ops::{Deref, DerefMut};
use std::rc::Rc;
use std::sync::Arc;
-use std::borrow::Cow;
-use lyra_ecs::Component;
use lyra_ecs::World;
-use lyra_scene::SceneGraph;
use tracing::{debug, instrument, warn};
-use wgpu::Limits;
use winit::window::Window;
-use crate::math::Transform;
+use crate::render::graph::{BasePass, LightBasePass, LightCullComputePass, MeshPass, PresentPass};
-use crate::render::graph::{BasePass, PresentPass, TrianglePass};
-use crate::render::material::MaterialUniform;
-use crate::render::render_buffer::BufferWrapperBuilder;
-use super::camera::CameraUniform;
use super::graph::RenderGraph;
-use super::light::LightUniformBuffers;
+use super::{resource::RenderPipeline, render_job::RenderJob};
-use super::material::Material;
-use super::render_buffer::BufferWrapper;
-use super::texture::RenderTexture;
-use super::transform_buffer_storage::TransformBuffers;
-use super::{resource::RenderPipeline, render_buffer::BufferStorage, render_job::RenderJob};
-use lyra_resource::{gltf::Mesh, ResHandle};
-type MeshHandle = ResHandle<Mesh>;
-type SceneHandle = ResHandle<SceneGraph>;
#[derive(Clone, Copy, Debug)]
pub struct ScreenSize(glam::UVec2);
@ -63,25 +44,6 @@ pub trait RenderPass {
fn on_resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>);
}
-struct MeshBufferStorage {
-buffer_vertex: BufferStorage,
-buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,
-//#[allow(dead_code)]
-//render_texture: Option<RenderTexture>,
-material: Option<Rc<Material>>,
-// The index of the transform for this entity.
-// The tuple is structured like this: (transform index, index of transform inside the buffer)
-//transform_index: TransformBufferIndices,
-}
-#[derive(Clone, Debug, Component)]
-pub struct InterpTransform {
-last_transform: Transform,
-alpha: f32,
-}
pub struct BasicRenderer {
pub device: Rc<wgpu::Device>, // device does not need to be mutable, no need for refcell
pub queue: Rc<wgpu::Queue>,
@ -93,24 +55,6 @@ pub struct BasicRenderer {
pub render_pipelines: rustc_hash::FxHashMap<u64, Arc<RenderPipeline>>,
pub render_jobs: VecDeque<RenderJob>,
-//mesh_buffers: rustc_hash::FxHashMap<uuid::Uuid, MeshBufferStorage>, // TODO: clean up left over buffers from deleted entities/components
-//material_buffers: rustc_hash::FxHashMap<uuid::Uuid, Rc<Material>>,
-//entity_meshes: rustc_hash::FxHashMap<Entity, uuid::Uuid>,
-//transform_buffers: TransformBuffers,
-render_limits: Limits,
-//inuse_camera: RenderCamera,
-//camera_buffer: BufferWrapper,
-//bgl_texture: Rc<BindGroupLayout>,
-//default_texture: RenderTexture,
-//depth_buffer_texture: RenderTexture,
-//material_buffer: BufferWrapper,
-//light_buffers: LightUniformBuffers,
-//light_cull_compute: LightCullCompute,
graph: RenderGraph,
}
@ -156,11 +100,8 @@ impl BasicRenderer {
None,
).await.unwrap();
-let render_limits = device.limits();
let surface_caps = surface.get_capabilities(&adapter);
let present_mode = surface_caps.present_modes[0];
debug!("present mode: {:?}", present_mode);
let surface_format = surface_caps.formats.iter()
@ -172,61 +113,41 @@ impl BasicRenderer {
format: surface_format,
width: size.width,
height: size.height,
-present_mode: wgpu::PresentMode::Immediate,
+present_mode: wgpu::PresentMode::default(), //wgpu::PresentMode::Mailbox, // "Fast Vsync"
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
};
surface.configure(&device, &config);
-let bgl_texture = Rc::new(RenderTexture::create_layout(&device));
-let shader_src = include_str!("shaders/base.wgsl");
-let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
-label: Some("Shader"),
-source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(shader_src)),
-});
-let transform_buffers = TransformBuffers::new(&device);
-let camera_buffer = BufferWrapper::builder()
-.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
-.contents(&[CameraUniform::default()])
-.label_prefix("Camera")
-.visibility(wgpu::ShaderStages::VERTEX | wgpu::ShaderStages::FRAGMENT | wgpu::ShaderStages::COMPUTE)
-.buffer_dynamic_offset(false)
-.finish(&device);
-let mut depth_texture = RenderTexture::create_depth_texture(&device, &config, "Tex_Depth");
-// load the default texture
-let bytes = include_bytes!("default_texture.png");
-let default_texture = RenderTexture::from_bytes(&device, &queue, bgl_texture.clone(), bytes, "default_texture").unwrap();
-let light_uniform_buffers = LightUniformBuffers::new(&device);
-let mat_buffer = BufferWrapperBuilder::new()
-.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
-.visibility(wgpu::ShaderStages::FRAGMENT)
-.contents(&[MaterialUniform::default()])
-.finish(&device);
let device = Rc::new(device);
let queue = Rc::new(queue);
-//let light_cull_compute = LightCullCompute::new(device.clone(), queue.clone(), size, &light_uniform_buffers, &camera_buffer, &mut depth_texture);
let mut g = RenderGraph::new(device.clone(), queue.clone());
-/* debug!("Adding base pass");
-g.add_pass(TrianglePass::new());
-debug!("Adding depth pre-pass");
-g.add_pass(DepthPrePass::new());
-debug!("Adding light cull compute pass");
-g.add_pass(LightCullComputePass::new(size)); */
debug!("Adding base pass");
g.add_pass(BasePass::new(surface, config));
-debug!("Adding triangle pass");
-g.add_pass(TrianglePass::new());
+debug!("Adding light base pass");
+g.add_pass(LightBasePass::new());
+debug!("Adding light cull compute pass");
+g.add_pass(LightCullComputePass::new(size));
+//debug!("Adding triangle pass");
+//g.add_pass(TrianglePass::new());
+debug!("Adding mesh pass");
+g.add_pass(MeshPass::new());
debug!("Adding present pass");
g.add_pass(PresentPass::new("main_render_target"));
+g.add_edge("base", "light_base");
+g.add_edge("light_base", "light_cull_compute");
+g.add_edge("base", "meshes");
+// make sure that present runs last
+g.add_edge("base", "present_main_render_target");
+g.add_edge("light_cull_compute", "present_main_render_target");
+g.add_edge("meshes", "present_main_render_target");
g.setup(&device);
Self {
@ -243,7 +164,6 @@ impl BasicRenderer {
render_pipelines: Default::default(),
render_jobs: Default::default(),
-render_limits,
graph: g,
}
}
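The `add_edge` calls above only declare ordering constraints between named passes; how the graph flattens them into an execution order is not shown in this diff. Below is a minimal, self-contained sketch of one way such constraints can be resolved with petgraph's topological sort — the pass names are taken from the calls above, but the use of `toposort` here is an assumption about the implementation, not a quote of it.

use petgraph::{algo::toposort, graphmap::DiGraphMap};

fn main() {
    // An edge "a" -> "b" means pass "a" must execute before pass "b".
    let mut deps = DiGraphMap::<&str, ()>::new();
    for (before, after) in [
        ("base", "light_base"),
        ("light_base", "light_cull_compute"),
        ("base", "meshes"),
        ("base", "present_main_render_target"),
        ("light_cull_compute", "present_main_render_target"),
        ("meshes", "present_main_render_target"),
    ] {
        deps.add_edge(before, after, ());
    }

    // A topological sort yields a valid execution order and fails on cycles.
    let order = toposort(&deps, None).expect("render graph contains a cycle");
    println!("{order:?}"); // e.g. ["base", "light_base", "light_cull_compute", "meshes", "present_main_render_target"]
}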

View File

@ -1,9 +1,41 @@
-use std::ops::Deref;
+use std::{ops::Deref, rc::Rc};
use wgpu::PipelineLayout;
+use super::Shader;
+//#[derive(Debug, Clone)]
+pub struct ComputePipelineDescriptor {
+pub label: Option<String>,
+pub layouts: Vec<Rc<wgpu::BindGroupLayout>>,
+pub push_constant_ranges: Vec<wgpu::PushConstantRange>,
+// TODO: make this a ResHandle<Shader>
+/// The compiled shader module for the stage.
+pub shader: Rc<Shader>,
+/// The entry point in the compiled shader.
+/// There must be a function in the shader with the same name.
+pub shader_entry_point: String,
+}
+impl ComputePipelineDescriptor {
+/// Create the [`wgpu::PipelineLayout`] for this pipeline
+pub(crate) fn create_layout(&self, device: &wgpu::Device) -> wgpu::PipelineLayout {
+let bgs = self
+.layouts
+.iter()
+.map(|bg| bg.as_ref())
+.collect::<Vec<_>>();
+device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
+label: None, //self.label.as_ref().map(|s| format!("{}Layout", s)),
+bind_group_layouts: &bgs,
+push_constant_ranges: &self.push_constant_ranges,
+})
+}
+}
pub struct ComputePipeline {
-layout: PipelineLayout,
+layout: Option<PipelineLayout>,
wgpu_pipeline: wgpu::ComputePipeline,
}
@ -15,8 +47,48 @@ impl Deref for ComputePipeline {
}
}
+impl From<wgpu::ComputePipeline> for ComputePipeline {
+fn from(value: wgpu::ComputePipeline) -> Self {
+Self {
+layout: None,
+wgpu_pipeline: value,
+}
+}
+}
impl ComputePipeline {
-pub fn new(layout: PipelineLayout, pipeline: wgpu::ComputePipeline) -> Self {
+/// Creates a new compute pipeline on the `device`.
+///
+/// Parameters:
+/// * `device` - The device to create the pipeline on.
+/// * `desc` - The descriptor of the compute pipeline.
+pub fn create(device: &wgpu::Device, desc: &ComputePipelineDescriptor) -> ComputePipeline {
+// create the layout only if bind group layouts were specified
+let layout = if !desc.layouts.is_empty() {
+Some(desc.create_layout(device))
+} else {
+None
+};
+// an Rc was used here so that this shader could be reused by the fragment stage if
+// they share the same shader. I tried to do it without an Rc but couldn't get past
+// the borrow checker
+let compiled_shader = Rc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
+label: desc.shader.label.as_ref().map(|s| s.as_str()),
+source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
+&desc.shader.source,
+)),
+}));
+let desc = wgpu::ComputePipelineDescriptor {
+label: desc.label.as_deref(),
+layout: layout.as_ref(),
+module: &compiled_shader,
+entry_point: &desc.shader_entry_point,
+};
+let pipeline = device.create_compute_pipeline(&desc);
Self {
layout,
wgpu_pipeline: pipeline,
@ -24,12 +96,12 @@ impl ComputePipeline {
}
#[inline(always)]
-pub fn layout(&self) -> &PipelineLayout {
+pub fn layout(&self) -> Option<&PipelineLayout> {
-&self.layout
+self.layout.as_ref()
}
#[inline(always)]
pub fn wgpu_pipeline(&self) -> &wgpu::ComputePipeline {
&self.wgpu_pipeline
}
}
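A minimal usage sketch of the new descriptor-driven constructor; the WGSL source, labels, and entry point below are invented for illustration, and `device` is assumed to be an already-created `wgpu::Device`.

use std::rc::Rc;

fn build_noop_compute_pipeline(device: &wgpu::Device) -> ComputePipeline {
    // A do-nothing compute shader, just enough to satisfy the entry point.
    let shader = Rc::new(Shader {
        label: Some("noop_compute".into()),
        source: "@compute @workgroup_size(1) fn cs_main() {}".into(),
    });

    let desc = ComputePipelineDescriptor {
        label: Some("noop_compute_pipeline".into()),
        // No bind group layouts, so ComputePipeline::create leaves the wgpu
        // pipeline layout implicit and layout() will return None.
        layouts: vec![],
        push_constant_ranges: vec![],
        shader,
        shader_entry_point: "cs_main".into(),
    };

    ComputePipeline::create(device, &desc)
}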

View File

@ -1,3 +1,6 @@
+mod shader;
+pub use shader::*;
mod pipeline;
pub use pipeline::*;

View File

@ -1,4 +1,25 @@
-use super::{compute_pipeline::ComputePipeline, render_pipeline::RenderPipeline};
+use super::{compute_pipeline::ComputePipeline, render_pipeline::RenderPipeline, ComputePipelineDescriptor, RenderPipelineDescriptor};
+pub enum PipelineDescriptor {
+Render(RenderPipelineDescriptor),
+Compute(ComputePipelineDescriptor),
+}
+impl PipelineDescriptor {
+pub fn as_render_pipeline_descriptor(&self) -> Option<&RenderPipelineDescriptor> {
+match self {
+Self::Render(r) => Some(r),
+_ => None,
+}
+}
+pub fn as_compute_pipeline_descriptor(&self) -> Option<&ComputePipelineDescriptor> {
+match self {
+Self::Compute(c) => Some(c),
+_ => None,
+}
+}
+}
pub enum Pipeline {
Render(RenderPipeline),
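One way a caller might branch on the new enum when building pipelines; a `Pipeline::Compute` variant and a `RenderPipeline::create` constructor mirroring `ComputePipeline::create` are assumed here and are not shown in this diff.

// Sketch only: dispatch a PipelineDescriptor to the matching constructor.
fn create_pipeline(device: &wgpu::Device, desc: &PipelineDescriptor) -> Pipeline {
    match desc {
        // RenderPipeline::create is assumed to exist with this shape.
        PipelineDescriptor::Render(r) => Pipeline::Render(RenderPipeline::create(device, r)),
        // Pipeline::Compute is assumed as the second variant of Pipeline.
        PipelineDescriptor::Compute(c) => Pipeline::Compute(ComputePipeline::create(device, c)),
    }
}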

View File

@ -2,44 +2,7 @@ use std::{num::NonZeroU32, ops::Deref, rc::Rc};
use wgpu::PipelineLayout;
-#[derive(Debug, Default, Clone)]
+use super::{FragmentState, VertexState};
-pub struct VertexBufferLayout {
-pub array_stride: wgpu::BufferAddress,
-pub step_mode: wgpu::VertexStepMode,
-pub attributes: Vec<wgpu::VertexAttribute>,
-}
-/// Describes the vertex stage in a render pipeline.
-#[derive(Debug, Clone)]
-pub struct VertexState {
-// TODO: make this a ResHandle<Shader>
-/// The compiled shader module for the stage.
-pub module: Rc<Shader>,
-/// The entry point in the compiled shader.
-/// There must be a function in the shader with the same name.
-pub entry_point: String,
-/// The format of the vertex buffers used with this pipeline.
-pub buffers: Vec<VertexBufferLayout>,
-}
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Shader {
-pub label: Option<String>,
-pub source: String,
-}
-/// Describes the fragment stage in the render pipeline.
-#[derive(Debug, Clone)]
-pub struct FragmentState {
-// TODO: make this a ResHandle<Shader>
-/// The compiled shader module for the stage.
-pub module: Rc<Shader>,
-/// The entry point in the compiled shader.
-/// There must be a function in the shader with the same name.
-pub entry_point: String,
-/// The color state of the render targets.
-pub targets: Vec<Option<wgpu::ColorTargetState>>,
-}
//#[derive(Debug, Clone)]
pub struct RenderPipelineDescriptor {

View File

@ -0,0 +1,50 @@
use std::rc::Rc;
#[derive(Debug, Default, Clone)]
pub struct VertexBufferLayout {
pub array_stride: wgpu::BufferAddress,
pub step_mode: wgpu::VertexStepMode,
pub attributes: Vec<wgpu::VertexAttribute>,
}
impl<'a> From<wgpu::VertexBufferLayout<'a>> for VertexBufferLayout {
fn from(value: wgpu::VertexBufferLayout) -> Self {
Self {
array_stride: value.array_stride,
step_mode: value.step_mode,
attributes: value.attributes.to_vec(),
}
}
}
/// Describes the vertex stage in a render pipeline.
#[derive(Debug, Clone)]
pub struct VertexState {
// TODO: make this a ResHandle<Shader>
/// The compiled shader module for the stage.
pub module: Rc<Shader>,
/// The entry point in the compiled shader.
/// There must be a function in the shader with the same name.
pub entry_point: String,
/// The format of the vertex buffers used with this pipeline.
pub buffers: Vec<VertexBufferLayout>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Shader {
pub label: Option<String>,
pub source: String,
}
/// Describes the fragment stage in the render pipeline.
#[derive(Debug, Clone)]
pub struct FragmentState {
// TODO: make this a ResHandle<Shader>
/// The compiled shader module for the stage.
pub module: Rc<Shader>,
/// The entry point in the compiled shader.
/// There must be a function in the shader with the same name.
pub entry_point: String,
/// The color state of the render targets.
pub targets: Vec<Option<wgpu::ColorTargetState>>,
}
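A small sketch of how these types compose; the WGSL source, entry point, and single-attribute layout below are illustrative assumptions, not the engine's real vertex format.

use std::rc::Rc;

fn position_only_vertex_state() -> VertexState {
    let shader = Rc::new(Shader {
        label: Some("position_only".into()),
        source: "@vertex fn vs_main(@location(0) pos: vec3<f32>) -> @builtin(position) vec4<f32> { return vec4<f32>(pos, 1.0); }".into(),
    });

    // Build the engine-side layout from wgpu's borrowed layout type via the From impl above.
    let layout = VertexBufferLayout::from(wgpu::VertexBufferLayout {
        array_stride: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
        step_mode: wgpu::VertexStepMode::Vertex,
        attributes: &wgpu::vertex_attr_array![0 => Float32x3],
    });

    VertexState {
        module: shader,
        entry_point: "vs_main".into(),
        buffers: vec![layout],
    }
}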

View File

@ -1,87 +0,0 @@
// Vertex shader
const max_light_count: u32 = 16u;
const LIGHT_TY_DIRECTIONAL = 0u;
const LIGHT_TY_POINT = 1u;
const LIGHT_TY_SPOT = 2u;
const ALPHA_CUTOFF = 0.1;
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) tex_coords: vec2<f32>,
@location(2) normal: vec3<f32>,
}
struct VertexOutput {
@builtin(position) clip_position: vec4<f32>,
@location(0) tex_coords: vec2<f32>,
@location(1) world_position: vec3<f32>,
@location(2) world_normal: vec3<f32>,
}
struct TransformData {
transform: mat4x4<f32>,
normal_matrix: mat4x4<f32>,
}
struct CameraUniform {
view: mat4x4<f32>,
inverse_projection: mat4x4<f32>,
view_projection: mat4x4<f32>,
projection: mat4x4<f32>,
position: vec3<f32>,
tile_debug: u32,
};
@group(1) @binding(0)
var<uniform> u_model_transform_data: TransformData;
@group(2) @binding(0)
var<uniform> u_camera: CameraUniform;
@vertex
fn vs_main(
model: VertexInput,
) -> VertexOutput {
var out: VertexOutput;
out.tex_coords = model.tex_coords;
out.clip_position = u_camera.view_projection * u_model_transform_data.transform * vec4<f32>(model.position, 1.0);
// the normal mat is actually only a mat3x3, but there's a bug in wgpu: https://github.com/gfx-rs/wgpu-rs/issues/36
let normal_mat4 = u_model_transform_data.normal_matrix;
let normal_mat = mat3x3(normal_mat4[0].xyz, normal_mat4[1].xyz, normal_mat4[2].xyz);
out.world_normal = normalize(normal_mat * model.normal, );
var world_position: vec4<f32> = u_model_transform_data.transform * vec4<f32>(model.position, 1.0);
out.world_position = world_position.xyz;
return out;
}
// Fragment shader
struct Material {
ambient: vec4<f32>,
diffuse: vec4<f32>,
specular: vec4<f32>,
shininess: f32,
}
@group(0) @binding(0)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(1)
var s_diffuse: sampler;
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
let object_color: vec4<f32> = textureSample(t_diffuse, s_diffuse, in.tex_coords);
if (object_color.a < ALPHA_CUTOFF) {
discard;
}
return object_color;
}

View File

@ -273,7 +273,10 @@ impl RenderTexture {
};
let texture = device.create_texture(&desc);
-let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
+let view = texture.create_view(&wgpu::TextureViewDescriptor {
+format: Some(wgpu::TextureFormat::Depth32Float),
+..Default::default()
+});
let sampler = device.create_sampler(
&wgpu::SamplerDescriptor { // 4.
address_mode_u: wgpu::AddressMode::ClampToEdge,
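The same idea in isolation: spelling out the view format instead of relying on `TextureViewDescriptor::default()` makes the depth view unambiguously `Depth32Float`. A standalone sketch; the usage flags are assumptions.

fn create_depth_view(device: &wgpu::Device, width: u32, height: u32) -> wgpu::TextureView {
    let texture = device.create_texture(&wgpu::TextureDescriptor {
        label: Some("Tex_Depth"),
        size: wgpu::Extent3d { width, height, depth_or_array_layers: 1 },
        mip_level_count: 1,
        sample_count: 1,
        dimension: wgpu::TextureDimension::D2,
        format: wgpu::TextureFormat::Depth32Float,
        usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
        view_formats: &[],
    });
    // Explicit view format, matching the change above.
    texture.create_view(&wgpu::TextureViewDescriptor {
        format: Some(wgpu::TextureFormat::Depth32Float),
        ..Default::default()
    })
}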

View File

@ -1,4 +1,4 @@
-use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64};
+use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64, rc::Rc};
use lyra_ecs::Entity;
use tracing::instrument;
@ -162,7 +162,7 @@ impl<K: Hash + Eq + PartialEq + Clone, V: Clone, S: BuildHasher> CachedValMap<K,
/// [`TransformGroup`]s are used to represent entries in the buffer. They are used to insert,
/// update, and retrieve the transforms.
pub struct TransformBuffers {
-pub bindgroup_layout: wgpu::BindGroupLayout,
+pub bindgroup_layout: Rc<wgpu::BindGroupLayout>,
//groups: CachedValMap<TransformGroupId, TransformIndex>,
//groups: SlotMap<TransformGroupId, TransformIndex>,
entries: Vec<BufferEntry>,
@ -192,7 +192,7 @@ impl TransformBuffers {
});
let mut s = Self {
-bindgroup_layout,
+bindgroup_layout: Rc::new(bindgroup_layout),
entries: Default::default(),
max_transform_count: (limits.max_uniform_buffer_binding_size) as usize / (limits.min_uniform_buffer_offset_alignment as usize), //(mem::size_of::<glam::Mat4>()),
limits,
@ -209,6 +209,7 @@ impl TransformBuffers {
///
/// This uses [`wgpu::Queue::write_buffer`], so the write is not immediately submitted,
/// and instead enqueued internally to happen at the start of the next submit() call.
+#[instrument(skip(self, queue))]
pub fn send_to_gpu(&mut self, queue: &wgpu::Queue) {
self.next_index = 0;
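A standalone illustration of the contract that doc comment leans on: `write_buffer` only stages data on the queue, and the staged write is applied as part of the next `submit`. The function and parameter names below are illustrative.

fn upload_transform_then_submit(
    queue: &wgpu::Queue,
    transform_buffer: &wgpu::Buffer,
    encoder: wgpu::CommandEncoder,
    transform: glam::Mat4,
) {
    // Staged on the queue; nothing reaches the GPU yet.
    queue.write_buffer(transform_buffer, 0, bytemuck::cast_slice(&transform.to_cols_array()));
    // The staged write is processed at the start of this submit, so passes
    // recorded in `encoder` that read the buffer observe the new matrix.
    queue.submit(std::iter::once(encoder.finish()));
}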