Compare commits


No commits in common. "2eeca335e236dc09802ed2536064f978113ed7c8" and "007b1047ef7bf9fdceefacceb1fbb72799b1d1d0" have entirely different histories.

20 changed files with 184 additions and 182 deletions

View File

@@ -160,10 +160,6 @@ impl<E: Event> EventReader<E> {
pub fn len(&self) -> usize {
self.buf.len()
}
pub fn is_empty(&self) -> bool {
self.len() == 0
}
}
impl<E: Event> Iterator for EventReader<E> {

View File

@@ -92,7 +92,10 @@ impl<T: Button> InputButtons<T> {
pub fn was_just_pressed(&self, button: T) -> bool {
let hash = Self::get_button_hash(&button);
match self.button_events.get(&hash) {
Some(button_event) => matches!(button_event, ButtonEvent::JustPressed(b) if button == *b),
Some(button_event) => match button_event {
ButtonEvent::JustPressed(b) if button == *b => true,
_ => false,
},
None => false
}
}
@@ -102,8 +105,11 @@ impl<T: Button> InputButtons<T> {
/// This must be done so that a key does not stay as JustPressed between multiple ticks
pub fn update(&mut self) {
for bev in self.button_events.values_mut() {
if let ButtonEvent::JustPressed(btn) = bev {
*bev = ButtonEvent::Pressed(btn.clone());
match bev {
ButtonEvent::JustPressed(btn) => {
*bev = ButtonEvent::Pressed(btn.clone());
},
_ => {},
}
}
}
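Note: this hunk trades a single-arm `match` with a `_ => {}` fallthrough for an `if let`; both demote a button from `JustPressed` to `Pressed` once a tick has passed, as the doc comment above says. A minimal runnable sketch with simplified stand-in types (not the engine's real `ButtonEvent`):

```rust
#[derive(Clone)]
enum ButtonEvent<T> {
    JustPressed(T),
    Pressed(T),
}

// Demote JustPressed -> Pressed; the `if let` form is what the single-arm
// `match` amounts to when every other variant is left untouched.
fn demote<T: Clone>(bev: &mut ButtonEvent<T>) {
    if let ButtonEvent::JustPressed(btn) = bev {
        *bev = ButtonEvent::Pressed(btn.clone());
    }
}

fn main() {
    let mut ev = ButtonEvent::JustPressed(42u32);
    demote(&mut ev);
    assert!(matches!(ev, ButtonEvent::Pressed(42)));
}
```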

View File

@@ -67,7 +67,7 @@ impl<T> AVec<T> {
#[inline(always)]
fn slot_size(&self) -> usize {
let a = self.align - 1;
(mem::align_of::<T>() + (a)) & !a
mem::align_of::<T>() + (a) & !a
}
/// # Panics
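The `slot_size` hunk only drops the outer parentheses: Rust's `+` binds tighter than bitwise `&`, so `mem::align_of::<T>() + (a) & !a` still parses as `(align + a) & !a` and the result is unchanged, though the parenthesized form reads more clearly. A worked sketch of the align-up formula:

```rust
// Round `size` up to the next multiple of a power-of-two `align`.
fn align_up(size: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    let a = align - 1;
    (size + a) & !a
}

fn main() {
    assert_eq!(align_up(12, 8), 16); // 12 rounded up to a multiple of 8
    assert_eq!(align_up(16, 8), 16); // already-aligned values are unchanged
}
```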
@@ -240,11 +240,6 @@ impl<T> AVec<T> {
self.len
}
#[inline(always)]
pub fn is_empty(&self) -> bool {
self.len == 0
}
/// Returns the capacity of the vector.
///
/// The capacity is the amount of elements that the vector can store without reallocating.

View File

@@ -1,6 +1,6 @@
mod node;
use std::{
cell::{Ref, RefCell, RefMut}, collections::VecDeque, fmt::Debug, hash::Hash, rc::Rc, sync::Arc
cell::{Ref, RefCell, RefMut}, collections::{HashMap, VecDeque}, fmt::Debug, hash::Hash, rc::Rc, sync::Arc
};
use lyra_ecs::World;
@@ -20,13 +20,14 @@ pub use render_target::*;
use rustc_hash::FxHashMap;
use tracing::{debug_span, instrument, trace, warn};
use wgpu::CommandEncoder;
use wgpu::{CommandEncoder, ComputePass};
use super::resource::{ComputePipeline, Pass, Pipeline, RenderPipeline};
use super::resource::{ComputePipeline, Pipeline, RenderPipeline};
/// A trait that represents the label of a resource, slot, or node in the [`RenderGraph`].
pub trait RenderGraphLabel: Debug + 'static {
fn rc_clone(&self) -> Rc<dyn RenderGraphLabel>;
//fn as_dyn(&self) -> &dyn RenderGraphLabel;
//fn as_partial_eq(&self) -> &dyn PartialEq<dyn RenderGraphLabel>;
fn as_label_hash(&self) -> u64;
fn label_eq_rc(&self, other: &Rc<dyn RenderGraphLabel>) -> bool {
@@ -38,7 +39,8 @@ pub trait RenderGraphLabel: Debug + 'static {
}
}
/// An owned [`RenderGraphLabel`].
pub struct RenderGraphHash(u64);
#[derive(Clone)]
pub struct RenderGraphLabelValue(Rc<dyn RenderGraphLabel>);
@@ -80,39 +82,45 @@ impl PartialEq for RenderGraphLabelValue {
impl Eq for RenderGraphLabelValue {}
struct NodeEntry {
/// The Node
struct PassEntry {
inner: Arc<RefCell<dyn Node>>,
/// The Node descriptor
desc: Rc<RefCell<NodeDesc>>,
/// The index of the node in the execution graph
/// The index of the pass in the execution graph
graph_index: petgraph::matrix_graph::NodeIndex<usize>,
/// The Node's optional pipeline
pipeline: Rc<RefCell<Option<Pipeline>>>,
pipeline: Rc<RefCell<Option<PipelineResource>>>,
}
#[derive(Clone)]
struct BindGroupEntry {
label: RenderGraphLabelValue,
pub struct BindGroupEntry {
pub label: RenderGraphLabelValue,
/// BindGroup
bg: Rc<wgpu::BindGroup>,
pub bg: Rc<wgpu::BindGroup>,
/// BindGroupLayout
layout: Option<Rc<wgpu::BindGroupLayout>>,
pub layout: Option<Rc<wgpu::BindGroupLayout>>,
}
#[allow(dead_code)]
#[derive(Clone)]
struct ResourceSlot {
struct ResourcedSlot {
label: RenderGraphLabelValue,
ty: SlotType,
value: SlotValue,
}
/// Stores the pipeline and other resources it uses.
///
/// This stores the bind groups that have been created for it
pub struct PipelineResource {
pub pipeline: Pipeline,
/// Lookup map for bind groups using names
pub bg_layout_name_lookup: HashMap<String, u32>,
}
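The `bg_layout_name_lookup` map lets bind groups created for a pipeline be addressed by name instead of by a hard-coded index. A hedged sketch of how such a lookup might be consulted (the "camera" entry and the helper are illustrative, not part of this changeset):

```rust
use std::collections::HashMap;

// Resolve a bind group slot index by name before calling set_bind_group.
fn bind_index(lookup: &HashMap<String, u32>, name: &str) -> Option<u32> {
    lookup.get(name).copied()
}

fn main() {
    let mut lookup = HashMap::new();
    lookup.insert("camera".to_string(), 1u32);
    assert_eq!(bind_index(&lookup, "camera"), Some(1));
}
```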
pub struct RenderGraph {
device: Arc<wgpu::Device>,
queue: Arc<wgpu::Queue>,
slots: FxHashMap<RenderGraphLabelValue, ResourceSlot>,
nodes: FxHashMap<RenderGraphLabelValue, NodeEntry>,
slots: FxHashMap<RenderGraphLabelValue, ResourcedSlot>,
nodes: FxHashMap<RenderGraphLabelValue, PassEntry>,
sub_graphs: FxHashMap<RenderGraphLabelValue, RenderGraph>,
bind_groups: FxHashMap<RenderGraphLabelValue, BindGroupEntry>,
/// A directed graph used to determine dependencies of nodes.
@@ -135,7 +143,7 @@ impl RenderGraph {
}
pub fn device(&self) -> &wgpu::Device {
&self.device
&*self.device
}
/// Add a [`Node`] to the RenderGraph.
@@ -148,28 +156,39 @@ impl RenderGraph {
/// * This means that the id of insert slots **ARE NOT STABLE**. **DO NOT** rely on them to
/// not change. The IDs of output slots do stay the same.
/// 3. Ensuring that no two slots share the same ID when the names do not match.
#[instrument(skip(self, node), level = "debug")]
pub fn add_node<P: Node>(&mut self, label: impl RenderGraphLabel, mut node: P) {
let mut desc = node.desc(self);
#[instrument(skip(self, pass), level = "debug")]
pub fn add_node<P: Node>(&mut self, label: impl RenderGraphLabel, mut pass: P) {
let mut desc = pass.desc(self);
// collect all the slots of the node
// collect all the slots of the pass
for slot in &mut desc.slots {
if let Some(other) = self
.slots
.get_mut(&slot.label)
//.map(|s| (id, s))
//.and_then(|id| self.slots.get_mut(id).map(|s| (id, s)))
{
debug_assert_eq!(
slot.ty, other.ty,
"slot {:?} in node {:?} does not match existing slot of same name",
"slot {:?} in pass {:?} does not match existing slot of same name",
slot.label, label
);
/* trace!(
"Found existing slot for {:?}, changing id to {}",
slot.label,
id
); */
// if there is a slot of the same name
//slot.id = *id;
} else {
debug_assert!(!self.slots.contains_key(&slot.label),
"Reuse of id detected in render graph! Node: {:?}, slot: {:?}",
"Reuse of id detected in render graph! Pass: {:?}, slot: {:?}",
label, slot.label,
);
let res_slot = ResourceSlot {
let res_slot = ResourcedSlot {
label: slot.label.clone(),
ty: slot.ty,
value: slot.value.clone().unwrap_or(SlotValue::None),
@@ -193,8 +212,8 @@ impl RenderGraph {
self.nodes.insert(
label,
NodeEntry {
inner: Arc::new(RefCell::new(node)),
PassEntry {
inner: Arc::new(RefCell::new(pass)),
desc: Rc::new(RefCell::new(desc)),
graph_index: index,
pipeline: Rc::new(RefCell::new(None)),
@@ -202,38 +221,42 @@ impl RenderGraph {
);
}
/// Creates all buffers required for the nodes.
/// Creates all buffers required for the passes, also creates an internal execution path.
///
/// This only needs to be ran when the [`Node`]s in the graph change, or they are removed or
/// added.
#[instrument(skip(self, device))]
pub fn setup(&mut self, device: &wgpu::Device) {
// For all nodes, create their pipelines
for node in self.nodes.values_mut() {
let desc = (*node.desc).borrow();
// For all passes, create their pipelines
for pass in self.nodes.values_mut() {
let desc = (*pass.desc).borrow();
if let Some(pipeline_desc) = &desc.pipeline_desc {
let pipeline = match desc.ty {
NodeType::Render => Pipeline::Render(RenderPipeline::create(
device,
pipeline_desc
.as_render_pipeline_descriptor()
.expect("got compute pipeline descriptor in a render node"),
.expect("got compute pipeline descriptor in a render pass"),
)),
NodeType::Compute => Pipeline::Compute(ComputePipeline::create(
device,
pipeline_desc
.as_compute_pipeline_descriptor()
.expect("got render pipeline descriptor in a compute node"),
.expect("got render pipeline descriptor in a compute pass"),
)),
NodeType::Presenter | NodeType::Node | NodeType::Graph => {
panic!("Present or Node RenderGraph nodes should not have a pipeline descriptor!");
panic!("Present or Node RenderGraph passes should not have a pipeline descriptor!");
},
};
drop(desc);
let res = PipelineResource {
pipeline,
bg_layout_name_lookup: Default::default(),
};
let mut node_pipeline = node.pipeline.borrow_mut();
*node_pipeline = Some(pipeline);
let mut pipeline = pass.pipeline.borrow_mut();
*pipeline = Some(res);
}
}
@@ -252,18 +275,18 @@ impl RenderGraph {
let mut sorted: VecDeque<RenderGraphLabelValue> = petgraph::algo::toposort(&self.node_graph, None)
.expect("RenderGraph had cycled!")
.iter()
.map(|i| self.node_graph[*i].clone())
.map(|i| self.node_graph[i.clone()].clone())
.collect();
while let Some(node_label) = sorted.pop_front() {
let node = self.nodes.get(&node_label).unwrap();
while let Some(pass_label) = sorted.pop_front() {
let pass = self.nodes.get(&pass_label).unwrap();
let device = self.device.clone();
let queue = self.queue.clone();
let inner = node.inner.clone();
let inner = pass.inner.clone();
let mut inner = inner.borrow_mut();
let mut context = RenderGraphContext::new(device, queue, None, node_label.clone());
let mut context = RenderGraphContext::new(device, queue, None, pass_label.clone());
inner.prepare(self, world, &mut context);
buffer_writes.append(&mut context.buffer_writes);
}
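Both the prepare and execute loops order nodes with `petgraph::algo::toposort` so that a node's dependencies run before it does. A standalone sketch of that call using a plain `petgraph::Graph` and illustrative node names:

```rust
use petgraph::graph::Graph;

fn main() {
    let mut g: Graph<&str, ()> = Graph::new();
    let base = g.add_node("base");
    let mesh = g.add_node("mesh");
    let present = g.add_node("present");
    g.add_edge(base, mesh, ());
    g.add_edge(mesh, present, ());

    // Err(..) signals a cycle, which the render graph treats as fatal.
    let order = petgraph::algo::toposort(&g, None).expect("graph has a cycle");
    let names: Vec<&str> = order.iter().map(|i| g[*i]).collect();
    assert_eq!(names, vec!["base", "mesh", "present"]);
}
```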
@@ -277,12 +300,14 @@ impl RenderGraph {
let slot = self
.slots
.get(&bufwr.target_slot)
.unwrap_or_else(|| panic!("Failed to find slot '{:?}' for buffer write",
bufwr.target_slot));
.expect(&format!(
"Failed to find slot '{:?}' for buffer write",
bufwr.target_slot
));
let buf = slot
.value
.as_buffer()
.unwrap_or_else(|| panic!("Slot '{:?}' is not a buffer", bufwr.target_slot));
.expect(&format!("Slot '{:?}' is not a buffer", bufwr.target_slot));
self.queue.write_buffer(buf, bufwr.offset, &bufwr.bytes);
}
@@ -300,7 +325,7 @@ impl RenderGraph {
let mut sorted: VecDeque<RenderGraphLabelValue> = petgraph::algo::toposort(&self.node_graph, None)
.expect("RenderGraph had cycled!")
.iter()
.map(|i| self.node_graph[*i].clone())
.map(|i| self.node_graph[i.clone()].clone())
.collect();
// A bit of 'encoder hot potato' is played using this.
@@ -310,12 +335,12 @@ impl RenderGraph {
// the encoder will be submitted and a new one will be made.
let mut encoder = Some(self.create_encoder());
while let Some(node_label) = sorted.pop_front() {
let node = self.nodes.get(&node_label).unwrap();
let node_inn = node.inner.clone();
while let Some(pass_label) = sorted.pop_front() {
let pass = self.nodes.get(&pass_label).unwrap();
let pass_inn = pass.inner.clone();
let node_desc = node.desc.clone();
let node_desc = (*node_desc).borrow();
let pass_desc = pass.desc.clone();
let pass_desc = (*pass_desc).borrow();
// clone of the Rc's is required to appease the borrow checker
let device = self.device.clone();
@@ -326,11 +351,11 @@ impl RenderGraph {
encoder = Some(self.create_encoder());
}
let mut context = RenderGraphContext::new(device, queue, encoder.take(), node_label.clone());
let mut context = RenderGraphContext::new(device, queue, encoder.take(), pass_label.clone());
trace!("Executing {:?}", node_label.0);
let mut inner = node_inn.borrow_mut();
inner.execute(self, &node_desc, &mut context);
trace!("Executing {:?}", pass_label.0);
let mut inner = pass_inn.borrow_mut();
inner.execute(self, &pass_desc, &mut context);
// take back the encoder from the context
encoder = context.encoder;
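The "encoder hot potato" comment describes handing one `CommandEncoder` to each node through an `Option` and reclaiming it afterwards. A minimal, wgpu-free sketch of that ownership pattern (the `Encoder` and `Context` types here are stand-ins):

```rust
struct Encoder(Vec<&'static str>);

struct Context {
    encoder: Option<Encoder>,
}

fn run_node(name: &'static str, ctx: &mut Context) {
    // A node records into the encoder if the context still holds one.
    if let Some(enc) = ctx.encoder.as_mut() {
        enc.0.push(name);
    }
}

fn main() {
    let mut encoder = Some(Encoder(Vec::new()));
    for node in ["base", "mesh", "present"] {
        // Hand the encoder to the node's context ...
        let mut ctx = Context { encoder: encoder.take() };
        run_node(node, &mut ctx);
        // ... and take it back afterwards, as the render graph does.
        encoder = ctx.encoder;
    }
    assert_eq!(encoder.unwrap().0, vec!["base", "mesh", "present"]);
}
```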
@@ -359,9 +384,8 @@ impl RenderGraph {
.and_then(|p| {
let v = p.pipeline.borrow();
#[allow(clippy::manual_map)]
match &*v {
Some(_) => Some(Ref::map(v, |p| p.as_ref().unwrap())),
Some(_) => Some(Ref::map(v, |p| &p.as_ref().unwrap().pipeline)),
None => None,
}
})
@@ -424,26 +448,27 @@ impl RenderGraph {
/// &mut pass,
/// &[
/// // retrieves the `BasePassSlots::DepthTexture` bind group and sets the index 0 in the
/// // node to it.
/// (&BaseNodeSlots::DepthTexture, 0),
/// (&BaseNodeSlots::Camera, 1),
/// (&LightBaseNodeSlots::Lights, 2),
/// (&LightCullComputeNodeSlots::LightIndicesGridGroup, 3),
/// (&BaseNodeSlots::ScreenSize, 4),
/// // pass to it.
/// (&BasePassSlots::DepthTexture, 0),
/// (&BasePassSlots::Camera, 1),
/// (&LightBasePassSlots::Lights, 2),
/// (&LightCullComputePassSlots::LightIndicesGridGroup, 3),
/// (&BasePassSlots::ScreenSize, 4),
/// ],
/// );
/// ```
///
/// # Panics
/// Panics if a bind group of a provided name is not found.
pub fn set_bind_groups<'a, P: Pass<'a>>(
pub fn set_bind_groups<'a>(
&'a self,
pass: &mut P,
pass: &mut ComputePass<'a>,
bind_groups: &[(&dyn RenderGraphLabel, u32)],
) {
for (label, index) in bind_groups {
let bg = self
.bind_group(label.rc_clone());
//.expect(&format!("Could not find bind group '{:?}'", label));
pass.set_bind_group(*index, bg, &[]);
}
@@ -453,9 +478,6 @@ impl RenderGraph {
self.sub_graphs.get_mut(&label.into())
}
/// Add a sub graph.
///
/// > Note: the sub graph is not ran unless you add a node that executes it. See [`SubGraphNode`].
pub fn add_sub_graph<L: Into<RenderGraphLabelValue>>(&mut self, label: L, sub: RenderGraph) {
self.sub_graphs.insert(label.into(), sub);
}
@@ -526,7 +548,7 @@ impl SubGraphNode {
}
impl Node for SubGraphNode {
fn desc(&mut self, _: &mut RenderGraph) -> NodeDesc {
fn desc<'a, 'b>(&'a mut self, _: &'b mut RenderGraph) -> NodeDesc {
NodeDesc::new(NodeType::Graph, None, vec![])
}

View File

@@ -145,7 +145,6 @@ pub struct RenderGraphPipelineInfo {
pub multiview: Option<NonZeroU32>,
}
#[allow(clippy::too_many_arguments)]
impl RenderGraphPipelineInfo {
pub fn new(
label: &str,
@@ -161,7 +160,7 @@ impl RenderGraphPipelineInfo {
label: Some(label.to_string()),
bind_group_layouts: bind_group_layouts
.into_iter()
.map(Rc::new)
.map(|bgl| Rc::new(bgl))
.collect(),
vertex,
primitive,
@@ -356,7 +355,7 @@ impl NodeDesc {
/// describes all resources the node requires for execution during the `execute` phase.
pub trait Node: 'static {
/// Retrieve a descriptor of the Node.
fn desc(&mut self, graph: &mut RenderGraph) -> NodeDesc;
fn desc<'a, 'b>(&'a mut self, graph: &'b mut RenderGraph) -> NodeDesc;
/// Prepare the node for rendering.
///

View File

@@ -55,7 +55,7 @@ impl Node for BasePass {
.visibility(wgpu::ShaderStages::COMPUTE)
.buffer_dynamic_offset(false)
.contents(&[self.screen_size])
.finish_parts(graph.device());
.finish_parts(&graph.device());
let screen_size_bgl = Rc::new(screen_size_bgl);
let screen_size_bg = Rc::new(screen_size_bg);
@@ -65,12 +65,12 @@ impl Node for BasePass {
.visibility(wgpu::ShaderStages::all())
.buffer_dynamic_offset(false)
.contents(&[CameraUniform::default()])
.finish_parts(graph.device());
.finish_parts(&graph.device());
let camera_bgl = Rc::new(camera_bgl);
let camera_bg = Rc::new(camera_bg);
// create the depth texture using the utility struct, then take all the required fields
let mut depth_texture = RenderTexture::create_depth_texture(graph.device(), self.screen_size, "depth_texture");
let mut depth_texture = RenderTexture::create_depth_texture(&graph.device(), self.screen_size, "depth_texture");
depth_texture.create_bind_group(&graph.device);
let dt_bg_pair = depth_texture.bindgroup_pair.unwrap();
@@ -135,6 +135,7 @@ impl Node for BasePass {
) {
let mut vt = graph.view_target_mut();
vt.primary.create_frame();
//vt.next_chain();
vt.primary.create_frame_view();
/* debug_assert!(
!rt.current_texture.is_some(),

View File

@@ -10,7 +10,7 @@ pub struct InitNode {
}
impl Node for InitNode {
fn desc(&mut self, _: &mut crate::render::graph::RenderGraph) -> crate::render::graph::NodeDesc {
fn desc<'a, 'b>(&'a mut self, _: &'b mut crate::render::graph::RenderGraph) -> crate::render::graph::NodeDesc {
let mut desc = NodeDesc::new(NodeType::Node, None, vec![]);
// the slots can just be cloned since the slot attribute doesn't really matter much.
desc.slots = self.slots.clone();

View File

@@ -59,7 +59,7 @@ impl Node for LightCullComputePass {
let light_index_counter_buffer =
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("light_index_counter_buffer"),
contents: bytemuck::cast_slice(&[0]),
contents: &bytemuck::cast_slice(&[0]),
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
});

View File

@@ -174,12 +174,12 @@ impl MeshPass {
let material = self.material_buffers.entry(material.uuid())
.or_insert_with(|| {
debug!(uuid=material.uuid().to_string(), "Sending material to gpu");
Rc::new(Material::from_resource(device, queue, self.texture_bind_group_layout.clone().unwrap(), &material_ref))
Rc::new(Material::from_resource(&device, &queue, self.texture_bind_group_layout.clone().unwrap(), &material_ref))
});
// TODO: support material uniforms from multiple uniforms
let uni = MaterialUniform::from(&**material);
queue.write_buffer(self.material_buffer.as_ref().unwrap(), 0, bytemuck::bytes_of(&uni));
queue.write_buffer(&self.material_buffer.as_ref().unwrap(), 0, bytemuck::bytes_of(&uni));
MeshBufferStorage {
buffer_vertex: vertex_buffer,
@@ -215,7 +215,7 @@ impl Node for MeshPass {
//let transform_bgl = transforms.bindgroup_layout.clone();
self.transforms = Some(transforms);
let texture_bind_group_layout = Rc::new(RenderTexture::create_layout(device));
let texture_bind_group_layout = Rc::new(RenderTexture::create_layout(&device));
self.texture_bind_group_layout = Some(texture_bind_group_layout.clone());
let (material_bgl, material_bg, material_buf, _) = BufferWrapper::builder()
@@ -232,7 +232,7 @@ impl Node for MeshPass {
// load the default texture
let bytes = include_bytes!("../../default_texture.png");
self.default_texture = Some(RenderTexture::from_bytes(device, &graph.queue, texture_bind_group_layout.clone(), bytes, "default_texture").unwrap());
self.default_texture = Some(RenderTexture::from_bytes(&device, &graph.queue, texture_bind_group_layout.clone(), bytes, "default_texture").unwrap());
// get surface config format
/* let main_rt = graph.slot_value(BasePassSlots::MainRenderTarget)
@@ -251,9 +251,7 @@ impl Node for MeshPass {
source: include_str!("../../shaders/base.wgsl").to_string(),
}); */
NodeDesc::new(
let desc = NodeDesc::new(
NodeType::Render,
None,
/* Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
@@ -298,7 +296,9 @@ impl Node for MeshPass {
vec![
(&MeshesPassSlots::Material, material_bg, Some(material_bgl)),
],
)
);
desc
}
#[instrument(skip(self, graph, world, context))]
@@ -368,7 +368,7 @@ impl Node for MeshPass {
let transforms = self.transforms.as_mut().unwrap();
if transforms.needs_expand() {
debug!("Expanding transform buffers");
transforms.expand_buffers(device);
transforms.expand_buffers(&device);
}
}
@@ -377,14 +377,14 @@ impl Node for MeshPass {
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// dont need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, &mesh, mesh_han.uuid())
if !self.process_mesh(&device, &queue, entity, &*mesh, mesh_han.uuid())
&& mesh_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
self.check_mesh_buffers(&device, &queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
let group = TransformGroup::EntityRes(entity, mesh_han.uuid());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
let transform_id = transforms.update_or_push(&device, &queue, &render_limits,
group, interp_transform.calculate_mat4(), glam::Mat3::from_quat(interp_transform.rotation));
let material = mesh.material.as_ref().unwrap()
@@ -409,15 +409,15 @@ impl Node for MeshPass {
// if process mesh did not just create a new mesh, and the epoch
// shows that the scene has changed, verify that the mesh buffers
// dont need to be resent to the gpu.
if !self.process_mesh(device, queue, entity, &mesh, mesh_han.uuid())
if !self.process_mesh(&device, &queue, entity, &*mesh, mesh_han.uuid())
&& scene_epoch == last_epoch {
self.check_mesh_buffers(device, queue, &mesh_han);
self.check_mesh_buffers(&device, &queue, &mesh_han);
}
let transforms = self.transforms.as_mut().unwrap();
let scene_mesh_group = TransformGroup::Res(scene_han.uuid(), mesh_han.uuid());
let group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
let transform_id = transforms.update_or_push(device, queue, &render_limits,
let transform_id = transforms.update_or_push(&device, &queue, &render_limits,
group, mesh_interpo.calculate_mat4(), glam::Mat3::from_quat(mesh_interpo.rotation) );
let material = mesh.material.as_ref().unwrap()
@@ -436,7 +436,7 @@ impl Node for MeshPass {
}
let transforms = self.transforms.as_mut().unwrap();
transforms.send_to_gpu(queue);
transforms.send_to_gpu(&queue);
if self.pipeline.is_none() {
let device = graph.device();
@@ -537,7 +537,7 @@ impl Node for MeshPass {
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view,
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
@@ -551,7 +551,7 @@ impl Node for MeshPass {
})],
// enable depth buffer
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
view: depth_view,
view: &depth_view,
depth_ops: Some(wgpu::Operations {
load: wgpu::LoadOp::Clear(1.0),
store: true,
@@ -560,7 +560,7 @@ impl Node for MeshPass {
}),
});
pass.set_pipeline(pipeline);
pass.set_pipeline(&pipeline);
//let material_buffer_bg = self.material_buffer.as_ref().unwrap().bindgroup();
let default_texture = self.default_texture.as_ref().unwrap();
@@ -596,11 +596,11 @@ impl Node for MeshPass {
let offset = transforms.buffer_offset(job.transform_id);
pass.set_bind_group(1, bindgroup, &[ offset, ]);
pass.set_bind_group(2, camera_bg, &[]);
pass.set_bind_group(3, lights_bg, &[]);
pass.set_bind_group(4, material_bg, &[]);
pass.set_bind_group(2, &camera_bg, &[]);
pass.set_bind_group(3, &lights_bg, &[]);
pass.set_bind_group(4, &material_bg, &[]);
pass.set_bind_group(6, light_grid_bg, &[]);
pass.set_bind_group(6, &light_grid_bg, &[]);
// if this mesh uses indices, use them to draw the mesh
if let Some((idx_type, indices)) = buffers.buffer_indices.as_ref() {

View File

@@ -15,7 +15,7 @@ pub struct PresentPass;
impl PresentPass {
pub fn new() -> Self {
Self
Self::default()
}
}

View File

@@ -7,6 +7,15 @@ use crate::render::{
resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState},
};
#[derive(Debug, Clone, Copy, Hash, RenderGraphLabel)]
pub enum TintPassSlots {
InputRenderTarget,
InputTextureView,
TextureViewBindGroup,
Frame,
}
#[derive(Default, Debug, Clone, Copy, Hash, RenderGraphLabel)]
pub struct TintPassLabel;
@@ -14,9 +23,6 @@ pub struct TintPassLabel;
pub struct TintPass {
target_sampler: Option<wgpu::Sampler>,
bgl: Option<Rc<wgpu::BindGroupLayout>>,
/// Store bind groups for the input textures.
/// The texture may change due to resizes, or changes to the view target chain
/// from other nodes.
bg_cache: HashMap<wgpu::Id, wgpu::BindGroup>,
}
@@ -27,9 +33,9 @@ impl TintPass {
}
impl Node for TintPass {
fn desc(
&mut self,
graph: &mut crate::render::graph::RenderGraph,
fn desc<'a, 'b>(
&'a mut self,
graph: &'b mut crate::render::graph::RenderGraph,
) -> crate::render::graph::NodeDesc {
let device = &graph.device;
@@ -64,9 +70,7 @@ impl Node for TintPass {
});
let vt = graph.view_target();
NodeDesc::new(
let desc = NodeDesc::new(
NodeType::Render,
Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
label: Some("tint_pass".into()),
@@ -92,7 +96,9 @@ impl Node for TintPass {
multiview: None,
})),
vec![],
)
);
desc
}
fn prepare(
@ -112,7 +118,7 @@ impl Node for TintPass {
) {
let pipeline = graph
.pipeline(context.label.clone())
.expect("Failed to find pipeline for TintPass");
.expect("Failed to find pipeline for MeshPass");
let mut vt = graph.view_target_mut();
let chain = vt.get_chain();
@@ -148,7 +154,7 @@ impl Node for TintPass {
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("tint_pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: dest_view,
view: &dest_view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Load,
@@ -157,7 +163,7 @@ impl Node for TintPass {
})],
depth_stencil_attachment: None,
});
pass.set_pipeline(pipeline.as_render());
pass.set_pipeline(&pipeline.as_render());
pass.set_bind_group(0, bg, &[]);
pass.draw(0..3, 0..1);

View File

@@ -116,6 +116,8 @@ pub enum FrameTexture {
/// Represents the current frame that is being rendered to.
//#[allow(dead_code)]
pub struct Frame {
/* pub(crate) device: Arc<wgpu::Device>,
pub(crate) queue: Arc<wgpu::Queue>, */
pub(crate) size: math::UVec2,
pub(crate) texture: FrameTexture,
}
@@ -187,6 +189,8 @@ impl FrameTarget {
}
}
//struct TargetViewChainPrimary
pub struct TargetViewChain<'a> {
pub source: &'a mut FrameTarget,
pub dest: &'a mut FrameTarget,
@@ -200,7 +204,6 @@ struct ViewChain {
}
impl ViewChain {
/// Returns the currently active [`FrameTarget`].
fn active(&self) -> &FrameTarget {
if self.active == 0 {
&self.source
@@ -231,17 +234,14 @@ impl ViewTarget {
s
}
/// Returns the size of the target.
pub fn size(&self) -> math::UVec2 {
self.primary.size()
}
/// Returns the [`wgpu::TextureFormat`]
pub fn format(&self) -> wgpu::TextureFormat {
self.primary.format()
}
/// Resize all the targets, causes the chain to be recreated.
pub fn resize(&mut self, device: &wgpu::Device, size: math::UVec2) {
if size != self.primary.size() {
self.primary.render_target.resize(device, size);
@@ -300,7 +300,7 @@ impl ViewTarget {
}
}
/// Get the [`wgpu::TextureView`] to render to.
/// Get the [`wgpu::TextureView`] to render to
pub fn render_view(&self) -> &wgpu::TextureView {
let chain = self.chain.as_ref().unwrap();
chain.active().frame_view.as_ref().unwrap()

View File

@@ -116,9 +116,9 @@ impl BufferWrapper {
/// match the layout of this bind group.
///
/// See [`wgpu::RenderPass::set_bind_group`](https://docs.rs/wgpu/latest/wgpu/struct.RenderPass.html#method.set_bind_group).
pub fn render_pass_bind_at<'a>(
pub fn render_pass_bind_at<'a, 'b>(
&'a self,
pass: &mut wgpu::RenderPass<'a>,
pass: &'b mut wgpu::RenderPass<'a>,
index: u32,
offsets: &[wgpu::DynamicOffset],
) {

View File

@@ -168,7 +168,7 @@ impl BasicRenderer {
main_graph.add_edge(TestSubGraphLabel, TintPassLabel);
//let present_pass_label = PresentPassLabel::new(BasePassSlots::Frame);//TintPassSlots::Frame);
let p = PresentPass;
let p = PresentPass::default();
main_graph.add_node(PresentPassLabel, p);
main_graph.add_edge(BasePassLabel, TestSubGraphLabel);

View File

@@ -74,7 +74,7 @@ impl ComputePipeline {
// they share the same shader. I tried to do it without an Rc but couldn't get past
// the borrow checker
let compiled_shader = Rc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: desc.shader.label.as_deref(),
label: desc.shader.label.as_ref().map(|s| s.as_str()),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
&desc.shader.source,
)),
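This hunk only swaps between `Option::as_deref()` and the longer `as_ref().map(|s| s.as_str())`; both turn an `Option<String>` label into an `Option<&str>`, so the change is purely stylistic. A tiny check (the label text is illustrative):

```rust
fn main() {
    let label: Option<String> = Some("example_pipeline".to_string());
    // Equivalent ways to borrow an Option<String> as Option<&str>.
    assert_eq!(label.as_deref(), label.as_ref().map(|s| s.as_str()));
}
```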

View File

@@ -9,6 +9,3 @@ pub use compute_pipeline::*;
mod render_pipeline;
pub use render_pipeline::*;
mod pass;
pub use pass::*;

View File

@@ -1,16 +0,0 @@
/// A trait that represents a [`wgpu::ComputePass`] or [`wgpu::RenderPass`].
pub trait Pass<'a> {
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]);
}
impl<'a> Pass<'a> for wgpu::ComputePass<'a> {
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]) {
self.set_bind_group(index, bind_group, offsets);
}
}
impl<'a> Pass<'a> for wgpu::RenderPass<'a> {
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]) {
self.set_bind_group(index, bind_group, offsets);
}
}
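The removed `Pass` trait existed so callers could set bind groups without caring whether they were driving a compute or a render pass. A hedged sketch of the kind of generic helper it enabled (the helper itself is illustrative, not from this changeset, and only compiles alongside the trait above):

```rust
// Stay generic over anything that can accept bind groups.
fn bind_all<'a, P: Pass<'a>>(pass: &mut P, groups: &[(&'a wgpu::BindGroup, u32)]) {
    for &(bg, index) in groups {
        pass.set_bind_group(index, bg, &[]);
    }
}
```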

View File

@@ -1,6 +1,5 @@
use super::{compute_pipeline::ComputePipeline, render_pipeline::RenderPipeline, ComputePipelineDescriptor, RenderPipelineDescriptor};
#[allow(clippy::large_enum_variant)]
pub enum PipelineDescriptor {
Render(RenderPipelineDescriptor),
Compute(ComputePipelineDescriptor),
@@ -27,19 +26,19 @@ pub enum Pipeline {
Compute(ComputePipeline),
}
impl From<Pipeline> for RenderPipeline {
fn from(val: Pipeline) -> Self {
match val {
Pipeline::Render(r) => r,
impl Into<RenderPipeline> for Pipeline {
fn into(self) -> RenderPipeline {
match self {
Self::Render(r) => r,
_ => panic!("Pipeline is not a RenderPipeline"),
}
}
}
impl From<Pipeline> for ComputePipeline {
fn from(val: Pipeline) -> Self {
match val {
Pipeline::Compute(c) => c,
impl Into<ComputePipeline> for Pipeline {
fn into(self) -> ComputePipeline {
match self {
Self::Compute(c) => c,
_ => panic!("Pipeline is not a RenderPipeline"),
}
}

View File

@@ -88,13 +88,13 @@ impl RenderPipeline {
// they share the same shader. I tried to do it without an Rc but couldn't get past
// the borrow checker
let vrtx_shad = Rc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: desc.vertex.module.label.as_deref(),
label: desc.vertex.module.label.as_ref().map(|s| s.as_str()),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
&desc.vertex.module.source,
)),
}));
let vrtx_state = wgpu::VertexState {
module: &vrtx_shad,
module: &*vrtx_shad,
entry_point: &desc.vertex.entry_point,
buffers: &vrtx_buffs,
};
@@ -104,7 +104,7 @@ impl RenderPipeline {
vrtx_shad.clone()
} else {
Rc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: f.module.label.as_deref(),
label: f.module.label.as_ref().map(|s| s.as_str()),
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(&f.module.source)),
}))
}

View File

@@ -106,17 +106,14 @@ impl<K: Hash + Eq + PartialEq + Clone, V: Clone, S: BuildHasher> CachedValMap<K,
where
F: FnMut() -> V
{
match self.latest.entry(key) {
std::collections::hash_map::Entry::Occupied(mut e) => {
e.insert(val_fn());
None
}
std::collections::hash_map::Entry::Vacant(e) => {
let val = self.dead.pop_front()
.unwrap_or_else(val_fn);
e.insert(val.clone());
Some(val)
}
if self.latest.contains_key(&key) {
self.latest.insert(key, val_fn());
None
} else {
let val = self.dead.pop_front()
.unwrap_or_else(val_fn);
self.latest.insert(key, val.clone());
Some(val)
}
}
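One side of this hunk uses the `HashMap::entry` API, the other `contains_key` followed by `insert`. The entry form hashes the key once and writes through the reserved slot, which is why clippy nudges toward it. A runnable sketch of the same update-or-recycle logic with stand-in key and value types:

```rust
use std::collections::{HashMap, VecDeque};

// Overwrite an existing entry, or recycle a "dead" value for a new key.
fn insert_entry(
    latest: &mut HashMap<u32, u64>,
    dead: &mut VecDeque<u64>,
    key: u32,
    mut val_fn: impl FnMut() -> u64,
) -> Option<u64> {
    match latest.entry(key) {
        std::collections::hash_map::Entry::Occupied(mut e) => {
            e.insert(val_fn());
            None
        }
        std::collections::hash_map::Entry::Vacant(e) => {
            let val = dead.pop_front().unwrap_or_else(val_fn);
            e.insert(val);
            Some(val)
        }
    }
}

fn main() {
    let mut latest = HashMap::new();
    let mut dead = VecDeque::from([99u64]);
    assert_eq!(insert_entry(&mut latest, &mut dead, 1, || 5), Some(99)); // vacant: recycled
    assert_eq!(insert_entry(&mut latest, &mut dead, 1, || 5), None); // occupied: overwritten
}
```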
@@ -220,7 +217,7 @@ impl TransformBuffers {
entry.len = 0;
let p = entry.transforms.as_ptr();
let bytes = unsafe { std::slice::from_raw_parts(p, entry.transforms.len() * entry.transforms.align()) };
let bytes = unsafe { std::slice::from_raw_parts(p as *const u8, entry.transforms.len() * entry.transforms.align()) };
queue.write_buffer(&entry.buffer, 0, bytes);
}
@@ -337,9 +334,9 @@ impl TransformBuffers {
pub fn buffer_offset(&self, transform_index: TransformIndex) -> u32 {
//Self::get_buffer_offset(&self.limits, transform_index)
let transform_index = transform_index.transform_index % self.max_transform_count;
let t = transform_index as u32 * self.limits.min_uniform_buffer_offset_alignment as u32;
//debug!("offset: {t}");
transform_index as u32 * self.limits.min_uniform_buffer_offset_alignment
t
}
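`buffer_offset` maps a transform index to a byte offset in a dynamic uniform buffer: wrap the index into the per-buffer capacity, then multiply by the device's `min_uniform_buffer_offset_alignment`. A quick worked example with an assumed 256-byte alignment (the real value comes from `wgpu::Limits`):

```rust
fn main() {
    let min_uniform_buffer_offset_alignment: u32 = 256; // assumed value
    let max_transform_count: u32 = 1024;

    let transform_index: u32 = 1027; // wraps past the per-buffer capacity
    let wrapped = transform_index % max_transform_count; // 3
    let offset = wrapped * min_uniform_buffer_offset_alignment; // 768 bytes
    assert_eq!(offset, 768);
}
```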
/// Returns a boolean indicating if the buffers need to be expanded