Compare commits

...

2 Commits

3 changed files with 250 additions and 112 deletions

View File

@@ -1,17 +1,21 @@
 use lyra_ecs::Entity;
+use super::transform_buffer_storage::TransformIndex;
 
 pub struct RenderJob {
     pub entity: Entity,
     pub shader_id: u64,
     pub mesh_uuid: uuid::Uuid,
+    pub transform_id: TransformIndex,
 }
 
 impl RenderJob {
-    pub fn new(entity: Entity, shader_id: u64, mesh_buffer_id: uuid::Uuid) -> Self {
+    pub fn new(entity: Entity, shader_id: u64, mesh_buffer_id: uuid::Uuid, transform_id: TransformIndex) -> Self {
         Self {
             entity,
             shader_id,
             mesh_uuid: mesh_buffer_id,
+            transform_id
         }
     }
 }
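
For orientation, a minimal sketch (not part of the commit) of how the new `transform_id` field gets filled in; it mirrors the renderer changes in the next file and assumes `transform_buffers`, `queue`, `limits`, `entity`, `shader_id`, `mesh_handle`, and `transform` are in scope the way they are inside BasicRenderer:

    // Key the transform by the owning entity plus the mesh resource it uses.
    let group = TransformGroup::EntityRes(entity, mesh_handle.uuid());
    // Write the transform into the buffer chain and remember where it landed.
    let transform_id = transform_buffers.update_or_push(&queue, &limits, group,
        || (transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation)));
    // The job now carries the index instead of re-deriving it from the mesh uuid at draw time.
    let job = RenderJob::new(entity, shader_id, mesh_handle.uuid(), transform_id);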

View File

@@ -29,7 +29,7 @@ use super::light_cull_compute::LightCullCompute;
 use super::material::Material;
 use super::render_buffer::BufferWrapper;
 use super::texture::RenderTexture;
-use super::transform_buffer_storage::TransformBuffers;
+use super::transform_buffer_storage::{TransformBuffers, TransformGroup};
 use super::vertex::Vertex;
 use super::{render_pipeline::FullRenderPipeline, render_buffer::BufferStorage, render_job::RenderJob};

@@ -374,12 +374,13 @@ impl BasicRenderer {
     /// Processes the mesh for the renderer, storing and creating buffers as needed. Returns true if a new mesh was processed.
     fn process_mesh(&mut self, entity: Entity, transform: Transform, mesh: &Mesh, mesh_uuid: Uuid) -> bool {
-        if self.transform_buffers.should_expand() {
+        let _ = transform;
+        /* if self.transform_buffers.should_expand() {
             self.transform_buffers.expand_buffers(&self.device);
         }
 
         self.transform_buffers.update_or_insert(&self.queue, &self.render_limits,
-            mesh_uuid, || ( transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation) ));
+            entity, || ( transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation) )); */
 
         #[allow(clippy::map_entry)]
         if !self.mesh_buffers.contains_key(&mesh_uuid) {

@@ -451,10 +452,14 @@ impl Renderer for BasicRenderer {
                     self.check_mesh_buffers(entity, &mesh_han);
                 }
 
+                let group = TransformGroup::EntityRes(entity, mesh_han.uuid());
+                let transform_id = self.transform_buffers.update_or_push(&self.queue, &self.render_limits,
+                    group, || ( interop_pos.calculate_mat4(), glam::Mat3::from_quat(interop_pos.rotation) ));
+
                 let material = mesh.material.as_ref().unwrap()
                     .data_ref().unwrap();
                 let shader = material.shader_uuid.unwrap_or(0);
-                let job = RenderJob::new(entity, shader, mesh_han.uuid());
+                let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
                 self.render_jobs.push_back(job);
             }
         }

@@ -479,10 +484,15 @@ impl Renderer for BasicRenderer {
                     self.check_mesh_buffers(entity, &mesh_han);
                 }
 
+                let scene_mesh_group = TransformGroup::Res(scene_han.uuid(), mesh_han.uuid());
+                let group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
+                let transform_id = self.transform_buffers.update_or_push(&self.queue, &self.render_limits,
+                    group, || ( mesh_interpo.calculate_mat4(), glam::Mat3::from_quat(mesh_interpo.rotation) ));
+
                 let material = mesh.material.as_ref().unwrap()
                     .data_ref().unwrap();
                 let shader = material.shader_uuid.unwrap_or(0);
-                let job = RenderJob::new(entity, shader, mesh_han.uuid());
+                let job = RenderJob::new(entity, shader, mesh_han.uuid(), transform_id);
                 self.render_jobs.push_back(job);
             }
         }

@@ -576,9 +586,8 @@ impl Renderer for BasicRenderer {
             }
 
             // Get the bindgroup for job's transform and bind to it using an offset.
-            let transform_indices = *self.transform_buffers.transform_indices(job.mesh_uuid).unwrap();
-            let bindgroup = self.transform_buffers.bind_group(transform_indices).unwrap();
-            let offset = TransformBuffers::index_offset(&self.render_limits, transform_indices) as u32;
+            let bindgroup = self.transform_buffers.bind_group(job.transform_id);
+            let offset = self.transform_buffers.buffer_offset(job.transform_id);
             render_pass.set_bind_group(1, bindgroup, &[ offset, offset, ]);
             render_pass.set_bind_group(2, &self.camera_buffer.bindgroup(), &[]);
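
The two keys built in the scene path above are what keep transforms from colliding when two entities hold handles to the same scene resource. A minimal sketch of that keying, using a hypothetical `scene_mesh_transform_group` helper (not part of the commit) and assuming `TransformGroup` is in scope:

    /// Hypothetical helper: builds the transform-buffer key for a mesh that lives in a
    /// scene resource owned by `entity`, mirroring the renderer code above.
    fn scene_mesh_transform_group(entity: lyra_ecs::Entity, scene_uuid: uuid::Uuid, mesh_uuid: uuid::Uuid) -> TransformGroup {
        // Key for the mesh inside the scene resource; identical for every entity using the scene.
        let scene_mesh_group = TransformGroup::Res(scene_uuid, mesh_uuid);
        // Wrapping it with the owning entity hashes to a distinct TransformGroupId per entity,
        // so each entity gets its own slot in the transform buffers (the fix for issue #6).
        TransformGroup::OwnedGroup(entity, scene_mesh_group.into())
    }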

View File

@@ -1,44 +1,167 @@
-use std::{collections::{VecDeque, HashMap}, num::NonZeroU64};
+use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64};
 
+use lyra_ecs::Entity;
 use uuid::Uuid;
 use wgpu::Limits;
 
 use std::mem;
 
+/// A group id created from a [`TransformGroup`].
+///
+/// This is mainly created so that [`TransformGroup::OwnedGroup`] can use another group inside of it.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct TransformGroupId(u64);
+
+impl From<TransformGroup> for TransformGroupId {
+    fn from(value: TransformGroup) -> Self {
+        let mut hasher = DefaultHasher::new();
+        value.hash(&mut hasher);
+        let hash = hasher.finish();
+
+        TransformGroupId(hash)
+    }
+}
+
+/// Used as a key into the [`TransformBuffers`].
+///
+/// This enum is used as a key to identify a transform for a RenderJob. The renderer uses this
+/// to differentiate a transform between two entities that share a resource handle to the same
+/// scene:
+/// ```nobuild
+/// // The group of the mesh in the scene.
+/// let scene_mesh_group = TransformGroup::Res(scene_handle.uuid(), mesh_handle.uuid());
+/// // The group of the owned entity that has mesh in a scene.
+/// let finished_group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
+/// ```
+///
+/// A simpler example of the use of a transform group is when processing lone mesh handles
+/// owned by entities:
+/// ```nobuild
+/// let group = TransformGroup::EntityRes(entity, mesh_handle.uuid());
+/// ```
+///
+/// These were made to fix [#6](https://git.seanomik.net/SeanOMik/lyra-engine/issues/6).
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub enum TransformGroup {
+    /// Just an entity.
+    Entity(Entity),
+    /// An entity that owns another group.
+    OwnedGroup(Entity, TransformGroupId),
+    /// A resource uuid grouped with an owning Entity.
+    EntityRes(Entity, Uuid),
+    /// A resource uuid grouped with another resource uuid.
+    Res(Uuid, Uuid),
+}
+
+/// The index of a specific Transform inside of the buffers.
 #[derive(Default, Copy, Clone, PartialEq, Eq, Debug)]
-pub(crate) struct TransformBufferIndices {
-    pub buffer_index: usize,
-    pub transform_index: usize,
+pub struct TransformIndex {
+    /// The index of the entry in the buffer chain.
+    entry_index: usize,
+    /// The index of the transform in the entry.
+    transform_index: usize,
 }
 
 /// A struct representing a single transform buffer. There can be multiple of these
-pub(crate) struct TransformBufferEntry {
+struct BufferEntry {
     pub len: usize,
-    pub transform_buf: wgpu::Buffer,
-    pub normal_mat_buf: wgpu::Buffer,
     pub bindgroup: wgpu::BindGroup,
+    pub transform_buffer: wgpu::Buffer,
+    pub normal_buffer: wgpu::Buffer,
 }
 
+/// A HashMap that caches values for reuse.
+///
+/// The map detects dead values by tracking which entries were not updated since the last time
+/// [`CachedValMap::update`] was ran. When dead values are collected, they can be reused on an
+/// [`insert`](CachedValMap::insert) into the map.
+struct CachedValMap<K, V, S = RandomState> {
+    latest: HashMap<K, V, S>,
+    old: HashMap<K, V, S>,
+    dead: VecDeque<V>,
+}
+
+impl<K, V, S: Default> Default for CachedValMap<K, V, S> {
+    fn default() -> Self {
+        Self {
+            latest: Default::default(),
+            old: Default::default(),
+            dead: Default::default()
+        }
+    }
+}
+
+#[allow(dead_code)]
+impl<K: Hash + Eq + PartialEq + Clone, V: Clone, S: BuildHasher> CachedValMap<K, V, S> {
+    /// Insert a key, possibly reusing a value in the map.
+    ///
+    /// Returns the reused value, if one was reused. If its `None`, then the value was retrieved
+    /// from running `val_fn`.
+    pub fn insert<F>(&mut self, key: K, mut val_fn: F) -> Option<V>
+    where
+        F: FnMut() -> V
+    {
+        if self.latest.contains_key(&key) {
+            self.latest.insert(key, val_fn());
+            None
+        } else {
+            let val = self.dead.pop_front()
+                .unwrap_or_else(val_fn);
+            self.latest.insert(key, val.clone());
+
+            Some(val)
+        }
+    }
+
+    /// Returns a reference to the value corresponding to the key.
+    pub fn get(&mut self, key: K) -> Option<&V> {
+        if let Some(v) = self.old.remove(&key) {
+            self.latest.insert(key.clone(), v);
+        }
+
+        self.latest.get(&key)
+    }
+
+    /// Keep a key alive without updating its value.
+    pub fn keep_alive(&mut self, key: K) {
+        if let Some(v) = self.old.remove(&key) {
+            self.latest.insert(key, v);
+        }
+    }
+
+    /// Returns `true` if the map contains a value for the specified key.
+    pub fn contains(&self, key: K) -> bool {
+        self.old.contains_key(&key) || self.latest.contains_key(&key)
+    }
+
+    /// Collects the now dead values for reuse.
+    ///
+    /// This detects dead values by tracking which entries were not updated since the last time
+    /// update was ran.
+    pub fn update(&mut self) {
+        // drain the dead values into the dead queue
+        self.dead.extend(self.old.drain().map(|(_, v)| v));
+        // now drain the latest entries into the old entries
+        self.old.extend(self.latest.drain());
+    }
+}
+
 /// A helper struct for managing the Transform buffers for meshes.
 ///
-/// This struct manages a "chain" of uniform buffers that store Transform for meshes. When
-/// first created it only has a single "chain-link" with a buffer that is the maximum length
+/// This struct manages a "chain" of uniform buffers that store Transform for [`TransformGroup`]s.
+/// When first created it only has a single "chain-link" with a buffer that is the maximum length
 /// the GPU supports. When the first buffer fills up, a new one should be created which will also
 /// be the maximum length the GPU supports. When the new buffer fills up, a new one will be
 /// created once again, and so on.
 ///
-/// `Uuid`'s are used to represent entries (usually Meshes) in the buffer. The Uuid's can be used
-/// to insert, update, and retrieve the transforms.
-pub(crate) struct TransformBuffers {
+/// [`TransformGroup`]s are used to represent entries in the buffer. They are used to insert,
+/// update, and retrieve the transforms.
+pub struct TransformBuffers {
     pub bindgroup_layout: wgpu::BindGroupLayout,
-    pub just_updated: HashMap<Uuid, TransformBufferIndices>,
-    pub not_updated: HashMap<Uuid, TransformBufferIndices>,
-    pub dead_indices: VecDeque<TransformBufferIndices>,
-    pub next_indices: TransformBufferIndices,
-    /// (transform count, buffer, bindgroup)
-    pub buffer_bindgroups: Vec<TransformBufferEntry>,
-    /// The max amount of transforms in a buffer
-    pub max_transform_count: usize,
+    groups: CachedValMap<TransformGroupId, TransformIndex>,
+    entries: Vec<BufferEntry>,
+    limits: wgpu::Limits,
+    max_transform_count: usize,
 }
 
 impl TransformBuffers {
@@ -72,13 +195,11 @@ impl TransformBuffers {
         });
 
         let mut s = Self {
-            max_transform_count: limits.max_uniform_buffer_binding_size as usize / (mem::size_of::<glam::Mat4>() * 2),
-            buffer_bindgroups: Vec::new(),
             bindgroup_layout,
-            just_updated: HashMap::new(),
-            not_updated: HashMap::new(),
-            dead_indices: VecDeque::new(),
-            next_indices: TransformBufferIndices::default(),
+            groups: Default::default(),
+            entries: Default::default(),
+            max_transform_count: (limits.max_uniform_buffer_binding_size / 2) as usize / (mem::size_of::<glam::Mat4>()),
+            limits,
         };
 
         // create the first uniform buffer
@@ -87,106 +208,84 @@
         s
     }
 
-    /// Update an transform in the buffer.
+    /// Update an existing transform in the buffers.
     ///
     /// # Panics
-    /// Panics if the entity isn't stored, you can check if it is before with [`TransformBuffers:contains`].
-    pub fn update_transform(&mut self, queue: &wgpu::Queue, limits: &Limits, uuid: Uuid, transform: glam::Mat4, normal_matrix: glam::Mat3) -> TransformBufferIndices {
-        let indices = self.not_updated.remove(&uuid)
-            .or_else(|| self.just_updated.remove(&uuid))
-            .expect("Use 'insert_entity' for new entities");
-        self.just_updated.insert(uuid, indices);
+    /// Panics if the `entity_group` is not already inside of the buffers.
+    pub fn update_transform(&mut self, queue: &wgpu::Queue, limits: &Limits, entity_group: TransformGroup, transform: glam::Mat4, normal_matrix: glam::Mat3) -> TransformIndex {
+        let index = *self.groups.get(entity_group.into())
+            .expect("Use 'push_transform' for new entities");
+        let entry = self.entries.get_mut(index.entry_index).unwrap();
 
         let normal_matrix = glam::Mat4::from_mat3(normal_matrix);
 
-        let buffer = self.buffer_bindgroups.get(indices.buffer_index).unwrap();
-        let offset = Self::index_offset(limits, indices);
-        queue.write_buffer(&buffer.transform_buf, offset, bytemuck::bytes_of(&transform));
-        queue.write_buffer(&buffer.normal_mat_buf, offset, bytemuck::bytes_of(&normal_matrix));
-
-        indices
+        // write the transform and normal to the end of the transform
+        let offset = Self::get_buffer_offset(limits, index) as _;
+        queue.write_buffer(&entry.transform_buffer, offset, bytemuck::bytes_of(&transform));
+        queue.write_buffer(&entry.normal_buffer, offset, bytemuck::bytes_of(&normal_matrix));
+
+        index
     }
 
-    /// Insert a new transform into the buffer, returns where in the buffer it was stored.
-    pub fn insert_transform(&mut self, queue: &wgpu::Queue, limits: &Limits, uuid: Uuid, transform: glam::Mat4, normal_matrix: glam::Mat3) -> TransformBufferIndices {
-        let indices = match self.dead_indices.pop_front() {
-            Some(indices) => indices,
-            None => {
-                let indices = &mut self.next_indices;
-                let this_idx = *indices;
-                let entry = self.buffer_bindgroups.get_mut(indices.buffer_index).unwrap();
-
-                if entry.len >= self.max_transform_count {
-                    panic!("Transform buffer is filled and 'next_indices' was not incremented! Was a new buffer created?");
-                }
-
-                entry.len += 1;
-                indices.transform_index += 1;
-
-                this_idx
-            }
-        };
-        self.just_updated.insert(uuid, indices);
-        self.update_transform(queue, limits, uuid, transform, normal_matrix)
-    }
-
-    /// Update or insert a transform
-    pub fn update_or_insert<TFn>(&mut self, queue: &wgpu::Queue, limits: &Limits, uuid: Uuid, transform_fn: TFn) -> TransformBufferIndices
-    where TFn: Fn() -> (glam::Mat4, glam::Mat3)
-    {
-        let (tran, norm) = transform_fn();
-        if self.contains(uuid) {
-            self.update_transform(queue, limits, uuid, tran, norm)
-        } else {
-            self.insert_transform(queue, limits, uuid, tran, norm)
-        }
-    }
-
-    /// Returns true if the transform related to the `uuid` is stored (does not mean its up-to-date).
-    pub fn contains(&self, uuid: Uuid) -> bool {
-        self.not_updated.contains_key(&uuid) || self.just_updated.contains_key(&uuid)
-    }
-
-    /// Collect the dead transforms and prepare self to check next time.
-    pub fn tick(&mut self) {
-        // take the dead entities, these were ones that were not updated this tick
-        let dead: VecDeque<TransformBufferIndices> = self.not_updated.values().copied().collect();
-        self.dead_indices = dead;
-
-        self.not_updated = self.just_updated.clone();
-        self.just_updated.clear();
-    }
-
-    /// Returns the offset for the transform index in the buffer
-    pub fn index_offset(limits: &Limits, indices: TransformBufferIndices) -> u64 {
-        indices.transform_index as u64 * limits.min_uniform_buffer_offset_alignment as u64
-    }
-
-    /// Returns whether or not the transform buffers should be expanded
-    pub fn should_expand(&self) -> bool {
-        if let Some(entry) = self.buffer_bindgroups.last() {
-            entry.len >= self.max_transform_count
-        } else {
-            true
-        }
-    }
-
-    /// Returns the bind group for the index
-    pub fn bind_group(&self, index: TransformBufferIndices) -> Option<&wgpu::BindGroup> {
-        self.buffer_bindgroups.get(index.buffer_index)
-            .map(|entry| &entry.bindgroup)
-    }
+    /// Push a new transform into the buffers.
+    pub fn push_transform(&mut self, queue: &wgpu::Queue, limits: &Limits, entity_group: TransformGroup, transform: glam::Mat4, normal_matrix: glam::Mat3) -> TransformIndex {
+        self.groups.insert(entity_group.into(), || {
+            // this closure is only called when there are no values that can be reused,
+            // so we get a brand new index at the end of the last entry in the chain.
+            let last = self.entries.last_mut().unwrap();
+
+            // ensure the gpu buffer is not overflown
+            debug_assert!(last.len < self.max_transform_count,
+                "Transform buffer is filled and 'next_indices' was not incremented! \
+                Was a new buffer created?");
+
+            let tidx = last.len;
+            last.len += 1;
+
+            TransformIndex {
+                entry_index: self.entries.len() - 1,
+                transform_index: tidx
+            }
+        });
+
+        self.update_transform(queue, limits, entity_group, transform, normal_matrix)
+    }
+
+    /// Collect the dead transforms and prepare self to check next time.
+    pub fn tick(&mut self) {
+        self.groups.update();
+    }
+
+    /// Returns a boolean indicating if the buffer contains this group.
+    pub fn contains(&self, group: TransformGroup) -> bool {
+        self.groups.contains(group.into())
+    }
+
+    /// Update an existing transform group or if its not existing yet, pushes it to the buffer.
+    ///
+    /// Returns: the index that the transform is at in the buffers.
+    pub fn update_or_push<F>(&mut self, queue: &wgpu::Queue, limits: &Limits, group: TransformGroup, transform_fn: F) -> TransformIndex
+    where F: Fn() -> (glam::Mat4, glam::Mat3)
+    {
+        let (transform, normal_matrix) = transform_fn();
+        if self.contains(group) {
+            self.update_transform(queue, limits, group, transform, normal_matrix)
+        } else {
+            self.push_transform(queue, limits, group, transform, normal_matrix)
+        }
+    }
 
     /// Expand the Transform buffers by adding another uniform buffer binding.
     ///
     /// This object has a chain of uniform buffers, when the buffers are expanded, a new
     /// "chain-link" is created.
     pub fn expand_buffers(&mut self, device: &wgpu::Device) {
         let limits = device.limits();
-        let max_buffer_sizes = (limits.max_uniform_buffer_binding_size as u64) / 2;
+        let max_buffer_sizes = self.max_transform_count as u64 * limits.min_uniform_buffer_offset_alignment as u64;
         let transform_buffer = device.create_buffer(
             &wgpu::BufferDescriptor {
-                label: Some(&format!("B_Transform_{}", self.buffer_bindgroups.len())),
+                label: Some(&format!("B_Transform_{}", self.entries.len())),
                 usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
                 size: max_buffer_sizes,
                 mapped_at_creation: false,
@@ -195,7 +294,7 @@ impl TransformBuffers {
         let normal_mat_buffer = device.create_buffer(
             &wgpu::BufferDescriptor {
-                label: Some(&format!("B_NormalMatrix_{}", self.buffer_bindgroups.len())),
+                label: Some(&format!("B_NormalMatrix_{}", self.entries.len())),
                 usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
                 size: max_buffer_sizes,
                 mapped_at_creation: false,
@@ -234,17 +333,43 @@ impl TransformBuffers {
                 label: Some("BG_Transforms"),
             });
 
-        let entry = TransformBufferEntry {
+        let entry = BufferEntry {
             bindgroup: transform_bind_group,
-            transform_buf: transform_buffer,
-            normal_mat_buf: normal_mat_buffer,
+            transform_buffer,
+            normal_buffer: normal_mat_buffer,
             len: 0,
         };
-        self.buffer_bindgroups.push(entry);
+        self.entries.push(entry);
     }
 
-    /// Returns the indices of the Transform
-    pub fn transform_indices(&self, uuid: Uuid) -> Option<&TransformBufferIndices> {
-        self.just_updated.get(&uuid).or_else(|| self.not_updated.get(&uuid))
+    /// Returns the bind group for the transform index.
+    pub fn bind_group(&self, transform_id: TransformIndex) -> &wgpu::BindGroup {
+        let entry = self.entries.get(transform_id.entry_index).unwrap();
+        &entry.bindgroup
+    }
+
+    /// Get the buffer offset for a transform using wgpu limits.
+    ///
+    /// If its possible to borrow immutably, use [`TransformBuffers::buffer_offset`].
+    fn get_buffer_offset(limits: &wgpu::Limits, transform_index: TransformIndex) -> u32 {
+        transform_index.transform_index as u32 * limits.min_uniform_buffer_offset_alignment as u32
+    }
+
+    /// Returns the offset of the transform inside the bind group buffer.
+    ///
+    /// ```nobuild
+    /// let bindgroup = transform_buffers.bind_group(job.transform_id);
+    /// let offset = transform_buffers.buffer_offset(job.transform_id);
+    /// render_pass.set_bind_group(1, bindgroup, &[ offset, offset, ]);
+    /// ```
+    pub fn buffer_offset(&self, transform_index: TransformIndex) -> u32 {
+        Self::get_buffer_offset(&self.limits, transform_index)
     }
 }
+
+#[repr(C)]
+#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
+struct TransformNormalMatPair {
+    transform: glam::Mat4,
+    normal_mat: glam::Mat4,
+}
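
Taken together, the new API is meant to be driven roughly once per frame. The following is a sketch, not code from the commit: it assumes a `transform_buffers: TransformBuffers` value, the wgpu `queue`, the device `limits`, an active `render_pass`, and a `group`/`transform` produced the way the renderer changes above produce them; where `tick()` is actually called is not shown in this diff, so placing it at end of frame is an assumption.

    // Write (or insert) the transform for this group and remember where it landed.
    // Dead indices collected by a previous tick() are reused here via CachedValMap::insert.
    let transform_id = transform_buffers.update_or_push(&queue, &limits, group,
        || (transform.calculate_mat4(), glam::Mat3::from_quat(transform.rotation)));

    // At draw time the index resolves to a bind group in the chain plus a dynamic offset into it.
    let bindgroup = transform_buffers.bind_group(transform_id);
    let offset = transform_buffers.buffer_offset(transform_id);
    render_pass.set_bind_group(1, bindgroup, &[offset, offset]);

    // After the frame, retire groups that were not updated this tick so their
    // indices can be handed out again (CachedValMap::update under the hood).
    transform_buffers.tick();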