Some code cleanup of the renderer file
ci/woodpecker/push/build Pipeline was successful
parent 6b935739ef
commit 31799cae05
@@ -120,7 +120,7 @@ async fn main() {
     game.world().insert_resource(TpsAccumulator(0.0));

     let mut sys = BatchedSystem::new();
-    sys.with_criteria(FixedTimestep::new(60));
+    sys.with_criteria(FixedTimestep::new(30));
     sys.with_system(spin_system);
     sys.with_system(fps_system);

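For context, `FixedTimestep::new(30)` drops this batched system from 60 to 30 ticks per second. The engine's own `FixedTimestep` type is not part of this diff; the snippet below is only a minimal stand-in sketch of the usual accumulator-based approach such a criteria is assumed to use, in plain Rust (the name `should_run` and all of the internals are illustrative, not engine code):

// Illustrative fixed-timestep accumulator, not the engine's implementation.
use std::time::{Duration, Instant};

struct FixedTimestep {
    step: f32,        // seconds per tick, e.g. 1.0 / 30.0
    accumulator: f32, // unspent real time carried between checks
    last: Instant,
}

impl FixedTimestep {
    fn new(ticks_per_second: u32) -> Self {
        Self {
            step: 1.0 / ticks_per_second as f32,
            accumulator: 0.0,
            last: Instant::now(),
        }
    }

    /// Returns true each time enough real time has accumulated for one tick.
    fn should_run(&mut self) -> bool {
        let now = Instant::now();
        self.accumulator += now.duration_since(self.last).as_secs_f32();
        self.last = now;

        if self.accumulator >= self.step {
            self.accumulator -= self.step;
            true
        } else {
            false
        }
    }
}

fn main() {
    let mut ts = FixedTimestep::new(30);
    // A real game loop would call this every frame and only run the
    // batched systems when it returns true.
    for _ in 0..5 {
        if ts.should_run() {
            println!("tick");
        }
        std::thread::sleep(Duration::from_millis(40));
    }
}
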
@@ -9,4 +9,5 @@ pub mod texture;
 pub mod shader_loader;
 pub mod material;
 pub mod camera;
 pub mod window;
+pub mod transform_buffer_storage;
@@ -20,6 +20,7 @@ use crate::math::Transform;
 use super::camera::RenderCamera;
 use super::desc_buf_lay::DescVertexBufferLayout;
 use super::texture::RenderTexture;
+use super::transform_buffer_storage::{TransformBufferIndices, TransformBuffers};
 use super::vertex::Vertex;
 use super::{render_pipeline::FullRenderPipeline, render_buffer::BufferStorage, render_job::RenderJob};

@@ -34,7 +35,7 @@ pub trait Renderer {
     fn add_render_pipeline(&mut self, shader_id: u64, pipeline: Arc<FullRenderPipeline>);
 }

-struct RenderBufferStorage {
+struct MeshBufferStorage {
     buffer_vertex: BufferStorage,
     buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,

@@ -47,105 +48,6 @@ struct RenderBufferStorage {
     transform_index: TransformBufferIndices,
 }

-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
-struct TransformBufferIndices {
-    buffer_index: usize,
-    transform_index: usize,
-}
-
-struct TransformBuffers {
-    //transform_layout: wgpu::BindGroupLayout,
-    /// A vector storing the EntityId and
-    just_updated: HashMap<EntityId, TransformBufferIndices>,
-    not_updated: HashMap<EntityId, TransformBufferIndices>,
-    dead_indices: VecDeque<TransformBufferIndices>,
-    next_indices: TransformBufferIndices,
-    /// (transform count, buffer, bindgroup)
-    buffer_bindgroups: Vec<(usize, wgpu::Buffer, wgpu::BindGroup)>,
-    /// The max amount of transforms in a buffer
-    max_transform_count: usize,
-}
-
-impl TransformBuffers {
-    /// Update an entity's buffer with the new transform. Will panic if the entity isn't stored
-    fn update_entity(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform: glam::Mat4) -> TransformBufferIndices {
-        let indices = self.not_updated.remove(&entity)
-            .or_else(|| self.just_updated.remove(&entity))
-            .expect("Use 'insert_entity' for new entities");
-        self.just_updated.insert(entity, indices);
-
-        let (_, buffer, _) = self.buffer_bindgroups.get(indices.buffer_index).unwrap();
-        queue.write_buffer(buffer, indices.transform_index as u64 * limits.min_uniform_buffer_offset_alignment as u64, bytemuck::bytes_of(&transform));
-        indices
-    }
-
-    /// Insert a new entity into the buffer, returns where it was stored.
-    fn insert_entity(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform: glam::Mat4) -> TransformBufferIndices {
-        // get a dead index, or create a new one
-        let (indices, buffer) = if let Some(index) = self.dead_indices.pop_front() {
-            let (_, buffer, _) = self.buffer_bindgroups.get(index.buffer_index).unwrap();
-            (index, buffer)
-        } else {
-            let indices = &mut self.next_indices;
-            let this_idx = *indices;
-            let (count, buffer, _) = self.buffer_bindgroups.get_mut(indices.buffer_index).unwrap();
-
-            if *count >= self.max_transform_count {
-                panic!("Transform buffer is filled and 'next_indices' was not incremented! Was a new buffer created?");
-            }
-
-            *count += 1;
-            indices.transform_index += 1;
-
-            (this_idx, &*buffer)
-        };
-
-        queue.write_buffer(buffer, Self::get_offset_for(limits, indices), bytemuck::bytes_of(&transform));
-
-        self.just_updated.insert(entity, indices);
-        indices
-    }
-
-    /// Update or insert an entities transform
-    fn update_or_insert<TFn>(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform_fn: TFn) -> TransformBufferIndices
-    where TFn: Fn() -> glam::Mat4
-    {
-        if self.contains(entity) {
-            self.update_entity(queue, limits, entity, transform_fn())
-        } else {
-            self.insert_entity(queue, limits, entity, transform_fn())
-        }
-    }
-
-    /// Returns true if the entity's transform is stored (does not mean its up-to-date).
-    fn contains(&self, entity: EntityId) -> bool {
-        self.not_updated.contains_key(&entity) || self.just_updated.contains_key(&entity)
-    }
-
-    /// Collect the dead entities, mark entities and not updated for next updates.
-    fn tick(&mut self) {
-        // take the dead entities, these were ones that were not updated this tick
-        let dead: VecDeque<TransformBufferIndices> = self.not_updated.values().copied().collect();
-        self.dead_indices = dead;
-
-        self.not_updated = self.just_updated.clone();
-        self.just_updated.clear();
-    }
-
-    fn get_offset_for(limits: &Limits, indices: TransformBufferIndices) -> u64 {
-        indices.transform_index as u64 * limits.min_uniform_buffer_offset_alignment as u64
-    }
-
-    /// Returns whether or not the transform buffers should be expanded
-    fn should_expand(&self) -> bool {
-        if let Some(( count, _, _ )) = self.buffer_bindgroups.last() {
-            *count >= self.max_transform_count
-        } else {
-            true
-        }
-    }
-}
-
 pub struct BasicRenderer {
     pub surface: wgpu::Surface,
     pub device: wgpu::Device,
@@ -159,7 +61,7 @@ pub struct BasicRenderer {
     pub render_pipelines: HashMap<u64, Arc<FullRenderPipeline>>,
     pub render_jobs: VecDeque<RenderJob>,

-    mesh_buffers: HashMap<uuid::Uuid, RenderBufferStorage>, // TODO: clean up left over buffers from deleted entities/components
+    mesh_buffers: HashMap<uuid::Uuid, MeshBufferStorage>, // TODO: clean up left over buffers from deleted entities/components
     entity_meshes: HashMap<EntityId, uuid::Uuid>,

     transform_buffers: TransformBuffers,
@@ -506,7 +408,7 @@ impl BasicRenderer {
         ( vertex_buffer, indices )
     }

-    fn create_mesh_buffers(&mut self, mesh: &Mesh, transform_indices: TransformBufferIndices) -> RenderBufferStorage {
+    fn create_mesh_buffers(&mut self, mesh: &Mesh, transform_indices: TransformBufferIndices) -> MeshBufferStorage {
        let (vertex_buffer, buffer_indices) = self.create_vertex_index_buffers(mesh);

        let diffuse_bindgroup = if let Some(model_texture) = &mesh.material().base_color_texture {
@@ -535,7 +437,7 @@ impl BasicRenderer {
            None
        };

-        RenderBufferStorage {
+        MeshBufferStorage {
            buffer_vertex: vertex_buffer,
            buffer_indices,
            render_texture: None,
@@ -0,0 +1,103 @@
+use std::collections::{VecDeque, HashMap};
+
+use edict::EntityId;
+use wgpu::Limits;
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub(crate) struct TransformBufferIndices {
+    pub buffer_index: usize,
+    pub transform_index: usize,
+}
+
+pub(crate) struct TransformBuffers {
+    //transform_layout: wgpu::BindGroupLayout,
+    /// A vector storing the EntityId and
+    pub just_updated: HashMap<EntityId, TransformBufferIndices>,
+    pub not_updated: HashMap<EntityId, TransformBufferIndices>,
+    pub dead_indices: VecDeque<TransformBufferIndices>,
+    pub next_indices: TransformBufferIndices,
+    /// (transform count, buffer, bindgroup)
+    pub buffer_bindgroups: Vec<(usize, wgpu::Buffer, wgpu::BindGroup)>,
+    /// The max amount of transforms in a buffer
+    pub max_transform_count: usize,
+}
+
+impl TransformBuffers {
+    /// Update an entity's buffer with the new transform. Will panic if the entity isn't stored
+    pub fn update_entity(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform: glam::Mat4) -> TransformBufferIndices {
+        let indices = self.not_updated.remove(&entity)
+            .or_else(|| self.just_updated.remove(&entity))
+            .expect("Use 'insert_entity' for new entities");
+        self.just_updated.insert(entity, indices);
+
+        let (_, buffer, _) = self.buffer_bindgroups.get(indices.buffer_index).unwrap();
+        queue.write_buffer(buffer, indices.transform_index as u64 * limits.min_uniform_buffer_offset_alignment as u64, bytemuck::bytes_of(&transform));
+        indices
+    }
+
+    /// Insert a new entity into the buffer, returns where it was stored.
+    pub fn insert_entity(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform: glam::Mat4) -> TransformBufferIndices {
+        // get a dead index, or create a new one
+        let (indices, buffer) = if let Some(index) = self.dead_indices.pop_front() {
+            let (_, buffer, _) = self.buffer_bindgroups.get(index.buffer_index).unwrap();
+            (index, buffer)
+        } else {
+            let indices = &mut self.next_indices;
+            let this_idx = *indices;
+            let (count, buffer, _) = self.buffer_bindgroups.get_mut(indices.buffer_index).unwrap();
+
+            if *count >= self.max_transform_count {
+                panic!("Transform buffer is filled and 'next_indices' was not incremented! Was a new buffer created?");
+            }
+
+            *count += 1;
+            indices.transform_index += 1;
+
+            (this_idx, &*buffer)
+        };
+
+        queue.write_buffer(buffer, Self::get_offset_for(limits, indices), bytemuck::bytes_of(&transform));
+
+        self.just_updated.insert(entity, indices);
+        indices
+    }
+
+    /// Update or insert an entities transform
+    pub fn update_or_insert<TFn>(&mut self, queue: &wgpu::Queue, limits: &Limits, entity: EntityId, transform_fn: TFn) -> TransformBufferIndices
+    where TFn: Fn() -> glam::Mat4
+    {
+        if self.contains(entity) {
+            self.update_entity(queue, limits, entity, transform_fn())
+        } else {
+            self.insert_entity(queue, limits, entity, transform_fn())
+        }
+    }
+
+    /// Returns true if the entity's transform is stored (does not mean its up-to-date).
+    pub fn contains(&self, entity: EntityId) -> bool {
+        self.not_updated.contains_key(&entity) || self.just_updated.contains_key(&entity)
+    }
+
+    /// Collect the dead entities, mark entities and not updated for next updates.
+    pub fn tick(&mut self) {
+        // take the dead entities, these were ones that were not updated this tick
+        let dead: VecDeque<TransformBufferIndices> = self.not_updated.values().copied().collect();
+        self.dead_indices = dead;
+
+        self.not_updated = self.just_updated.clone();
+        self.just_updated.clear();
+    }
+
+    pub fn get_offset_for(limits: &Limits, indices: TransformBufferIndices) -> u64 {
+        indices.transform_index as u64 * limits.min_uniform_buffer_offset_alignment as u64
+    }
+
+    /// Returns whether or not the transform buffers should be expanded
+    pub fn should_expand(&self) -> bool {
+        if let Some(( count, _, _ )) = self.buffer_bindgroups.last() {
+            *count >= self.max_transform_count
+        } else {
+            true
+        }
+    }
+}
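The file added here contains the same TransformBufferIndices/TransformBuffers code that was removed from the renderer above, now in its own transform_buffer_storage module with the types and fields made pub(crate)/pub so the renderer can keep using them through the new use super::transform_buffer_storage::{...} import. Based only on the methods shown in this diff, a frame on the renderer side would presumably look something like the sketch below; frame_update and the way entities and transforms are collected are illustrative, not actual engine code:

// Hypothetical per-frame driver for TransformBuffers; not part of this commit.
// The import mirrors the one the renderer itself uses.
use edict::EntityId;
use wgpu::Limits;

use super::transform_buffer_storage::TransformBuffers;

fn frame_update(
    buffers: &mut TransformBuffers,
    queue: &wgpu::Queue,
    limits: &Limits,
    entities: &[(EntityId, glam::Mat4)],
) {
    // `should_expand` only reports that the last buffer is full (or that there
    // is no buffer yet); allocating another uniform buffer + bind group and
    // pushing it onto `buffer_bindgroups` would have to happen elsewhere.
    if buffers.should_expand() {
        // allocate a new buffer/bind group here (not shown)
    }

    for (entity, transform) in entities {
        // Rewrites the entity's existing slot, or claims a dead/next index for
        // it. Each slot is min_uniform_buffer_offset_alignment bytes apart,
        // which is exactly what get_offset_for computes.
        buffers.update_or_insert(queue, limits, *entity, || *transform);
    }

    // Entities untouched since the previous tick have their indices recycled
    // as dead indices; entities written this tick move to not_updated and must
    // be written again next tick to stay alive.
    buffers.tick();
}

As a concrete example of the offset math, with the common 256-byte minimum uniform-buffer offset alignment a slot with transform_index 3 starts at byte offset 768 in its buffer.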