use std::{collections::VecDeque, sync::Arc};

use tracing::instrument;

use super::{RenderGraphLabel, RenderGraphLabelValue};

/// A queued write to a GPU buffer targeting a graph slot.
pub(crate) struct GraphBufferWrite {
    /// The label of the slot containing the buffer that will be written to.
    pub(crate) target_slot: RenderGraphLabelValue,
    pub(crate) offset: u64,
    pub(crate) bytes: Vec<u8>,
}

/// The context passed to a render graph node, providing access to the GPU device, queue, and
/// the command encoder used to record GPU commands.
#[allow(dead_code)]
pub struct RenderGraphContext<'a> {
    /// The [`wgpu::CommandEncoder`] used to encode GPU operations.
    ///
    /// This is `None` during the `prepare` stage.
    pub encoder: Option<wgpu::CommandEncoder>,
    /// The GPU device that is being used.
    pub device: Arc<wgpu::Device>,
    /// The GPU queue that encoded commands and buffer writes are submitted to.
    pub queue: Arc<wgpu::Queue>,
    /// Buffer writes queued with [`RenderGraphContext::queue_buffer_write`], flushed to the GPU
    /// queue after the prepare stage of all passes has run.
    pub(crate) buffer_writes: VecDeque<GraphBufferWrite>,
    renderpass_desc: Vec<wgpu::RenderPassDescriptor<'a>>,
    /// The label of this node.
    pub label: RenderGraphLabelValue,
}

impl<'a> RenderGraphContext<'a> {
    pub(crate) fn new(
        device: Arc<wgpu::Device>,
        queue: Arc<wgpu::Queue>,
        encoder: Option<wgpu::CommandEncoder>,
        label: RenderGraphLabelValue,
    ) -> Self {
        Self {
            encoder,
            device,
            queue,
            buffer_writes: Default::default(),
            renderpass_desc: vec![],
            label,
        }
    }
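
    /// Begin a render pass using this context's command encoder.
    ///
    /// # Panics
    ///
    /// Panics if the encoder is `None`, i.e. when called during the `prepare` stage.
    ///
    /// # Example
    ///
    /// A minimal sketch, where `ctx` is this context and `view` is assumed to be a
    /// [`wgpu::TextureView`] for the render target; the exact descriptor fields depend on the
    /// wgpu version in use:
    ///
    /// ```ignore
    /// let pass = ctx.begin_render_pass(wgpu::RenderPassDescriptor {
    ///     label: Some("example_pass"),
    ///     color_attachments: &[Some(wgpu::RenderPassColorAttachment {
    ///         view: &view,
    ///         resolve_target: None,
    ///         ops: wgpu::Operations {
    ///             load: wgpu::LoadOp::Clear(wgpu::Color::BLACK),
    ///             store: wgpu::StoreOp::Store,
    ///         },
    ///     })],
    ///     depth_stencil_attachment: None,
    ///     ..Default::default()
    /// });
    /// ```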
    pub fn begin_render_pass(
        &'a mut self,
        desc: wgpu::RenderPassDescriptor<'a>,
    ) -> wgpu::RenderPass<'a> {
        self.encoder
            .as_mut()
            .expect(
                "RenderGraphContext is missing a command encoder. This is likely \
                because you are trying to run render commands in the prepare stage.",
            )
            .begin_render_pass(&desc)
    }

    /// Begin a compute pass using this context's command encoder.
    ///
    /// # Panics
    ///
    /// Panics if the encoder is `None`, i.e. when called during the `prepare` stage.
    pub fn begin_compute_pass(&mut self, desc: &wgpu::ComputePassDescriptor) -> wgpu::ComputePass {
        self.encoder
            .as_mut()
            .expect(
                "RenderGraphContext is missing a command encoder. This is likely \
                because you are trying to run render commands in the prepare stage.",
            )
            .begin_compute_pass(desc)
    }

    /// Queue a data write to the buffer contained in `target_slot`.
    ///
    /// This does not submit the data to the GPU immediately, or add it to the `wgpu::Queue`. The
    /// data will be submitted to the GPU queue right after the prepare stage for all passes
    /// has run.
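    ///
    /// # Example
    ///
    /// A minimal sketch, where `ctx` is this context, `MyBufferSlot` is a hypothetical type
    /// implementing [`RenderGraphLabel`], and `uniform` is assumed to be a `bytemuck`-compatible
    /// value:
    ///
    /// ```ignore
    /// // The write is only queued here; it is flushed to the GPU queue after the
    /// // prepare stage of all passes has run.
    /// ctx.queue_buffer_write(MyBufferSlot, 0, bytemuck::bytes_of(&uniform));
    /// ```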
    #[instrument(skip(self, bytes), level = "trace", fields(size = bytes.len()))]
    pub fn queue_buffer_write(
        &mut self,
        target_slot: impl RenderGraphLabel,
        offset: u64,
        bytes: &[u8],
    ) {
        self.buffer_writes.push_back(GraphBufferWrite {
            target_slot: target_slot.into(),
            offset,
            bytes: bytes.to_vec(),
        })
    }

    /// Queue a data write of a value of type `T` to the buffer contained in `target_slot`.
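    ///
    /// # Example
    ///
    /// A minimal sketch, where `ctx` is this context, and `MyBufferSlot` and `MyUniform` are
    /// hypothetical; `MyUniform` is assumed to be `#[repr(C)]`, `Copy`, and derive
    /// `bytemuck::NoUninit`:
    ///
    /// ```ignore
    /// ctx.queue_buffer_write_with(MyBufferSlot, 0, MyUniform { time: 0.5 });
    /// ```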
    #[instrument(skip(self, bytes), level = "trace", fields(size = std::mem::size_of::<T>()))]
    pub fn queue_buffer_write_with<T: bytemuck::NoUninit>(
        &mut self,
        target_slot: impl RenderGraphLabel,
        offset: u64,
        bytes: T,
    ) {
        self.queue_buffer_write(target_slot, offset, bytemuck::bytes_of(&bytes));
    }

    /// Submit the encoder to the GPU queue.
    ///
    /// The `encoder` of this context will be `None` until the next node is executed, at which
    /// point a new one will be created. You likely don't need to call this yourself unless you
    /// are manually presenting a surface texture.
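    ///
    /// # Example
    ///
    /// A minimal sketch, where `ctx` is this context and `surface_texture` is assumed to be a
    /// [`wgpu::SurfaceTexture`] acquired elsewhere:
    ///
    /// ```ignore
    /// // Flush all encoded commands before presenting the frame.
    /// ctx.submit_encoder();
    /// surface_texture.present();
    /// ```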
    pub fn submit_encoder(&mut self) {
        let en = self.encoder.take().unwrap().finish();
        self.queue.submit(std::iter::once(en));
    }
}