Compare commits
2 commits: main ... feature/sc

Author | SHA1 | Date
---|---|---
SeanOMik | 35815fa019 |
SeanOMik | b51f1e16ef |
@@ -1,36 +0,0 @@
name: CI

env:
  # Runners don't expose the TSC but we want to make sure these tests work, so we
  # can ignore it.
  TRACY_NO_INVARIANT_CHECK: 1

on:
  push:
    branches:
      - main
  pull_request:
  workflow_dispatch:

jobs:
  build:
    runs-on: docker
    container: git.seanomik.net/seanomik/rust-nightly:2023-11-21-bookworm
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          submodules: true

      - name: Install system dependencies
        run: |
          apt update
          apt install libudev-dev lua5.4 liblua5.4-dev -y

      - name: Build
        run: |
          cargo build

      - name: Test
        run: |
          cargo test --all
@@ -1,6 +1,3 @@
[submodule "lyra-scripting/elua"]
    path = crates/lyra-scripting/elua
    path = lyra-scripting/elua
    url = ../elua.git # git@git.seanomik.net:SeanOMik/elua.git
[submodule "wgsl-preprocessor"]
    path = crates/wgsl-preprocessor
    url = git@git.seanomik.net:SeanOMik/wgsl-preprocessor.git
@@ -1,29 +0,0 @@
# The run config is used for both run mode and debug mode

[[configs]]
# the name of this task
name = "Example 'simple_scene'"

# the type of the debugger. If not set, it can't be debugged but can still be run
type = "lldb"

# the program to run
program = "../../target/debug/simple_scene"

# the program arguments, e.g. args = ["arg1", "arg2"], optional
# args = []

# current working directory, optional
cwd = "${workspace}/examples/simple_scene"

# environment variables, optional
# [configs.env]
# VAR1 = "VAL1"
# VAR2 = "VAL2"

# task to run before the run/debug session is started, optional
[configs.prelaunch]
program = "cargo"
args = [
    "build",
]
@@ -4,24 +4,6 @@
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug lyra lua-scripting",
            "cargo": {
                "args": [
                    "build",
                    "--manifest-path", "${workspaceFolder}/examples/lua-scripting/Cargo.toml"
                    //"--bin=testbed",
                ],
                "filter": {
                    "name": "lua-scripting",
                    "kind": "bin"
                }
            },
            "args": [],
            "cwd": "${workspaceFolder}/examples/lua-scripting"
        },
        {
            "type": "lldb",
            "request": "launch",
@@ -40,42 +22,6 @@
            "args": [],
            "cwd": "${workspaceFolder}/examples/testbed"
        },
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug lyra shadows",
            "cargo": {
                "args": [
                    "build",
                    "--manifest-path", "${workspaceFolder}/examples/shadows/Cargo.toml"
                    //"--bin=shadows",
                ],
                "filter": {
                    "name": "shadows",
                    "kind": "bin"
                }
            },
            "args": [],
            "cwd": "${workspaceFolder}/examples/shadows"
        },
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug example simple_scene",
            "cargo": {
                "args": [
                    "build",
                    "--manifest-path", "${workspaceFolder}/examples/simple_scene/Cargo.toml"
                    //"--bin=testbed",
                ],
                "filter": {
                    "name": "simple_scene",
                    "kind": "bin"
                }
            },
            "args": [],
            "cwd": "${workspaceFolder}/examples/simple_scene"
        },
        {
            "type": "lldb",
            "request": "launch",
@@ -0,0 +1,21 @@
variables:
  - &rust_image 'git.seanomik.net/seanomik/rust-nightly:2023-11-21-bookworm'

when:
  event: [push, manual, pull_request]
  branch: main

steps:
  Build - Debug:
    image: *rust_image
    commands:
      - apt update
      - apt install libudev-dev lua5.4 liblua5.4-dev -y
      - cargo build

  Test - Debug:
    image: *rust_image
    commands:
      - apt update
      - apt install libudev-dev lua5.4 liblua5.4-dev -y
      - cargo test --all
@@ -0,0 +1,20 @@
variables:
  - &rust_image 'git.seanomik.net/seanomik/rust-nightly:2023-11-21-bookworm'

when:
  event: [release, pull_request, manual]

steps:
  Build - Release:
    image: *rust_image
    commands:
      - apt update
      - apt install libudev-dev lua5.4 liblua5.4-dev -y
      - cargo build --release

  Test - Release:
    image: *rust_image
    commands:
      - apt update
      - apt install libudev-dev lua5.4 liblua5.4-dev -y
      - cargo test --all --release
File diff suppressed because it is too large

Cargo.toml (25 lines changed)
@@ -5,28 +5,17 @@ edition = "2021"

[workspace]
members = [
    "crates/*",

    "examples/2d",
    "examples/fixed-timestep-rotating-model",
    "examples/lua-scripting",
    "examples/many-lights",
    "examples/shadows",
    "examples/simple_scene",
    "examples/testbed",
]
    "lyra-resource",
    "lyra-ecs",
    "lyra-reflect",
    "lyra-scripting",
    "lyra-game", "lyra-math", "lyra-scene"]

[features]
scripting = ["dep:lyra-scripting"]
lua_scripting = ["scripting", "lyra-scripting/lua"]
tracy = ["lyra-game/tracy"]

[dependencies]
lyra-game = { path = "crates/lyra-game" }
lyra-scripting = { path = "crates/lyra-scripting", optional = true }

#[profile.dev]
#opt-level = 1

[profile.release]
debug = true
lyra-game = { path = "lyra-game" }
lyra-scripting = { path = "lyra-scripting", optional = true }
@@ -1,272 +0,0 @@
|
|||
use std::{any::Any, cell::RefMut, mem::{self, MaybeUninit}, ptr::{self, NonNull}};
|
||||
|
||||
use crate::{system::FnArgFetcher, Access, Bundle, Entities, Entity, World};
|
||||
|
||||
/// A Command be used to delay mutation of the world until after this system is ran.
|
||||
pub trait Command: Any {
|
||||
fn as_any_boxed(self: Box<Self>) -> Box<dyn Any>;
|
||||
|
||||
/// Executes the command
|
||||
fn run(self, world: &mut World);
|
||||
}
|
||||
|
||||
impl<F> Command for F
|
||||
where
|
||||
F: FnOnce(&mut World) + 'static
|
||||
{
|
||||
fn as_any_boxed(self: Box<Self>) -> Box<dyn Any> {
|
||||
self
|
||||
}
|
||||
|
||||
fn run(self, world: &mut World) {
|
||||
self(world)
|
||||
}
|
||||
}
|
||||
|
||||
type RunCommand = unsafe fn(cmd: *mut (), world: Option<&mut World>) -> usize;
|
||||
|
||||
#[repr(C, packed)]
|
||||
struct PackedCommand<T: Command> {
|
||||
run: RunCommand,
|
||||
cmd: T,
|
||||
}
|
||||
|
||||
/// Stores a queue of commands that will get executed after the system is ran.
|
||||
///
|
||||
/// This struct can be inserted as a resource into the world, and the commands will be
|
||||
/// executed by the [`GraphExecutor`](crate::system::GraphExecutor) after the system is executed.
|
||||
#[derive(Default)]
|
||||
pub struct CommandQueue {
|
||||
data: Vec<MaybeUninit<u8>>,
|
||||
}
|
||||
|
||||
impl CommandQueue {
|
||||
/// Execute the commands in the queue.
|
||||
///
|
||||
/// If `world` is `None`, the commands will just be dropped and the memory freed.
|
||||
fn execute(&mut self, mut world: Option<&mut World>) {
|
||||
let range = self.data.as_mut_ptr_range();
|
||||
let mut current = range.start;
|
||||
let end = range.end;
|
||||
|
||||
while current < end {
|
||||
// Retrieve the runner for the command.
|
||||
// Safety: current pointer will either be the start of the buffer, or at the start of a new PackedCommand
|
||||
let run_fn = unsafe { current.cast::<RunCommand>().read_unaligned() };
|
||||
|
||||
// Retrieves the pointer to the command which is just after RunCommand due to PackedCommand.
|
||||
// Safety: PackedCommand is repr C and packed, so it will be right after the RunCommand.
|
||||
current = unsafe { current.add(mem::size_of::<RunCommand>()) };
|
||||
|
||||
// Now run the command, providing the type erased pointer to the command.
|
||||
let read_size = unsafe { run_fn(current.cast(), world.as_deref_mut()) };
|
||||
|
||||
// The pointer is added to so that it is just after the command that was ran.
|
||||
// Safety: the RunCommand returns the size of the command
|
||||
current = unsafe { current.add(read_size) };
|
||||
}
|
||||
|
||||
// Safety: all of the commands were just read from the pointers.
|
||||
unsafe { self.data.set_len(0) };
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CommandQueue {
|
||||
fn drop(&mut self) {
|
||||
if !self.data.is_empty() {
|
||||
println!("CommandQueue has commands but is being dropped");
|
||||
}
|
||||
|
||||
self.execute(None);
|
||||
}
|
||||
}
|
||||
|
||||
/// Used in a system to queue up commands that will run right after this system.
|
||||
///
|
||||
/// This can be used to delay the mutation of the world until after the system is ran. These
|
||||
/// must be used if you're mutating the world inside a [`View`](crate::query::View).
|
||||
///
|
||||
/// ```nobuild
|
||||
/// fn particle_spawner_system(
|
||||
/// commands: Commands,
|
||||
/// view: View<(&Campfire, &Transform)>
|
||||
/// ) -> anyhow::Result<()> {
|
||||
/// for (campfire, pos) in view.iter() {
|
||||
/// // If you do not use commands to spawn this, the next iteration
|
||||
/// // of the view will cause a segfault.
|
||||
/// commands.spawn((pos, Particle::new(/* ... */)));
|
||||
/// }
|
||||
///
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// ```
|
||||
pub struct Commands<'a, 'b> {
|
||||
queue: &'b mut CommandQueue,
|
||||
entities: &'a mut Entities,
|
||||
}
|
||||
|
||||
impl<'a, 'b> Commands<'a, 'b> {
|
||||
pub fn new(queue: &'b mut CommandQueue, world: &'a mut World) -> Self {
|
||||
Self {
|
||||
queue,
|
||||
entities: &mut world.entities,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a command to the end of the command queue
|
||||
pub fn add<C: Command>(&mut self, cmd: C) {
|
||||
let run_fn = |cmd_ptr: *mut (), world: Option<&mut World>| {
|
||||
// Safety: the pointer is a type-erased pointer to the command. The pointer is read
|
||||
// then dropped out of scope, this closure will not be ran again so no use-after-free
|
||||
// will occur.
|
||||
let cmd: C = unsafe { ptr::read_unaligned(cmd_ptr.cast::<C>()) };
|
||||
match world {
|
||||
Some(world) => cmd.run(world),
|
||||
None => {} // cmd just gets dropped
|
||||
}
|
||||
|
||||
// the size of the command must be returned to increment the pointer when applying
|
||||
// the command queue.
|
||||
mem::size_of::<C>()
|
||||
};
|
||||
|
||||
let data = &mut self.queue.data;
|
||||
|
||||
// Reserve enough bytes from the vec to store the packed command and its run fn.
|
||||
let old_len = data.len();
|
||||
data.reserve(mem::size_of::<PackedCommand<C>>());
|
||||
|
||||
// Get a pointer to the end of the packed data. Safe since we just reserved enough memory
|
||||
// to store this command.
|
||||
let end_ptr = unsafe { data.as_mut_ptr().add(old_len) };
|
||||
|
||||
unsafe {
|
||||
// write the command and its runner into the buffer
|
||||
end_ptr.cast::<PackedCommand<C>>()
|
||||
// written unaligned to keep everything packed
|
||||
.write_unaligned(PackedCommand {
|
||||
run: run_fn,
|
||||
cmd,
|
||||
});
|
||||
|
||||
// we wrote to the vec's buffer without using its api, so we need manually
|
||||
// set the length of the vec.
|
||||
data.set_len(old_len + mem::size_of::<PackedCommand<C>>());
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawn an entity into the World. See [`World::spawn`]
|
||||
pub fn spawn<B: Bundle + 'static>(&mut self, bundle: B) -> Entity {
|
||||
let e = self.entities.reserve();
|
||||
|
||||
self.add(move |world: &mut World| {
|
||||
world.spawn_into(e, bundle);
|
||||
});
|
||||
|
||||
e
|
||||
}
|
||||
|
||||
/// Execute all commands in the queue, in order of insertion
|
||||
pub fn execute(&mut self, world: &mut World) {
|
||||
self.queue.execute(Some(world));
|
||||
}
|
||||
}
|
||||
|
||||
impl FnArgFetcher for Commands<'_, '_> {
|
||||
type State = CommandQueue;
|
||||
type Arg<'a, 'state> = Commands<'a, 'state>;
|
||||
|
||||
fn world_access(&self) -> Access {
|
||||
Access::Write
|
||||
}
|
||||
|
||||
unsafe fn get<'a, 'state>(state: &'state mut Self::State, mut world_ptr: ptr::NonNull<World>) -> Self::Arg<'a, 'state> {
|
||||
let world = world_ptr.as_mut();
|
||||
Commands::new(state, world)
|
||||
}
|
||||
|
||||
fn create_state(_: NonNull<World>) -> Self::State {
|
||||
CommandQueue::default()
|
||||
}
|
||||
|
||||
fn apply_deferred<'a>(mut state: Self::State, mut world_ptr: NonNull<World>) {
|
||||
let world = unsafe { world_ptr.as_mut() };
|
||||
|
||||
let mut cmds = Commands::new(&mut state, world);
|
||||
// safety: Commands has a mut borrow only to entities in the world
|
||||
let world = unsafe { world_ptr.as_mut() };
|
||||
cmds.execute(world);
|
||||
}
|
||||
}
|
||||
|
||||
/// A system for executing deferred commands that are stored in a [`World`] as a Resource.
|
||||
///
|
||||
/// Commands are usually added inside a system from a [`Commands`] object created just for it
|
||||
/// as an fn argument. However, there may be cases that commands cannot be added that way, so
|
||||
/// they can also be added as a resource and executed later in this system.
|
||||
pub fn execute_deferred_commands(world: &mut World, mut commands: RefMut<Commands>) -> anyhow::Result<()> {
|
||||
commands.execute(world);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{cell::Ref, ptr::NonNull, sync::{atomic::{AtomicU32, Ordering}, Arc}};
|
||||
|
||||
use crate::{system::{GraphExecutor, IntoSystem}, tests::Vec2, CommandQueue, Commands, DynTypeId, World};
|
||||
|
||||
#[test]
|
||||
fn deferred_commands() {
|
||||
let mut world = World::new();
|
||||
let vecs = vec![Vec2::rand(), Vec2::rand(), Vec2::rand()];
|
||||
world.spawn((vecs[0],));
|
||||
world.spawn((vecs[1],));
|
||||
world.spawn((vecs[2],));
|
||||
|
||||
let spawned_vec = Vec2::rand();
|
||||
|
||||
let spawned_vec_cl = spawned_vec.clone();
|
||||
let test_sys = move |mut commands: Commands| -> anyhow::Result<()> {
|
||||
commands.spawn((spawned_vec_cl.clone(),));
|
||||
|
||||
Ok(())
|
||||
};
|
||||
|
||||
let mut graph_exec = GraphExecutor::new();
|
||||
graph_exec.insert_system("test", test_sys.into_system(), &[]);
|
||||
graph_exec.execute(NonNull::from(&world), true).unwrap();
|
||||
|
||||
assert_eq!(world.entities.len(), 4);
|
||||
|
||||
// there's only one archetype
|
||||
let arch = world.archetypes.values().next().unwrap();
|
||||
let col = arch.get_column(DynTypeId::of::<Vec2>()).unwrap();
|
||||
let vec2: Ref<Vec2> = unsafe { col.get(3) };
|
||||
assert_eq!(vec2.clone(), spawned_vec);
|
||||
}
|
||||
|
||||
/// A test that ensures a command in a command queue will only ever run once.
|
||||
#[test]
|
||||
fn commands_only_one_exec() {
|
||||
let mut world = World::new();
|
||||
|
||||
let counter = Arc::new(AtomicU32::new(0));
|
||||
|
||||
let mut queue = CommandQueue::default();
|
||||
let mut commands = Commands::new(&mut queue, &mut world);
|
||||
|
||||
let counter_cl = counter.clone();
|
||||
commands.add(move |_world: &mut World| {
|
||||
counter_cl.fetch_add(1, Ordering::AcqRel);
|
||||
});
|
||||
|
||||
queue.execute(Some(&mut world));
|
||||
assert_eq!(1, counter.load(Ordering::Acquire));
|
||||
|
||||
queue.execute(Some(&mut world));
|
||||
// If its not one, the command somehow was executed.
|
||||
// I would be surprised it wouldn't cause some segfault but still increment the counter
|
||||
assert_eq!(1, counter.load(Ordering::Acquire));
|
||||
}
|
||||
}
|
|
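The removed command module above queues type-erased commands in a packed byte buffer and replays them after a system runs. A minimal sketch of how it is driven, following the `GraphExecutor` test in the removed file; the `Health` component, the re-export paths, and wiring the executor outside a real game loop are illustrative assumptions:

    use std::ptr::NonNull;

    use lyra_ecs::{Commands, World};
    use lyra_ecs::system::{GraphExecutor, IntoSystem}; // paths assumed from the removed file's imports

    // Assumed component type; the removed tests spawn plain structs the same way.
    struct Health(u32);

    fn main() {
        let world = World::new();

        // A system that defers its world mutation through `Commands`, as the removed
        // doc comment recommends when mutating while iterating a `View`.
        let spawner = move |mut commands: Commands| -> anyhow::Result<()> {
            commands.spawn((Health(100),));
            Ok(())
        };

        // The executor runs the system, then applies the queued commands.
        let mut exec = GraphExecutor::new();
        exec.insert_system("spawner", spawner.into_system(), &[]);
        exec.execute(NonNull::from(&world), true).unwrap();

        assert_eq!(world.entities.len(), 1);
    }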
@@ -1,238 +0,0 @@
|
|||
use std::ops::Range;
|
||||
|
||||
use crate::{query::Fetch, Archetype, ArchetypeEntityId, ArchetypeId, Entity, World};
|
||||
|
||||
use super::{DynamicType, FetchDynamicTypeUnsafe, QueryDynamicType};
|
||||
|
||||
/// Stores the state of a dynamic view.
|
||||
///
|
||||
/// See [`DynamicView`].
|
||||
///
|
||||
/// This backs [`DynamicView`] which you should probably use. The only reason you would use this
|
||||
/// instead is if you cant borrow the world when storing this type, and its iterators.
|
||||
/// [`DynamicViewState`] provides an 'iterator' of [`DynamicViewStateIter`], which requires you to
|
||||
/// provide a world borrow on each `next` of the iterator. View [`DynamicViewStateIter`] for more
|
||||
/// info.
|
||||
pub struct DynamicViewState {
|
||||
pub(crate) queries: Vec<QueryDynamicType>
|
||||
}
|
||||
|
||||
impl DynamicViewState {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
queries: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn queries_num(&self) -> usize {
|
||||
self.queries.len()
|
||||
}
|
||||
|
||||
pub fn push(&mut self, dyn_query: QueryDynamicType) {
|
||||
self.queries.push(dyn_query);
|
||||
}
|
||||
|
||||
pub fn into_iter(self) -> DynamicViewStateIter {
|
||||
DynamicViewStateIter {
|
||||
queries: self.queries,
|
||||
fetchers: Vec::new(),
|
||||
next_archetype: 0,
|
||||
component_indices: 0..0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DynamicViewItem {
|
||||
pub row: Vec<DynamicType>,
|
||||
pub entity: Entity,
|
||||
}
|
||||
|
||||
/// A view iterator on dynamic types.
|
||||
///
|
||||
/// You will likely want to use [`DynamicViewIter`] unless you need to store the iterator
|
||||
/// without also borrowing from the world. [`DynamicViewStateIter`] doesn't
|
||||
/// actually implement [`Iterator`] since it requires a `&World` to be provided to it
|
||||
/// each time `next` is ran (see [`DynamicViewStateIter::next`]).
|
||||
pub struct DynamicViewStateIter {
|
||||
pub queries: Vec<QueryDynamicType>,
|
||||
fetchers: Vec<FetchDynamicTypeUnsafe>,
|
||||
next_archetype: usize,
|
||||
component_indices: Range<u64>,
|
||||
}
|
||||
|
||||
impl DynamicViewStateIter {
|
||||
pub fn next(&mut self, world: &World) -> Option<(Entity, Vec<DynamicType>)> {
|
||||
let archetypes = world.archetypes.values().collect::<Vec<_>>();
|
||||
|
||||
loop {
|
||||
if let Some(entity_index) = self.component_indices.next() {
|
||||
let entity = {
|
||||
let arch_id = self.next_archetype - 1;
|
||||
let arch = unsafe { archetypes.get_unchecked(arch_id) };
|
||||
arch.entity_at_index(ArchetypeEntityId(entity_index)).unwrap()
|
||||
};
|
||||
|
||||
let mut fetch_res = vec![];
|
||||
|
||||
for fetcher in self.fetchers.iter_mut() {
|
||||
let entity_index = ArchetypeEntityId(entity_index);
|
||||
if !fetcher.can_visit_item(entity_index) {
|
||||
break;
|
||||
} else {
|
||||
let i = unsafe { fetcher.get_item(entity_index) };
|
||||
fetch_res.push(i);
|
||||
}
|
||||
}
|
||||
|
||||
if fetch_res.len() != self.fetchers.len() {
|
||||
continue;
|
||||
}
|
||||
|
||||
return Some((entity, fetch_res));
|
||||
} else {
|
||||
if self.next_archetype >= archetypes.len() {
|
||||
return None; // ran out of archetypes to go through
|
||||
}
|
||||
|
||||
let arch_id = self.next_archetype;
|
||||
self.next_archetype += 1;
|
||||
let arch = unsafe { archetypes.get_unchecked(arch_id) };
|
||||
|
||||
if arch.entity_ids.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if self.queries.iter().any(|q| !q.can_visit_archetype(arch)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
self.fetchers = self.queries.iter()
|
||||
.map(|q| unsafe { q.fetch(world, ArchetypeId(arch_id as u64), arch) } )
|
||||
.collect();
|
||||
self.component_indices = 0..arch.entity_ids.len() as u64;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A view of dynamic types (types that are not known to Rust).
|
||||
///
|
||||
/// This view gives you the ability to iterate over types that are unknown to Rust, which we call
|
||||
/// dynamic types. This is great for embedding with a scripting language (*cough* *cough* WASM)
|
||||
/// since Rust doesn't actually need to know the types of what its iterating over.
|
||||
pub struct DynamicView<'a> {
|
||||
world: &'a World,
|
||||
inner: DynamicViewState,
|
||||
}
|
||||
|
||||
impl<'a> DynamicView<'a> {
|
||||
pub fn new(world: &'a World) -> Self {
|
||||
Self {
|
||||
world,
|
||||
inner: DynamicViewState::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push(&mut self, dyn_query: QueryDynamicType) {
|
||||
self.inner.queries.push(dyn_query);
|
||||
}
|
||||
}
|
||||
|
||||
/// A view iterator on dynamic types.
|
||||
///
|
||||
/// This view gives you the ability to iterate over types that are completely unknown to Rust.
|
||||
/// This works great for a embedding with a scripting language (*cough* *cough* WASM) since
|
||||
/// Rust doesn't actually need to know the types of what its iterating over.
|
||||
impl<'a> IntoIterator for DynamicView<'a> {
|
||||
type Item = (Entity, Vec<DynamicType>);
|
||||
|
||||
type IntoIter = DynamicViewIter<'a>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
let archetypes = self.world.archetypes.values().collect();
|
||||
|
||||
DynamicViewIter {
|
||||
world: self.world,
|
||||
archetypes,
|
||||
inner: self.inner.into_iter(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DynamicViewIter<'a> {
|
||||
pub world: &'a World,
|
||||
pub archetypes: Vec<&'a Archetype>,
|
||||
inner: DynamicViewStateIter,
|
||||
}
|
||||
|
||||
impl<'a> Iterator for DynamicViewIter<'a> {
|
||||
type Item = (Entity, Vec<DynamicType>);
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.inner.next(&self.world)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{alloc::Layout, ptr::NonNull};
|
||||
|
||||
use crate::{World, ComponentInfo, DynTypeId, DynamicBundle, query::dynamic::QueryDynamicType};
|
||||
|
||||
use super::{DynamicView, DynamicViewState};
|
||||
|
||||
#[test]
|
||||
fn single_dynamic_view_state() {
|
||||
let comp_layout = Layout::new::<u32>();
|
||||
let comp_info = ComponentInfo::new_unknown(Some("u32".to_string()), DynTypeId::Unknown(100), comp_layout);
|
||||
|
||||
let mut dynamic_bundle = DynamicBundle::default();
|
||||
let comp = 50u32;
|
||||
let ptr = NonNull::from(&comp).cast::<u8>();
|
||||
dynamic_bundle.push_unknown(ptr, comp_info.clone());
|
||||
|
||||
let mut world = World::new();
|
||||
world.spawn(dynamic_bundle);
|
||||
|
||||
let query = QueryDynamicType::from_info(comp_info);
|
||||
let mut view = DynamicViewState::new();
|
||||
view.push(query);
|
||||
|
||||
let mut view_iter = view.into_iter();
|
||||
while let Some((_e, view_row)) = view_iter.next(&world) {
|
||||
assert_eq!(view_row.len(), 1);
|
||||
let mut row_iter = view_row.iter();
|
||||
|
||||
let dynamic_type = row_iter.next().unwrap();
|
||||
let component_data = unsafe { dynamic_type.ptr.cast::<u32>().as_ref() };
|
||||
assert_eq!(*component_data, 50);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_dynamic_view() {
|
||||
let comp_layout = Layout::new::<u32>();
|
||||
let comp_info = ComponentInfo::new_unknown(Some("u32".to_string()), DynTypeId::Unknown(100), comp_layout);
|
||||
|
||||
let mut dynamic_bundle = DynamicBundle::default();
|
||||
let comp = 50u32;
|
||||
let ptr = NonNull::from(&comp).cast::<u8>();
|
||||
dynamic_bundle.push_unknown(ptr, comp_info.clone());
|
||||
|
||||
let mut world = World::new();
|
||||
world.spawn(dynamic_bundle);
|
||||
|
||||
let query = QueryDynamicType::from_info(comp_info);
|
||||
let mut view = DynamicView::new(&world);
|
||||
view.push(query);
|
||||
|
||||
for (_e, view_row) in view.into_iter() {
|
||||
assert_eq!(view_row.len(), 1);
|
||||
let mut row_iter = view_row.iter();
|
||||
|
||||
let dynamic_type = row_iter.next().unwrap();
|
||||
let component_data = unsafe { dynamic_type.ptr.cast::<u32>().as_ref() };
|
||||
assert_eq!(*component_data, 50);
|
||||
}
|
||||
}
|
||||
}
|
|
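The dynamic-view module removed above iterates component types that are only described at runtime (a layout plus an arbitrary type id), which is what the scripting layer builds on. A condensed sketch, lifted almost directly from the removed `single_dynamic_view` test; the re-export path of `DynamicView` and `QueryDynamicType` is assumed:

    use std::{alloc::Layout, ptr::NonNull};

    use lyra_ecs::{ComponentInfo, DynTypeId, DynamicBundle, World};
    use lyra_ecs::query::dynamic::{DynamicView, QueryDynamicType}; // path assumed

    fn main() {
        // Describe a component that only exists at runtime: a u32 registered under
        // an arbitrary unknown type id, exactly as the removed test does.
        let comp_layout = Layout::new::<u32>();
        let comp_info = ComponentInfo::new_unknown(Some("u32".to_string()), DynTypeId::Unknown(100), comp_layout);

        // Build a bundle from a raw pointer plus component info and spawn it.
        let mut bundle = DynamicBundle::default();
        let comp = 50u32;
        bundle.push_unknown(NonNull::from(&comp).cast::<u8>(), comp_info.clone());

        let mut world = World::new();
        world.spawn(bundle);

        // Query the same runtime-described type back out of the world.
        let mut view = DynamicView::new(&world);
        view.push(QueryDynamicType::from_info(comp_info));

        for (_entity, row) in view.into_iter() {
            let value = unsafe { row[0].ptr.cast::<u32>().as_ref() };
            assert_eq!(*value, 50);
        }
    }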
@@ -1,168 +0,0 @@
|
|||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use crate::{query::Fetch, Entity, World};
|
||||
|
||||
use super::{DynamicType, FetchDynamicTypeUnsafe, QueryDynamicType};
|
||||
|
||||
/// A view of dynamic types (types that are not known to Rust).
|
||||
///
|
||||
/// This view gives you the ability to iterate over types that are unknown to Rust, which we call
|
||||
/// dynamic types. This is great for embedding with a scripting language (*cough* *cough* WASM)
|
||||
/// since Rust doesn't actually need to know the types of what its iterating over.
|
||||
pub struct DynamicViewOne<'a> {
|
||||
world: &'a World,
|
||||
inner: DynamicViewOneOwned,
|
||||
}
|
||||
|
||||
impl<'a> Deref for DynamicViewOne<'a> {
|
||||
type Target = DynamicViewOneOwned;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> DerefMut for DynamicViewOne<'a> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.inner
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> DynamicViewOne<'a> {
|
||||
pub fn new(world: &'a World, entity: Entity) -> Self {
|
||||
Self {
|
||||
world,
|
||||
inner: DynamicViewOneOwned::new(entity)
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new [`DynamicViewOne`] with queries.
|
||||
pub fn new_with(world: &'a World, entity: Entity, queries: Vec<QueryDynamicType>) -> Self {
|
||||
Self {
|
||||
world,
|
||||
inner: DynamicViewOneOwned::new_with(entity, queries)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(self) -> Option<Vec<DynamicType>> {
|
||||
self.inner.get(&self.world)
|
||||
}
|
||||
}
|
||||
|
||||
/// A variant of [`DynamicViewOne`] that doesn't store a borrow of the world.
|
||||
#[derive(Clone)]
|
||||
pub struct DynamicViewOneOwned {
|
||||
pub entity: Entity,
|
||||
pub queries: Vec<QueryDynamicType>
|
||||
}
|
||||
|
||||
impl DynamicViewOneOwned {
|
||||
pub fn new(entity: Entity) -> Self {
|
||||
Self {
|
||||
entity,
|
||||
queries: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new [`DynamicViewOne`] with queries.
|
||||
pub fn new_with(entity: Entity, queries: Vec<QueryDynamicType>) -> Self {
|
||||
Self {
|
||||
entity,
|
||||
queries
|
||||
}
|
||||
}
|
||||
|
||||
pub fn get(self, world: &World) -> Option<Vec<DynamicType>> {
|
||||
dynamic_view_one_get_impl(world, &self.queries, self.entity)
|
||||
}
|
||||
}
|
||||
|
||||
fn dynamic_view_one_get_impl(world: &World, queries: &Vec<QueryDynamicType>, entity: Entity) -> Option<Vec<DynamicType>> {
|
||||
let arch = world.entity_archetype(entity)?;
|
||||
let aid = arch.entity_indexes().get(&entity)?;
|
||||
|
||||
// get all fetchers for the queries
|
||||
let mut fetchers: Vec<FetchDynamicTypeUnsafe> = queries.iter()
|
||||
.map(|q| unsafe { q.fetch(world, arch.id(), arch) } )
|
||||
.collect();
|
||||
|
||||
let mut fetch_res = vec![];
|
||||
for fetcher in fetchers.iter_mut() {
|
||||
if !fetcher.can_visit_item(*aid) {
|
||||
return None;
|
||||
} else {
|
||||
let i = unsafe { fetcher.get_item(*aid) };
|
||||
fetch_res.push(i);
|
||||
}
|
||||
}
|
||||
|
||||
if fetch_res.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(fetch_res)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{alloc::Layout, ptr::NonNull};
|
||||
|
||||
use crate::{World, ComponentInfo, DynTypeId, DynamicBundle, query::dynamic::QueryDynamicType};
|
||||
|
||||
use super::DynamicViewOne;
|
||||
|
||||
#[test]
|
||||
fn single_dynamic_view_one_state() {
|
||||
let comp_layout = Layout::new::<u32>();
|
||||
let comp_info = ComponentInfo::new_unknown(Some("u32".to_string()), DynTypeId::Unknown(100), comp_layout);
|
||||
|
||||
let mut dynamic_bundle = DynamicBundle::default();
|
||||
let comp = 50u32;
|
||||
let ptr = NonNull::from(&comp).cast::<u8>();
|
||||
dynamic_bundle.push_unknown(ptr, comp_info.clone());
|
||||
|
||||
let mut world = World::new();
|
||||
let e = world.spawn(dynamic_bundle);
|
||||
|
||||
let query = QueryDynamicType::from_info(comp_info);
|
||||
let view = DynamicViewOne::new_with(&world, e, vec![query]);
|
||||
|
||||
let view_row = view.get()
|
||||
.expect("failed to get entity row");
|
||||
assert_eq!(view_row.len(), 1);
|
||||
|
||||
let mut row_iter = view_row.iter();
|
||||
let dynamic_type = row_iter.next().unwrap();
|
||||
|
||||
let component_data = unsafe { dynamic_type.ptr.cast::<u32>().as_ref() };
|
||||
assert_eq!(*component_data, 50);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn single_dynamic_view_one() {
|
||||
let comp_layout = Layout::new::<u32>();
|
||||
let comp_info = ComponentInfo::new_unknown(Some("u32".to_string()), DynTypeId::Unknown(100), comp_layout);
|
||||
|
||||
let mut dynamic_bundle = DynamicBundle::default();
|
||||
let comp = 50u32;
|
||||
let ptr = NonNull::from(&comp).cast::<u8>();
|
||||
dynamic_bundle.push_unknown(ptr, comp_info.clone());
|
||||
|
||||
let mut world = World::new();
|
||||
let e = world.spawn(dynamic_bundle);
|
||||
|
||||
let query = QueryDynamicType::from_info(comp_info);
|
||||
let view = DynamicViewOne::new_with(&world, e, vec![query]);
|
||||
|
||||
let view_row = view.get()
|
||||
.expect("failed to get entity row");
|
||||
assert_eq!(view_row.len(), 1);
|
||||
|
||||
let mut row_iter = view_row.iter();
|
||||
|
||||
let dynamic_type = row_iter.next().unwrap();
|
||||
|
||||
let component_data = unsafe { dynamic_type.ptr.cast::<u32>().as_ref() };
|
||||
assert_eq!(*component_data, 50);
|
||||
}
|
||||
}
|
|
@@ -1,97 +0,0 @@
use std::marker::PhantomData;

use crate::{query::{AsFilter, AsQuery, Fetch, Filter, Query}, Component, ComponentColumn, DynTypeId, Tick, World};

pub struct ChangedFetcher<'a, T> {
    col: &'a ComponentColumn,
    tick: Tick,
    _phantom: PhantomData<&'a T>,
}

impl<'a, T> Fetch<'a> for ChangedFetcher<'a, T>
where
    T: 'a,
{
    type Item = bool;

    fn dangling() -> Self {
        unreachable!()
    }

    unsafe fn get_item(&mut self, entity: crate::world::ArchetypeEntityId) -> Self::Item {
        let tick = self.col.entity_ticks[entity.0 as usize];
        *tick >= (*self.tick) - 1
    }
}

/// A filter that fetches components that have changed.
///
/// Since [`AsQuery`] is implemented for `&T`, you can use this query like this:
/// ```nobuild
/// for ts in world.view::<&T>() {
///     println!("Got a &T!");
/// }
/// ```
pub struct Changed<T> {
    type_id: DynTypeId,
    _phantom: PhantomData<T>
}

impl<T: Component> Default for Changed<T> {
    fn default() -> Self {
        Self {
            type_id: DynTypeId::of::<T>(),
            _phantom: PhantomData,
        }
    }
}

// manually implemented to avoid a Copy bound on T
impl<T> Copy for Changed<T> {}

// manually implemented to avoid a Clone bound on T
impl<T> Clone for Changed<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: Component> Changed<T> {
    pub fn new() -> Self {
        Self::default()
    }
}

impl<T: Component> Query for Changed<T>
where
    T: 'static
{
    type Item<'a> = bool;
    type Fetch<'a> = ChangedFetcher<'a, T>;

    fn new() -> Self {
        Changed::<T>::new()
    }

    fn can_visit_archetype(&self, archetype: &crate::archetype::Archetype) -> bool {
        archetype.has_column(self.type_id)
    }

    unsafe fn fetch<'a>(&self, w: &'a World, a: &'a crate::archetype::Archetype, _: crate::Tick) -> Self::Fetch<'a> {
        ChangedFetcher {
            col: a.get_column(self.type_id).unwrap(),
            tick: w.current_tick(),
            _phantom: PhantomData::<&T>,
        }
    }
}

impl<T: Component> AsQuery for Changed<T> {
    type Query = Self;
}

impl<T: Component> Filter for Changed<T> { }

impl<T: Component> AsFilter for Changed<T> {
    type Filter = Self;
}
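The `Changed<T>` filter above reports, per entity, whether the component's tick is within one tick of the world's current tick. Its doc example still shows a plain `&T` view, so here is a hedged usage sketch in the tuple-view style the other filter docs in this diff use; the `Position` component and the import path are assumptions:

    use lyra_ecs::World;
    use lyra_ecs::query::filter::Changed; // module path assumed from the removed files' layout

    // Hypothetical component, assumed to satisfy the crate's `Component` trait.
    #[derive(Debug)]
    struct Position { x: f32, y: f32 }

    fn react_to_moves(world: &World) {
        // `Changed<Position>` yields one bool per matched entity: true when the
        // component's tick is at least `current_tick - 1`.
        for (pos, changed) in world.view::<(&Position, Changed<Position>)>().iter() {
            if changed {
                println!("position changed to {:?}", pos);
            }
        }
    }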
@@ -1,52 +0,0 @@
use std::marker::PhantomData;

use crate::{query::{AsFilter, AsQuery, Filter, Query}, Archetype, Component, DynTypeId, World};

use super::StaticFetcher;

/// A filter query that fetches when the entity has the component `C`.
///
/// This does not return a reference to the component, it returns `()` if the entity has
/// the component. This query is great when its used with [`Or`](super::Or).
#[derive(Default)]
pub struct Has<C: Component> {
    _marker: PhantomData<C>
}

impl<C: Component> Copy for Has<C> {}

impl<C: Component> Clone for Has<C> {
    fn clone(&self) -> Self {
        Self { _marker: self._marker.clone() }
    }
}

impl<C: Component> Query for Has<C> {
    type Item<'a> = bool;
    type Fetch<'a> = StaticFetcher<bool>;

    fn new() -> Self {
        Has {
            _marker: PhantomData
        }
    }

    fn can_visit_archetype(&self, archetype: &Archetype) -> bool {
        archetype.has_column(DynTypeId::of::<C>())
    }

    unsafe fn fetch<'a>(&self, _world: &'a World, _: &'a Archetype, _: crate::Tick) -> Self::Fetch<'a> {
        // if fetch is called, it means that 'can_visit_archetype' returned true
        StaticFetcher::new(true)
    }
}

impl<C: Component> AsQuery for Has<C> {
    type Query = Self;
}

impl<C: Component> Filter for Has<C> { }

impl<C: Component> AsFilter for Has<C> {
    type Filter = Self;
}
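As the doc comment above suggests, `Has` is mostly useful combined with `Or`. A small sketch adapted from the `Or` example further down in this diff, with placeholder component types and an assumed import path:

    use lyra_ecs::World;
    use lyra_ecs::query::filter::{Has, Or}; // module path assumed

    // Placeholder components, as in the `Or` doc example; assumed to implement `Component`.
    struct Mesh;
    struct Scene;
    struct Transform;

    fn visible_roots(world: &World) {
        // Matches entities that have a Transform plus either a Mesh or a Scene;
        // `Has` only reports presence, it never borrows the component itself.
        for (_transform, _which) in world.view::<(&Transform, Or<Has<Mesh>, Has<Scene>>)>().iter() {
            // ...
        }
    }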
@@ -1,44 +0,0 @@
mod has;
use std::marker::PhantomData;

pub use has::*;

mod or;
pub use or::*;

mod not;
pub use not::*;

mod changed;
pub use changed::*;

use super::Fetch;

/// A fetcher that just returns a provided value
pub struct StaticFetcher<T: Clone> {
    value: T,
}

impl<'a, T: Clone> StaticFetcher<T> {
    pub fn new(value: T) -> Self {
        Self {
            value
        }
    }
}


impl<'a, T> Fetch<'a> for StaticFetcher<T>
where
    T: Clone + 'a,
{
    type Item = T;

    fn dangling() -> Self {
        unreachable!()
    }

    unsafe fn get_item(&mut self, _: crate::world::ArchetypeEntityId) -> Self::Item {
        self.value.clone()
    }
}
@@ -1,52 +0,0 @@
use crate::{query::{AsFilter, AsQuery, Filter, Query}, Archetype, World};

use super::StaticFetcher;

/// A filter query that fetches the inverse of `Q`.
///
/// This means that entities that `Q` fetches are skipped, and entities that
/// `Q` does not fetch are not skipped.
///
/// ```nobuild
/// // Iterate over entities that has a transform, and are not the origin of a `ChildOf` relationship.
/// for (en, pos, _) in world
///     .view::<(Entities, &Transform, Not<Has<RelationOriginComponent<ChildOf>>>)>()
///     .iter()
/// {
///     // ...
/// }
/// ```
#[derive(Default, Copy, Clone)]
pub struct Not<Q: Query> {
    query: Q,
}

impl<Q: Query> Query for Not<Q> {
    type Item<'a> = bool;
    type Fetch<'a> = StaticFetcher<bool>;

    fn new() -> Self {
        Not {
            query: Q::new(),
        }
    }

    fn can_visit_archetype(&self, archetype: &Archetype) -> bool {
        !self.query.can_visit_archetype(archetype)
    }

    unsafe fn fetch<'a>(&self, _world: &'a World, _: &'a Archetype, _: crate::Tick) -> Self::Fetch<'a> {
        // if fetch is called, it means that 'can_visit_archetype' returned true
        StaticFetcher::new(true)
    }
}

impl<Q: Query> AsQuery for Not<Q> {
    type Query = Self;
}

impl<Q: Query> Filter for Not<Q> { }

impl<Q: Query> AsFilter for Not<Q> {
    type Filter = Self;
}
@@ -1,113 +0,0 @@
use crate::{query::{AsQuery, Fetch, Query}, Archetype, World};

pub struct OrFetch<'a, Q1: Query, Q2: Query> {
    left: Option<Q1::Fetch<'a>>,
    right: Option<Q2::Fetch<'a>>,
}

impl<'a, Q1: Query, Q2: Query> Fetch<'a> for OrFetch<'a, Q1, Q2> {
    type Item = (Option<Q1::Item<'a>>, Option<Q2::Item<'a>>);

    fn dangling() -> Self {
        Self {
            left: None,
            right: None,
        }
    }

    unsafe fn get_item(&mut self, entity: crate::ArchetypeEntityId) -> Self::Item {
        let mut res = (None, None);

        if let Some(left) = self.left.as_mut() {
            let i = left.get_item(entity);
            res.0 = Some(i);
        }

        if let Some(right) = self.right.as_mut() {
            let i = right.get_item(entity);
            res.1 = Some(i);
        }

        res
    }
}

/// A filter query returning when either `Q1` or `Q2` returns.
///
/// This checks if `Q1` can fetch before checking `Q2`.
///
/// ```nobuild
/// for (en, pos, _) in world
///     .view::<(Entities, &Transform, Or<Has<Mesh>, Has<Scene>>)>()
///     .iter()
/// {
///     // do some things with the position of the entities
///
///     // now handle do things with the Mesh or Scene that the entity could have
///     if let Some(mesh) = world.view_one::<&Mesh>(en).get() {
///         // do mesh things
///     }
///
///     if let Some(scene) = world.view_one::<&Scene>(en).get() {
///         // do scene things
///     }
/// }
/// ```
#[derive(Default)]
pub struct Or<Q1: AsQuery, Q2: AsQuery> {
    left: Q1::Query,
    right: Q2::Query,
    can_visit_left: bool,
    can_visit_right: bool,
}

impl<Q1: AsQuery, Q2: AsQuery> Copy for Or<Q1, Q2> {}

impl<Q1: AsQuery, Q2: AsQuery> Clone for Or<Q1, Q2> {
    fn clone(&self) -> Self {
        Self {
            left: self.left.clone(),
            right: self.right.clone(),
            can_visit_left: self.can_visit_left,
            can_visit_right: self.can_visit_right,
        }
    }
}

impl<Q1: AsQuery, Q2: AsQuery> Query for Or<Q1, Q2> {
    type Item<'a> = (Option<<Q1::Query as Query>::Item<'a>>, Option<<Q2::Query as Query>::Item<'a>>);

    type Fetch<'a> = OrFetch<'a, Q1::Query, Q2::Query>;

    fn new() -> Self {
        Or {
            left: Q1::Query::new(),
            right: Q2::Query::new(),
            can_visit_left: false,
            can_visit_right: false,
        }
    }

    fn can_visit_archetype(&self, archetype: &Archetype) -> bool {
        self.left.can_visit_archetype(archetype) || self.right.can_visit_archetype(archetype)
    }

    unsafe fn fetch<'a>(&self, world: &'a World, archetype: &'a Archetype, tick: crate::Tick) -> Self::Fetch<'a> {
        let mut f = OrFetch::<Q1::Query, Q2::Query>::dangling();

        // TODO: store the result of Self::can_visit_archetype so this isn't ran twice
        if self.left.can_visit_archetype(archetype) {
            f.left = Some(self.left.fetch(world, archetype, tick));
        }

        if self.right.can_visit_archetype(archetype) {
            f.right = Some(self.right.fetch(world, archetype, tick));
        }

        f
    }
}

impl<Q1: AsQuery, Q2: AsQuery> AsQuery for Or<Q1, Q2> {
    type Query = Self;
}
@@ -1,76 +0,0 @@
use crate::{Archetype, World};

use super::{AsQuery, Fetch, Query};

#[derive(Default)]
pub struct OptionalFetcher<'a, Q: AsQuery> {
    fetcher: Option<<Q::Query as Query>::Fetch<'a>>,
}

impl<'a, Q: AsQuery> Fetch<'a> for OptionalFetcher<'a, Q> {
    type Item = Option<<Q::Query as Query>::Item<'a>>;

    fn dangling() -> Self {
        unreachable!()
    }

    unsafe fn get_item(&mut self, entity: crate::ArchetypeEntityId) -> Self::Item {
        self.fetcher.as_mut()
            .map(|f| f.get_item(entity))
    }

    fn can_visit_item(&mut self, entity: crate::ArchetypeEntityId) -> bool {
        self.fetcher.as_mut()
            .map(|f| f.can_visit_item(entity))
            .unwrap_or(true)
    }
}

#[derive(Default)]
pub struct Optional<Q: AsQuery> {
    query: Q::Query,
}

impl<Q: AsQuery> Copy for Optional<Q> { }

impl<Q: AsQuery> Clone for Optional<Q> {
    fn clone(&self) -> Self {
        Self { query: self.query.clone() }
    }
}

impl<Q: AsQuery> Query for Optional<Q> {
    type Item<'a> = Option<<Q::Query as Query>::Item<'a>>;

    type Fetch<'a> = OptionalFetcher<'a, Q>;

    fn new() -> Self {
        Optional {
            query: Q::Query::new(),
        }
    }

    fn can_visit_archetype(&self, _: &Archetype) -> bool {
        true
    }

    unsafe fn fetch<'a>(&self, world: &'a World, arch: &'a Archetype, tick: crate::Tick) -> Self::Fetch<'a> {
        let fetcher = if self.query.can_visit_archetype(arch) {
            Some(self.query.fetch(world, arch, tick))
        } else {
            None
        };

        OptionalFetcher {
            fetcher,
        }
    }
}

impl<Q: AsQuery> AsQuery for Optional<Q> {
    type Query = Self;
}

impl<Q: AsQuery> AsQuery for Option<Q> {
    type Query = Optional<Q>;
}
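Because the last impl above maps `Option<Q>` onto `Optional<Q>`, an optional component can be put straight into a view tuple without filtering entities out. A hedged sketch with placeholder components:

    use lyra_ecs::World;

    // Placeholder components for illustration; assumed to implement `Component`.
    struct Transform;
    struct Velocity;

    fn integrate(world: &World) {
        // `Option<&Velocity>` never skips an entity: it yields `Some(&Velocity)` when
        // the archetype has the column and `None` otherwise.
        for (_transform, velocity) in world.view::<(&Transform, Option<&Velocity>)>().iter() {
            if let Some(_v) = velocity {
                // entity has a velocity; move it
            }
        }
    }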
@@ -1,102 +0,0 @@
|
|||
use std::ops::Deref;
|
||||
|
||||
use crate::{system::FnArgFetcher, Tick, World};
|
||||
use super::{Fetch, Query, AsQuery};
|
||||
|
||||
/// Fetcher used to fetch the current tick of the world.
|
||||
pub struct FetchWorldTick {
|
||||
tick: Tick
|
||||
}
|
||||
|
||||
impl<'a> Fetch<'a> for FetchWorldTick {
|
||||
type Item = WorldTick;
|
||||
|
||||
fn dangling() -> Self {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn can_visit_item(&mut self, _entity: crate::ArchetypeEntityId) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
unsafe fn get_item(&mut self, _entity: crate::world::ArchetypeEntityId) -> Self::Item {
|
||||
WorldTick(self.tick)
|
||||
}
|
||||
}
|
||||
|
||||
/// Query used to query the current tick of the world.
|
||||
#[derive(Clone, Copy)]
|
||||
pub struct QueryWorldTick;
|
||||
|
||||
impl Default for QueryWorldTick {
|
||||
fn default() -> Self {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
impl Query for QueryWorldTick {
|
||||
type Item<'a> = WorldTick;
|
||||
|
||||
type Fetch<'a> = FetchWorldTick;
|
||||
|
||||
const ALWAYS_FETCHES: bool = true;
|
||||
|
||||
fn new() -> Self {
|
||||
QueryWorldTick
|
||||
}
|
||||
|
||||
fn can_visit_archetype(&self, _archetype: &crate::archetype::Archetype) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
unsafe fn fetch<'a>(&self, world: &'a World, _archetype: &'a crate::archetype::Archetype, _tick: crate::Tick) -> Self::Fetch<'a> {
|
||||
FetchWorldTick {
|
||||
tick: world.current_tick()
|
||||
}
|
||||
}
|
||||
|
||||
unsafe fn fetch_world<'a>(&self, world: &'a World) -> Option<Self::Fetch<'a>> {
|
||||
Some(FetchWorldTick {
|
||||
tick: world.current_tick()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl AsQuery for QueryWorldTick {
|
||||
type Query = Self;
|
||||
}
|
||||
|
||||
/// Type that can be used in an fn system for fetching the current world tick.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct WorldTick(Tick);
|
||||
|
||||
impl Deref for WorldTick {
|
||||
type Target = Tick;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl AsQuery for WorldTick {
|
||||
type Query = QueryWorldTick;
|
||||
}
|
||||
|
||||
impl FnArgFetcher for WorldTick {
|
||||
type State = ();
|
||||
|
||||
type Arg<'a, 'state> = WorldTick;
|
||||
|
||||
fn create_state(_: std::ptr::NonNull<World>) -> Self::State {
|
||||
()
|
||||
}
|
||||
|
||||
unsafe fn get<'a, 'state>(_: &'state mut Self::State, world: std::ptr::NonNull<World>) -> Self::Arg<'a, 'state> {
|
||||
let world = world.as_ref();
|
||||
WorldTick(world.current_tick())
|
||||
}
|
||||
|
||||
fn apply_deferred(_: Self::State, _: std::ptr::NonNull<World>) {
|
||||
|
||||
}
|
||||
}
|
|
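The file above removes `WorldTick`, which can be read either as a query or as an fn-system argument through its `FnArgFetcher` impl. A minimal sketch, assuming the fn-system conventions shown in the removed tests and an assumed re-export path:

    use lyra_ecs::World;
    use lyra_ecs::query::WorldTick; // module path assumed

    // As an fn-system argument: the fetcher hands back the world's current tick.
    // Such a function would be registered on the executor like any other system.
    fn report_tick(tick: WorldTick) -> anyhow::Result<()> {
        println!("current tick: {:?}", tick); // WorldTick derefs to Tick
        Ok(())
    }

    // As a query: every matched entity yields the same WorldTick value.
    fn report_tick_per_entity(world: &World) {
        for tick in world.view::<WorldTick>().iter() {
            let _ = tick;
        }
    }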
@@ -1,94 +0,0 @@
|
|||
use std::{any::{Any, TypeId}, sync::Arc};
|
||||
|
||||
use atomic_refcell::{AtomicRef, AtomicRefCell, AtomicRefMut};
|
||||
|
||||
use crate::{Tick, TickTracker};
|
||||
|
||||
/// Shorthand for `Send + Sync + 'static`, so it never needs to be implemented manually.
|
||||
pub trait ResourceObject: Send + Sync + Any {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
}
|
||||
|
||||
impl<T: Send + Sync + Any> ResourceObject for T {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TrackedResource<T: ?Sized> {
|
||||
pub tick: Tick,
|
||||
pub res: T,
|
||||
}
|
||||
|
||||
/// A type erased storage for a Resource.
|
||||
#[derive(Clone)]
|
||||
pub struct ResourceData {
|
||||
pub(crate) data: Arc<AtomicRefCell<TrackedResource<dyn ResourceObject>>>,
|
||||
type_id: TypeId,
|
||||
}
|
||||
|
||||
impl ResourceData {
|
||||
pub fn new<T: ResourceObject>(data: T, tick: Tick) -> Self {
|
||||
|
||||
Self {
|
||||
data: Arc::new(AtomicRefCell::new(TrackedResource { tick, res: data })),
|
||||
type_id: TypeId::of::<T>(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating whether or not `T`` is of the same type of the Resource
|
||||
pub fn is<T: ResourceObject>(&self) -> bool {
|
||||
self.type_id == TypeId::of::<T>()
|
||||
}
|
||||
|
||||
/// Borrow the data inside of the resource.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// * If the data is already borrowed mutably, this will panic.
|
||||
/// * If the type of `T` is not the same as the resource type.
|
||||
pub fn get<T: ResourceObject>(&self) -> AtomicRef<T> {
|
||||
AtomicRef::map(self.data.borrow(), |a| a.res.as_any().downcast_ref().unwrap())
|
||||
}
|
||||
|
||||
/// Mutably borrow the data inside of the resource.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// * If the data is already borrowed mutably, this will panic.
|
||||
/// * If the type of `T` is not the same as the resource type.
|
||||
pub fn get_mut<T: ResourceObject>(&self) -> AtomicRefMut<T> {
|
||||
AtomicRefMut::map(self.data.borrow_mut(), |a| a.res.as_any_mut().downcast_mut().unwrap())
|
||||
}
|
||||
|
||||
/// Borrow the data inside of the resource.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// * If the type of `T` is not the same as the resource type.
|
||||
pub fn try_get<T: ResourceObject>(&self) -> Option<AtomicRef<T>> {
|
||||
self.data.try_borrow()
|
||||
.map(|r| AtomicRef::map(r, |a| a.res.as_any().downcast_ref().unwrap()))
|
||||
.ok()
|
||||
}
|
||||
|
||||
/// Mutably borrow the data inside of the resource.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// * If the type of `T` is not the same as the resource type.
|
||||
pub fn try_get_mut<T: ResourceObject>(&self) -> Option<AtomicRefMut<T>> {
|
||||
self.data.try_borrow_mut()
|
||||
.map(|r| AtomicRefMut::map(r, |a| a.res.as_any_mut().downcast_mut().unwrap()))
|
||||
.ok()
|
||||
}
|
||||
|
||||
pub fn changed(&self, tick: Tick) -> bool {
|
||||
*self.data.borrow().tick >= *tick - 1
|
||||
}
|
||||
}
|
|
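The removed resource module wraps each resource in an `Arc<AtomicRefCell<...>>` together with the tick it last changed on. A small sketch of the borrow behaviour of `ResourceData`; the module path, the public visibility of `World::current_tick`, and the `WindowSize` type are assumptions:

    use lyra_ecs::World;
    use lyra_ecs::resource::ResourceData; // path taken from the crate-internal import, assumed public

    // Hypothetical resource type; anything Send + Sync + 'static qualifies.
    struct WindowSize { w: u32, h: u32 }

    fn main() {
        let world = World::new();
        // `current_tick` is called from other modules in this diff; assumed public here.
        let data = ResourceData::new(WindowSize { w: 1280, h: 720 }, world.current_tick());

        assert!(data.is::<WindowSize>());

        {
            // Shared borrow; panics if a mutable borrow is live, like AtomicRefCell.
            let size = data.get::<WindowSize>();
            assert_eq!(size.w, 1280);
        }

        // The fallible variants return None instead of panicking on a borrow conflict.
        if let Some(mut size) = data.try_get_mut::<WindowSize>() {
            size.h = 800;
        }
    }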
@@ -1,805 +0,0 @@
|
|||
use std::{any::TypeId, collections::HashMap, ops::Deref, ptr::NonNull};
|
||||
|
||||
use atomic_refcell::{AtomicRef, AtomicRefMut};
|
||||
|
||||
use crate::{archetype::{Archetype, ArchetypeId}, bundle::Bundle, query::{dynamic::DynamicView, AsFilter, AsQuery, Query, Res, ResMut, ViewIter, ViewOne, ViewState}, resource::ResourceData, ComponentInfo, DynTypeId, DynamicBundle, Entities, Entity, ResourceObject, Tick, TickTracker, TrackedResource};
|
||||
|
||||
/// The id of the entity for the Archetype.
|
||||
///
|
||||
/// The Archetype uses this as the index in the component columns
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct ArchetypeEntityId(pub u64);
|
||||
|
||||
impl Deref for ArchetypeEntityId {
|
||||
type Target = u64;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
|
||||
pub struct Record {
|
||||
pub id: ArchetypeId,
|
||||
pub index: ArchetypeEntityId,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct World {
|
||||
pub archetypes: HashMap<ArchetypeId, Archetype>,
|
||||
next_archetype_id: ArchetypeId,
|
||||
resources: HashMap<TypeId, ResourceData>,
|
||||
tracker: TickTracker,
|
||||
pub entities: Entities,
|
||||
}
|
||||
|
||||
impl Default for World {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
archetypes: HashMap::new(),
|
||||
next_archetype_id: ArchetypeId(0),
|
||||
resources: HashMap::new(),
|
||||
tracker: TickTracker::new(),
|
||||
entities: Entities::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl World {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Reserves an entity in the world
|
||||
pub fn reserve_entity(&mut self) -> Entity {
|
||||
self.entities.reserve()
|
||||
}
|
||||
|
||||
pub fn spawn<B>(&mut self, bundle: B) -> Entity
|
||||
where
|
||||
B: Bundle
|
||||
{
|
||||
let new_entity = self.reserve_entity();
|
||||
self.spawn_into(new_entity, bundle);
|
||||
new_entity
|
||||
}
|
||||
|
||||
/// Spawn the components into a reserved entity. Only do this with entities that
|
||||
/// were reserved with [`World::reserve_entity`].
|
||||
///
|
||||
/// # Safety
|
||||
/// Do not use this method with an entity that is currently alive, it WILL cause undefined behavior.
|
||||
pub fn spawn_into<B>(&mut self, entity: Entity, bundle: B)
|
||||
where
|
||||
B: Bundle
|
||||
{
|
||||
let tick = self.current_tick();
|
||||
let bundle_types = bundle.type_ids();
|
||||
|
||||
// try to find an archetype
|
||||
let archetype = self.archetypes
|
||||
.values_mut()
|
||||
.find(|a| a.is_archetype_for(&bundle_types));
|
||||
|
||||
if let Some(archetype) = archetype {
|
||||
// make at just one check to ensure you're not spawning twice
|
||||
debug_assert!(!archetype.entity_ids.contains_key(&entity),
|
||||
"You attempted to spawn components into an entity that already exists!");
|
||||
|
||||
let arche_idx = archetype.add_entity(entity, bundle, &tick);
|
||||
|
||||
// Create entity record and store it
|
||||
let record = Record {
|
||||
id: archetype.id(),
|
||||
index: arche_idx,
|
||||
};
|
||||
|
||||
self.entities.insert_entity_record(entity, record);
|
||||
}
|
||||
// create a new archetype if one isn't found
|
||||
else {
|
||||
// create archetype
|
||||
let new_arch_id = self.next_archetype_id.increment();
|
||||
let mut archetype = Archetype::from_bundle_info(new_arch_id, bundle.info());
|
||||
let entity_arch_id = archetype.add_entity(entity, bundle, &tick);
|
||||
|
||||
// store archetype
|
||||
self.archetypes.insert(new_arch_id, archetype);
|
||||
|
||||
// Create entity record and store it
|
||||
let record = Record {
|
||||
id: new_arch_id,
|
||||
// this is the first entity in the archetype
|
||||
index: entity_arch_id,
|
||||
};
|
||||
|
||||
self.entities.insert_entity_record(entity, record);
|
||||
}
|
||||
}
|
||||
|
||||
/// Despawn an entity from the World
|
||||
pub fn despawn(&mut self, entity: Entity) {
|
||||
let tick = self.current_tick();
|
||||
if let Some(record) = self.entities.arch_index.get_mut(&entity.id) {
|
||||
let arch = self.archetypes.get_mut(&record.id).unwrap();
|
||||
|
||||
if let Some((moved, new_index)) = arch.remove_entity(entity, &tick) {
|
||||
// replace the archetype index of the moved index with its new index.
|
||||
self.entities.arch_index.get_mut(&moved.id).unwrap().index = new_index;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Insert a component bundle into an existing entity.
|
||||
///
|
||||
/// If the components are already existing on the entity, they will be updated, else the
|
||||
/// entity will be moved to a different Archetype that can store the entity. That may
|
||||
/// involve creating a new Archetype.
|
||||
pub fn insert<B>(&mut self, entity: Entity, bundle: B)
|
||||
where
|
||||
B: Bundle
|
||||
{
|
||||
let tick = self.current_tick();
|
||||
let record = self.entities.entity_record(entity);
|
||||
|
||||
if record.is_none() {
|
||||
//let mut combined_column_infos: Vec<ComponentInfo> = bundle.info().columns.iter().map(|c| c.info).collect();
|
||||
let new_arch_id = self.next_archetype_id.increment();
|
||||
let mut archetype = Archetype::from_bundle_info(new_arch_id, bundle.info());
|
||||
|
||||
let mut dbun = DynamicBundle::new();
|
||||
dbun.push_bundle(bundle);
|
||||
|
||||
let entity_arch_id = archetype.add_entity(entity, dbun, &tick);
|
||||
|
||||
self.archetypes.insert(new_arch_id, archetype);
|
||||
|
||||
// Create entity record and store it
|
||||
let record = Record {
|
||||
id: new_arch_id,
|
||||
index: entity_arch_id,
|
||||
};
|
||||
|
||||
self.entities.insert_entity_record(entity, record);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
let record = record.unwrap();
|
||||
|
||||
let current_arch = self.archetypes.get(&record.id).unwrap();
|
||||
let current_arch_len = current_arch.len();
|
||||
|
||||
let mut contains_all = true;
|
||||
for id in bundle.type_ids() {
|
||||
contains_all = contains_all && current_arch.get_column(id).is_some();
|
||||
}
|
||||
|
||||
if contains_all {
|
||||
let current_arch = self.archetypes.get_mut(&record.id).unwrap();
|
||||
let entry_idx = *current_arch.entity_indexes()
|
||||
.get(&entity).unwrap();
|
||||
|
||||
bundle.take(|ptr, id, _info| {
|
||||
let col = current_arch.get_column_mut(id).unwrap();
|
||||
unsafe { col.set_at(entry_idx.0 as _, ptr, tick) };
|
||||
});
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// contains the type ids for the old component columns + the ids for the new components
|
||||
let mut combined_column_types: Vec<DynTypeId> = current_arch.columns.iter().map(|c| c.info.type_id()).collect();
|
||||
combined_column_types.extend(bundle.type_ids());
|
||||
|
||||
// contains the ComponentInfo for the old component columns + the info for the new components
|
||||
let mut combined_column_infos: Vec<ComponentInfo> = current_arch.columns.iter().map(|c| c.info).collect();
|
||||
combined_column_infos.extend(bundle.info());
|
||||
|
||||
// pointers only for the old columns
|
||||
let old_columns: Vec<(NonNull<u8>, ComponentInfo)> = current_arch.columns.iter()
|
||||
.map(|c| unsafe { (NonNull::new_unchecked(c.borrow_ptr().as_ptr()), c.info) })
|
||||
.collect();
|
||||
|
||||
// try to find an archetype that this entity and its new components can fit into
|
||||
if let Some(arch) = self.archetypes.values_mut().find(|a| a.is_archetype_for(&combined_column_types)) {
|
||||
let mut dbun = DynamicBundle::new();
|
||||
// move old entity components into new archetype columns
|
||||
for (col_ptr, col_info) in old_columns.into_iter() {
|
||||
unsafe {
|
||||
let ptr = NonNull::new_unchecked(col_ptr.as_ptr()
|
||||
.add(record.index.0 as usize * col_info.layout().size()));
|
||||
dbun.push_unknown(ptr, col_info);
|
||||
}
|
||||
}
|
||||
dbun.push_bundle(bundle);
|
||||
|
||||
let res_index = arch.add_entity(entity, dbun, &tick);
|
||||
arch.ensure_synced();
|
||||
|
||||
let new_record = Record {
|
||||
id: arch.id(),
|
||||
index: res_index,
|
||||
};
|
||||
self.entities.insert_entity_record(entity, new_record);
|
||||
} else {
|
||||
if current_arch_len == 1 {
|
||||
// if this entity is the only entity for this archetype, add more columns to it
|
||||
let current_arch = self.archetypes.get_mut(&record.id).unwrap();
|
||||
current_arch.extend(&tick, vec![bundle]);
|
||||
return;
|
||||
}
|
||||
|
||||
let new_arch_id = self.next_archetype_id.increment();
|
||||
let mut archetype = Archetype::from_bundle_info(new_arch_id, combined_column_infos);
|
||||
|
||||
let mut dbun = DynamicBundle::new();
|
||||
for (column_ptr, column_info) in old_columns.into_iter() {
|
||||
unsafe {
|
||||
// ptr of component for the entity
|
||||
let comp_ptr = NonNull::new_unchecked(column_ptr.as_ptr()
|
||||
.add(record.index.0 as usize * column_info.layout().size()));
|
||||
dbun.push_unknown(comp_ptr, column_info);
|
||||
}
|
||||
}
|
||||
dbun.push_bundle(bundle);
|
||||
|
||||
let entity_arch_id = archetype.add_entity(entity, dbun, &tick);
|
||||
|
||||
self.archetypes.insert(new_arch_id, archetype);
|
||||
|
||||
// Create entity record and store it
|
||||
let record = Record {
|
||||
id: new_arch_id,
|
||||
index: entity_arch_id,
|
||||
};
|
||||
|
||||
self.entities.insert_entity_record(entity, record);
|
||||
}
|
||||
|
||||
let current_arch = self.archetypes.get_mut(&record.id).unwrap();
|
||||
if let Some((en, enar)) = current_arch.remove_entity(entity, &tick) {
|
||||
let rec = Record {
|
||||
id: current_arch.id(),
|
||||
index: enar
|
||||
};
|
||||
self.entities.insert_entity_record(en, rec);
|
||||
}
|
||||
|
||||
current_arch.ensure_synced();
|
||||
}
|
||||
|
||||
/// A method used for debugging implementation details of the ECS.
|
||||
///
|
||||
/// Here's an example of the output:
|
||||
/// ```nobuild
|
||||
/// Entities
|
||||
/// 1 in archetype 0 at 0
|
||||
/// 0 in archetype 1 at 0
|
||||
/// 2 in archetype 0 at 1
|
||||
/// 3 in archetype 2 at 0
|
||||
/// Arch 1 -- 1 entities
|
||||
/// Col 175564825027445222460146453544114453753
|
||||
/// 0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 78 86 0 0
|
||||
/// Col 162279302565774655543278578489329315472
|
||||
/// 0: 0 0 32 65 0 0 32 65 0 0 32 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 0 0 0 0
|
||||
/// Col 24291284537013640759061027938209843602
|
||||
/// 0: 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// Arch 2 -- 1 entities
|
||||
/// Col 175564825027445222460146453544114453753
|
||||
/// 0: 0 0 0 0 0 0 0 0 0 0 0 0 237 127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 78 86 0 0
|
||||
/// Col 162279302565774655543278578489329315472
|
||||
/// 0: 0 0 76 66 0 0 170 66 0 0 136 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 0 0 0 0
|
||||
/// Col 142862377085187052737282554588643015580
|
||||
/// 0: 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// Arch 0 -- 2 entities
|
||||
/// Col 175564825027445222460146453544114453753
|
||||
/// 0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 32 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 0 0 0 0
|
||||
/// 1: 0 0 0 0 0 0 0 0 0 0 0 0 237 127 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 78 86 0 0
|
||||
/// Col 162279302565774655543278578489329315472
|
||||
/// 0: 0 0 112 65 0 0 112 65 0 0 112 65 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 0 0 0 0
|
||||
/// 1: 0 0 27 67 0 0 184 65 0 0 192 64 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 128 63 0 0 128 63 0 0 128 63 0 0 128 63 0 0 0 0
|
||||
/// Col 142862377085187052737282554588643015580
|
||||
/// 0: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// 1: 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// Col 24291284537013640759061027938209843602
|
||||
/// 0: 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// 1: 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
/// Arch 3 -- 0 entities
|
||||
/// Col 175564825027445222460146453544114453753
|
||||
/// Col 162279302565774655543278578489329315472
|
||||
/// ```
|
||||
///
|
||||
/// This output prints all Entity ids, the archetype they're in, and the index they're at
|
||||
/// inside of that archetype. Additionally, the archetypes are printed, including their
|
||||
/// columns' type ids and the raw bytes stored in each column. This output can be used to debug
|
||||
/// the contents of entities inside the archetypes.
|
||||
///
|
||||
/// Below is a template of the output:
|
||||
/// ```nobuild
|
||||
/// Entities
|
||||
/// %ENTITY_ID% in archetype %ARCHETYPE_ID% at %INDEX%
|
||||
/// Arch ID -- %ARCHETYPE_LEN% entities
|
||||
/// %FOR EACH COL%
|
||||
/// Col COLUMN_COMPONENT_TYPE_ID
|
||||
/// %FOR EACH ENTITY%
|
||||
/// %ENTITY_INDEX%: %COMPONENT_BYTES%
|
||||
/// ```
|
||||
/// If the template above doesn't help you understand the output, read the source code
|
||||
/// of the function; it is short and straightforward.
|
||||
pub fn debug_print_world(&self) {
|
||||
println!("Entities");
|
||||
for (en, rec) in &self.entities.arch_index {
|
||||
println!(" {} in archetype {} at {}", en.0, rec.id.0, rec.index.0);
|
||||
}
|
||||
|
||||
for arch in self.archetypes.values() {
|
||||
println!("Arch {} -- {} entities", arch.id().0, arch.len());
|
||||
|
||||
for col in &arch.columns {
|
||||
// no clue if doing this is stable, but this is a debug function so :shrug:
|
||||
let tyid: u128 = unsafe { std::mem::transmute(col.info.type_id().as_rust()) };
|
||||
println!(" Col {}", tyid);
|
||||
|
||||
for en in 0..col.len {
|
||||
// get the ptr starting at the component
|
||||
let p = col.borrow_ptr();
|
||||
let p = unsafe { p.as_ptr().add(en * col.info.layout().size()) };
|
||||
|
||||
print!(" {}: ", en);
|
||||
// print each byte of the component
|
||||
for i in 0..col.info.layout().size() {
|
||||
let d = unsafe { *p.add(i) };
|
||||
print!("{} ", d);
|
||||
}
|
||||
println!();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn entity_archetype(&self, entity: Entity) -> Option<&Archetype> {
|
||||
self.entities.entity_record(entity)
|
||||
.and_then(|record| self.archetypes.get(&record.id))
|
||||
}
|
||||
|
||||
pub fn entity_archetype_mut(&mut self, entity: Entity) -> Option<&mut Archetype> {
|
||||
self.entities.entity_record(entity)
|
||||
.and_then(|record| self.archetypes.get_mut(&record.id))
|
||||
}
|
||||
|
||||
/// View into the world for a set of entities that satisfy the queries.
|
||||
pub fn view<Q: AsQuery>(&self) -> ViewState<Q::Query, ()> {
|
||||
self.filtered_view::<Q, ()>()
|
||||
}
|
||||
|
||||
/// View into the world for a set of entities that satisfy the query and the filter.
|
||||
pub fn filtered_view<Q: AsQuery, F: AsFilter>(&self) -> ViewState<Q::Query, F::Filter> {
|
||||
let archetypes = self.archetypes.values().collect();
|
||||
ViewState::<Q::Query, F::Filter>::new(self, Q::Query::new(), F::Filter::new(), archetypes)
|
||||
}
|
||||
|
||||
/// View into the world for a set of entities that satisfy the queries.
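/// # Example
///
/// A minimal sketch, borrowing the `Vec2` component used by this module's tests:
/// ```nobuild
/// let mut world = World::new();
/// world.spawn((Vec2 { x: 10.0, y: 15.0 },));
///
/// for pos in world.view_iter::<&Vec2>() {
///     println!("Found entity at {:?}", pos);
/// }
/// ```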
|
||||
pub fn view_iter<Q: AsQuery>(&self) -> ViewIter<Q::Query, ()> {
|
||||
let archetypes = self.archetypes.values().collect();
|
||||
let v = ViewState::new(self, Q::Query::new(), (), archetypes);
|
||||
v.into_iter()
|
||||
}
|
||||
|
||||
/// View into the world for a set of entities that satisfy the query and the filter.
|
||||
pub fn filtered_view_iter<Q: AsQuery, F: AsFilter>(&self) -> ViewIter<Q::Query, F::Filter> {
|
||||
let archetypes = self.archetypes.values().collect();
|
||||
let v = ViewState::new(self, Q::Query::new(), F::Filter::new(), archetypes);
|
||||
v.into_iter()
|
||||
}
|
||||
|
||||
pub fn dynamic_view(&self) -> DynamicView {
|
||||
DynamicView::new(self)
|
||||
}
|
||||
|
||||
pub fn view_one<T: AsQuery>(&self, entity: Entity) -> ViewOne<T::Query> {
|
||||
ViewOne::new(self, entity.id, T::Query::new())
|
||||
}
|
||||
|
||||
/// Add a resource to the world.
|
||||
pub fn add_resource<T: ResourceObject>(&mut self, data: T) {
|
||||
self.resources.insert(TypeId::of::<T>(), ResourceData::new(data, self.current_tick()));
|
||||
}
|
||||
|
||||
/// Add the default value of a resource.
|
||||
///
|
||||
/// > Note: This will replace existing values.
|
||||
pub fn add_resource_default<T: ResourceObject + Default>(&mut self) {
|
||||
self.resources.insert(TypeId::of::<T>(), ResourceData::new(T::default(), self.current_tick()));
|
||||
}
|
||||
|
||||
/// Add the default value of a resource if it does not already exist.
|
||||
///
|
||||
/// Returns a boolean indicating if the resource was added.
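/// # Example
///
/// A minimal sketch, assuming a hypothetical resource type that implements `Default`:
/// ```nobuild
/// #[derive(Default)]
/// struct Counter(u32);
///
/// assert!(world.add_resource_default_if_absent::<Counter>());
/// // the second call does nothing and returns false
/// assert!(!world.add_resource_default_if_absent::<Counter>());
/// ```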
|
||||
pub fn add_resource_default_if_absent<T: ResourceObject + Default>(&mut self) -> bool {
|
||||
let id = TypeId::of::<T>();
|
||||
if !self.resources.contains_key(&id) {
|
||||
self.resources.insert(id, ResourceData::new(T::default(), self.current_tick()));
|
||||
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a resource from the world, or insert it into the world with the provided
|
||||
/// `fn` and return it.
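/// # Example
///
/// A minimal sketch, assuming a hypothetical `Counter` resource type:
/// ```nobuild
/// struct Counter(u32);
///
/// let counter = world.get_resource_or_else(|| Counter(5));
/// assert_eq!(counter.0, 5);
/// ```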
|
||||
pub fn get_resource_or_else<T: ResourceObject, F>(&mut self, f: F) -> ResMut<T>
|
||||
where
|
||||
F: Fn() -> T + 'static
|
||||
{
|
||||
let tick = self.current_tick();
|
||||
let res = self.resources.entry(TypeId::of::<T>())
|
||||
.or_insert_with(|| ResourceData::new(f(), tick));
|
||||
|
||||
ResMut {
|
||||
inner: res.data.borrow_mut(),
|
||||
world_tick: tick,
|
||||
_marker: std::marker::PhantomData::<T>,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get a resource from the world, or insert its default value.
|
||||
pub fn get_resource_or_default<T: ResourceObject + Default>(&mut self) -> ResMut<T>
|
||||
{
|
||||
let tick = self.current_tick();
|
||||
let res = self.resources.entry(TypeId::of::<T>())
|
||||
.or_insert_with(|| ResourceData::new(T::default(), tick));
|
||||
|
||||
ResMut {
|
||||
inner: res.data.borrow_mut(),
|
||||
world_tick: tick,
|
||||
_marker: std::marker::PhantomData::<T>,
|
||||
}
|
||||
}
|
||||
|
||||
/// Gets a resource from the World.
|
||||
pub fn get_resource<T: ResourceObject>(&self) -> Option<Res<T>> {
|
||||
self.get_tracked_resource::<T>().map(|r| Res {
|
||||
inner: r,
|
||||
world_tick: self.current_tick(),
|
||||
_marker: std::marker::PhantomData::<T>,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the tick of a resource.
|
||||
///
|
||||
/// This tick represents the last time the resource was mutated.
|
||||
pub fn get_resource_tick<T: ResourceObject>(&self) -> Option<Tick> {
|
||||
self.get_tracked_resource::<T>().map(|r| r.tick)
|
||||
}
|
||||
|
||||
/// Gets a reference to a change tracked resource.
|
||||
///
|
||||
/// You will have to manually downcast the inner resource. Most people don't need this, see
|
||||
/// [`World::get_resource`].
|
||||
pub fn get_tracked_resource<T: ResourceObject>(&self) -> Option<AtomicRef<TrackedResource<dyn ResourceObject>>> {
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|r| r.data.borrow())
|
||||
}
|
||||
|
||||
/// Gets a mutable borrow to a change tracked resource.
|
||||
///
|
||||
/// You will have to manually downcast the inner resource. Most people don't need this, see
|
||||
/// [`World::get_resource_mut`].
|
||||
pub fn get_tracked_resource_mut<T: ResourceObject>(&self) -> Option<AtomicRefMut<TrackedResource<dyn ResourceObject>>> {
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|r| r.data.borrow_mut())
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if the resource changed.
|
||||
///
|
||||
/// This will return false if the resource doesn't exist.
|
||||
pub fn has_resource_changed<T: ResourceObject>(&self) -> bool {
|
||||
let tick = self.current_tick();
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|r| r.changed(tick))
|
||||
.unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Returns the [`Tick`] that the resource was last modified at.
|
||||
pub fn resource_tick<T: ResourceObject>(&self) -> Option<Tick> {
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|r| r.data.borrow().tick)
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if the World contains a resource of type `T`.
|
||||
pub fn has_resource<T: ResourceObject>(&self) -> bool {
|
||||
self.resources.contains_key(&TypeId::of::<T>())
|
||||
}
|
||||
|
||||
/// Gets a mutable borrow of a resource from the World.
|
||||
pub fn get_resource_mut<T: ResourceObject>(&self) -> Option<ResMut<T>> {
|
||||
self.get_tracked_resource_mut::<T>().map(|r| ResMut {
|
||||
inner: r,
|
||||
world_tick: self.current_tick(),
|
||||
_marker: std::marker::PhantomData::<T>,
|
||||
})
|
||||
}
|
||||
|
||||
/// Get the corresponding [`ResourceData`].
|
||||
pub fn get_resource_data<T: ResourceObject>(&self) -> Option<ResourceData> {
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|r| r.clone())
|
||||
}
|
||||
|
||||
/// Increments the world's current tick for tracking changes to components and resources.
|
||||
///
|
||||
/// # Note:
|
||||
/// For change tracking to work correctly, this must be run each loop before you run world
|
||||
/// systems.
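/// # Example
///
/// A sketch of a typical game loop that drives change tracking:
/// ```nobuild
/// loop {
///     world.tick();
///     // execute systems that read and mutate the world here
/// }
/// ```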
|
||||
pub fn tick(&self) -> Tick {
|
||||
self.tracker.tick()
|
||||
}
|
||||
|
||||
/// Gets the current tick that the world is at.
|
||||
///
|
||||
/// See [`World::tick`].
|
||||
pub fn current_tick(&self) -> Tick {
|
||||
self.tracker.current()
|
||||
}
|
||||
|
||||
pub fn tick_tracker(&self) -> &TickTracker {
|
||||
&self.tracker
|
||||
}
|
||||
|
||||
/// Attempts to find a resource in the world and returns a [`NonNull`] pointer to it.
|
||||
pub unsafe fn get_resource_ptr<T: ResourceObject>(&self) -> Option<NonNull<T>> {
|
||||
self.resources.get(&TypeId::of::<T>())
|
||||
.map(|d| unsafe {
|
||||
let data = d.data.borrow();
|
||||
let ptr = NonNull::from(&data.res);
|
||||
NonNull::new_unchecked(ptr.as_ptr() as *mut T)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn archetype_count(&self) -> usize {
|
||||
self.archetypes.len()
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Ensure that all non-send resources are only accessible on the main thread.
|
||||
unsafe impl Send for World {}
|
||||
unsafe impl Sync for World {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::{query::TickOf, tests::{Vec2, Vec3}, Entity};
|
||||
|
||||
use super::World;
|
||||
|
||||
struct SimpleCounter(i32);
|
||||
|
||||
#[test]
|
||||
fn spawning_entity() {
|
||||
let mut world = World::new();
|
||||
let _e = world.spawn((Vec2 {
|
||||
x: 10.0,
|
||||
y: 15.0,
|
||||
}, ));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn world_view_entities() {
|
||||
let mut world = World::new();
|
||||
world.spawn((Vec2 {
|
||||
x: 10.0,
|
||||
y: 15.0,
|
||||
}, ));
|
||||
world.spawn((Vec2 {
|
||||
x: 152.0,
|
||||
y: 3585.0,
|
||||
}, ));
|
||||
world.spawn((Vec2 {
|
||||
x: 235.0,
|
||||
y: 734.0,
|
||||
}, ));
|
||||
|
||||
let mut count = 0;
|
||||
for pos in world.view_iter::<&Vec2>() {
|
||||
println!("Found entity at {:?}", pos);
|
||||
count += 1;
|
||||
}
|
||||
assert!(count == 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn despawn_entity() {
|
||||
let mut world = World::new();
|
||||
world.spawn((Vec2::rand(),));
|
||||
let middle_en = world.spawn((Vec2::rand(),));
|
||||
let last_en = world.spawn((Vec2::rand(),));
|
||||
|
||||
world.despawn(middle_en);
|
||||
|
||||
let record = world.entities.entity_record(last_en).unwrap();
|
||||
assert_eq!(record.index.0, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn simple_resource() {
|
||||
let mut world = World::new();
|
||||
{
|
||||
let counter = SimpleCounter(0);
|
||||
world.add_resource(counter);
|
||||
}
|
||||
|
||||
let counter = world.get_resource::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
assert_eq!(counter.0, 0);
|
||||
drop(counter);
|
||||
|
||||
let mut counter = world.get_resource_mut::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
counter.0 += 4582;
|
||||
drop(counter);
|
||||
|
||||
assert!(world.get_resource::<u32>().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resource_multi_borrow() {
|
||||
let mut world = World::new();
|
||||
let counter = SimpleCounter(4582);
|
||||
world.add_resource(counter);
|
||||
|
||||
// test multiple borrows at the same time
|
||||
let counter = world.get_resource::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
assert_eq!(counter.0, 4582);
|
||||
let counter2 = world.get_resource::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
assert_eq!(counter.0, 4582);
|
||||
assert_eq!(counter2.0, 4582);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resource_one_mutable_borrow() {
|
||||
let mut world = World::new();
|
||||
{
|
||||
let counter = SimpleCounter(4582);
|
||||
world.add_resource(counter);
|
||||
}
|
||||
|
||||
// test that its only possible to get a single mutable borrow
|
||||
let counter = world.get_resource_mut::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
assert_eq!(counter.0, 4582);
|
||||
assert!(world.get_resource_mut::<SimpleCounter>().is_none());
|
||||
assert_eq!(counter.0, 4582);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_into_existing_archetype() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn((Vec2::rand(),));
|
||||
world.spawn((Vec2::rand(),Vec3::rand()));
|
||||
|
||||
world.insert(e, (Vec3::rand(),));
|
||||
|
||||
assert!(world.view_one::<&Vec3>(e).get().is_some())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_into_new_archetype() {
|
||||
let mut world = World::new();
|
||||
let e = world.spawn((Vec2::rand(),));
|
||||
|
||||
world.insert(e, (Vec3::rand(),));
|
||||
|
||||
assert!(world.view_one::<&Vec3>(e).get().is_some())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn insert_multiple_times() {
|
||||
let v2s = &[Vec2::rand(), Vec2::rand(), Vec2::rand()];
|
||||
let v3s = &[Vec3::rand(), Vec3::rand(), Vec3::rand()];
|
||||
|
||||
let mut world = World::new();
|
||||
let e1 = world.spawn(v2s[0]);
|
||||
let e2 = world.spawn(v2s[1]);
|
||||
let e3 = world.spawn(v2s[2]);
|
||||
println!("Spawned entities");
|
||||
|
||||
let ev2 = world.view_one::<&Vec2>(e2).get()
|
||||
.expect("Failed to find Vec2 and Vec3 on inserted entity!");
|
||||
assert_eq!(*ev2, v2s[1]);
|
||||
drop(ev2);
|
||||
|
||||
let insert_and_assert = |world: &mut World, e: Entity, v2: Vec2, v3: Vec3| {
|
||||
println!("inserting entity");
|
||||
world.insert(e, (v3,));
|
||||
println!("inserted entity");
|
||||
|
||||
let (ev2, ev3) = world.view_one::<(&Vec2, &Vec3)>(e).get()
|
||||
.expect("Failed to find Vec2 and Vec3 on inserted entity!");
|
||||
assert_eq!(*ev2, v2);
|
||||
assert_eq!(*ev3, v3);
|
||||
};
|
||||
|
||||
insert_and_assert(&mut world, e2, v2s[1], v3s[1]);
|
||||
println!("Entity 2 is good");
|
||||
insert_and_assert(&mut world, e3, v2s[2], v3s[2]);
|
||||
println!("Entity 3 is good");
|
||||
assert_eq!(world.archetypes.len(), 2);
|
||||
println!("No extra archetypes were created");
|
||||
|
||||
insert_and_assert(&mut world, e1, v2s[0], v3s[0]);
|
||||
println!("Entity 1 is good");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn view_one() {
|
||||
let v = Vec2::rand();
|
||||
|
||||
let mut world = World::new();
|
||||
let e = world.spawn((v,));
|
||||
|
||||
let view = world.view_one::<&Vec2>(e);
|
||||
assert_eq!(*view.get().unwrap(), v);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn view_change_tracking() {
|
||||
let mut world = World::new();
|
||||
|
||||
println!("spawning");
|
||||
world.spawn((Vec2::new(10.0, 10.0),));
|
||||
world.spawn((Vec2::new(5.0, 5.0),));
|
||||
println!("spawned");
|
||||
|
||||
for mut v in world.view_iter::<&mut Vec2>() {
|
||||
v.y += 50.0;
|
||||
println!("Moved v to {:?}", v);
|
||||
}
|
||||
|
||||
let world_tick = world.current_tick();
|
||||
println!("The world tick is {}", *world_tick);
|
||||
for (v, tick) in world.view_iter::<(&Vec2, TickOf<Vec2>)>() {
|
||||
println!("Is at {:?}, it was changed at {}", v, *tick);
|
||||
assert!(v.y > 50.0);
|
||||
assert!(tick >= world_tick);
|
||||
}
|
||||
}
|
||||
|
||||
/// Tests replacing components using World::insert
|
||||
#[test]
|
||||
fn entity_insert_replace() {
|
||||
let mut world = World::new();
|
||||
let first = world.spawn((Vec2::new(10.0, 10.0),));
|
||||
let second = world.spawn((Vec2::new(5.0, 5.0),));
|
||||
|
||||
world.insert(first, Vec2::new(50.0, 50.0));
|
||||
|
||||
let pos = world.view_one::<&mut Vec2>(first).get().unwrap();
|
||||
assert_eq!(*pos, Vec2::new(50.0, 50.0));
|
||||
drop(pos);
|
||||
|
||||
let pos = world.view_one::<&mut Vec2>(second).get().unwrap();
|
||||
assert_eq!(*pos, Vec2::new(5.0, 5.0));
|
||||
}
|
||||
|
||||
/// Tests resource change checks
|
||||
#[test]
|
||||
fn resource_changed() {
|
||||
let mut world = World::new();
|
||||
world.add_resource(SimpleCounter(50));
|
||||
|
||||
assert!(world.has_resource_changed::<SimpleCounter>());
|
||||
|
||||
world.spawn(Vec2::new(50.0, 50.0));
|
||||
|
||||
assert!(!world.has_resource_changed::<SimpleCounter>());
|
||||
|
||||
let mut counter = world.get_resource_mut::<SimpleCounter>()
|
||||
.expect("Counter resource is missing");
|
||||
counter.0 += 100;
|
||||
|
||||
assert!(world.has_resource_changed::<SimpleCounter>());
|
||||
}
|
||||
}
|
|
@ -1,47 +0,0 @@
|
|||
[package]
|
||||
name = "lyra-game"
|
||||
version = "0.0.1"
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
lyra-game-derive = { path = "./lyra-game-derive" }
|
||||
lyra-resource = { path = "../lyra-resource" }
|
||||
lyra-ecs = { path = "../lyra-ecs", features = [ "math" ] }
|
||||
lyra-reflect = { path = "../lyra-reflect", features = [ "math" ] }
|
||||
lyra-math = { path = "../lyra-math" }
|
||||
lyra-scene = { path = "../lyra-scene" }
|
||||
lyra-gltf = { path = "../lyra-gltf" }
|
||||
wgsl_preprocessor = { path = "../wgsl-preprocessor" }
|
||||
|
||||
winit = "0.30.5"
|
||||
wgpu = { version = "22.1.0" }
|
||||
|
||||
tracing = "0.1.37"
|
||||
tracing-subscriber = { version = "0.3.16", features = [ "tracing-log" ] }
|
||||
tracing-log = "0.2.0"
|
||||
tracing-appender = "0.2.2"
|
||||
tracing-tracy = { version = "0.11.0", optional = true }
|
||||
|
||||
async-std = { version = "1.12.0", features = [ "unstable", "attributes" ] }
|
||||
cfg-if = "1"
|
||||
bytemuck = { version = "1.12", features = [ "derive", "min_const_generics" ] }
|
||||
image = "0.25.2"
|
||||
anyhow = "1.0"
|
||||
instant = "0.1"
|
||||
async-trait = "0.1.65"
|
||||
glam = { version = "0.29.0", features = ["bytemuck", "debug-glam-assert"] }
|
||||
syn = "2.0.26"
|
||||
quote = "1.0.29"
|
||||
uuid = { version = "1.5.0", features = ["v4", "fast-rng"] }
|
||||
itertools = "0.13.0"
|
||||
thiserror = "1.0.56"
|
||||
unique = "0.9.1"
|
||||
rustc-hash = "2.0.0"
|
||||
petgraph = { version = "0.6.5", features = ["matrix_graph"] }
|
||||
bind_match = "0.1.2"
|
||||
round_mult = "0.1.3"
|
||||
fast_poisson = { version = "1.0.0", features = ["single_precision"] }
|
||||
atomic_refcell = "0.1.13"
|
||||
|
||||
[features]
|
||||
tracy = ["dep:tracing-tracy"]
|
|
@ -1,14 +0,0 @@
|
|||
[package]
|
||||
name = "lyra-game-derive"
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[lib]
|
||||
proc-macro = true
|
||||
|
||||
[dependencies]
|
||||
proc-macro2 = "1.0.70"
|
||||
quote = "1.0.33"
|
||||
syn = "2.0.41"
|
|
@ -1,35 +0,0 @@
|
|||
use quote::quote;
|
||||
use syn::{parse_macro_input, DeriveInput};
|
||||
|
||||
#[proc_macro_derive(RenderGraphLabel)]
|
||||
pub fn derive_render_graph_label(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
|
||||
let input = parse_macro_input!(input as DeriveInput);
|
||||
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
|
||||
|
||||
let type_ident = &input.ident;
|
||||
|
||||
proc_macro::TokenStream::from(quote! {
|
||||
impl #impl_generics crate::render::graph::RenderGraphLabel for #type_ident #ty_generics #where_clause {
|
||||
fn rc_clone(&self) -> std::rc::Rc<dyn crate::render::graph::RenderGraphLabel> {
|
||||
std::rc::Rc::new(self.clone())
|
||||
}
|
||||
|
||||
/* fn as_dyn(&self) -> &dyn crate::render::graph::RenderGraphLabel {
|
||||
&self
|
||||
}
|
||||
|
||||
fn as_partial_eq(&self) -> &dyn PartialEq<dyn crate::render::graph::RenderGraphLabel> {
|
||||
self
|
||||
} */
|
||||
|
||||
fn as_label_hash(&self) -> u64 {
|
||||
let tyid = ::std::any::TypeId::of::<Self>();
|
||||
|
||||
let mut s = ::std::hash::DefaultHasher::new();
|
||||
::std::hash::Hash::hash(&tyid, &mut s);
|
||||
::std::hash::Hash::hash(self, &mut s);
|
||||
::std::hash::Hasher::finish(&s)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
|
@ -1,216 +0,0 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use atomic_refcell::AtomicRefCell;
|
||||
use lyra_ecs::{query::{ResMut, WorldTick}, system::FnArgFetcher, Tick};
|
||||
|
||||
pub trait Event: Clone + Send + Sync + 'static {}
|
||||
impl<T: Clone + Send + Sync + 'static> Event for T {}
|
||||
|
||||
/// A Vec with other Vecs in it to track relative age of items.
|
||||
///
|
||||
/// The vec has 3 levels, a `newest`, `medium` and `old`. Items are pushed to the `newest`
|
||||
/// internal vec. When [`WaterfallVec::waterfall`] is called the items in `newest` are
|
||||
/// put into `medium`, and items in `medium` goes to `old`.
|
||||
///
|
||||
/// By checking the items in each internal vec, you can see a relative age between the items.
|
||||
/// The event system uses this to clear the `old` vec, ensuring that events are kept for only two
|
||||
/// frames at a time.
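///
/// A sketch of the relative aging, with `a` and `b` standing in for queued events:
/// ```nobuild
/// vec.push(a);        // a: newest
/// vec.waterfall();    // a: medium
/// vec.push(b);        // a: medium, b: newest
/// vec.waterfall();    // a: old,    b: medium
/// vec.clear_oldest(); // a dropped, b kept
/// ```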
|
||||
struct WaterfallVec<T> {
|
||||
newest: Vec<T>,
|
||||
medium: Vec<T>,
|
||||
old: Vec<T>,
|
||||
}
|
||||
|
||||
impl<T> Default for WaterfallVec<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
newest: Default::default(),
|
||||
medium: Default::default(),
|
||||
old: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> WaterfallVec<T> {
|
||||
fn total_len(&self) -> usize {
|
||||
self.newest.len() + self.medium.len() + self.old.len()
|
||||
}
|
||||
|
||||
fn get(&self, mut i: usize) -> Option<&T> {
|
||||
if i >= self.old.len() {
|
||||
i -= self.old.len();
|
||||
|
||||
if i >= self.medium.len() {
|
||||
i -= self.medium.len();
|
||||
self.newest.get(i)
|
||||
} else {
|
||||
self.medium.get(i)
|
||||
}
|
||||
} else {
|
||||
self.old.get(i)
|
||||
}
|
||||
}
|
||||
|
||||
/// Age elements.
|
||||
///
|
||||
/// This moves elements in `newest` to `medium` and elements in `medium` to `old`.
|
||||
/// This is what drives the relative age of the [`WaterfallVec`].
|
||||
fn waterfall(&mut self) {
|
||||
self.old.append(&mut self.medium);
|
||||
self.medium.append(&mut self.newest);
|
||||
}
|
||||
|
||||
/// Push a new element to the newest queue.
|
||||
fn push(&mut self, event: T) {
|
||||
self.newest.push(event);
|
||||
}
|
||||
|
||||
/// Clear oldest items.
|
||||
fn clear_oldest(&mut self) {
|
||||
self.old.clear();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Events<T: Event> {
|
||||
events: Arc<AtomicRefCell<WaterfallVec<T>>>,
|
||||
/// Used to track when the old events were last cleared.
|
||||
last_cleared_at: Tick,
|
||||
/// Used to indicate when the cursor in readers should be reset to zero.
|
||||
/// This becomes true after the old events are cleared.
|
||||
reset_cursor: bool,
|
||||
}
|
||||
|
||||
impl<T: Event> Default for Events<T> {
|
||||
fn default() -> Self {
|
||||
Self { events: Default::default(), last_cleared_at: Default::default(), reset_cursor: false }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Event> Events<T> {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn push_event(&mut self, event: T) {
|
||||
let mut events = self.events.borrow_mut();
|
||||
events.push(event);
|
||||
}
|
||||
|
||||
pub fn reader(&self) -> EventReader<T> {
|
||||
EventReader {
|
||||
events: self.events.clone(),
|
||||
cursor: Arc::new(AtomicRefCell::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn writer(&self) -> EventWriter<T> {
|
||||
EventWriter {
|
||||
events: self.events.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventReader<T: Event> {
|
||||
events: Arc<AtomicRefCell<WaterfallVec<T>>>,
|
||||
cursor: Arc<AtomicRefCell<usize>>,
|
||||
}
|
||||
|
||||
impl<T: Event> EventReader<T> {
|
||||
pub fn read(&self) -> Option<atomic_refcell::AtomicRef<T>> {
|
||||
let events = self.events.borrow();
|
||||
|
||||
let mut cursor = self.cursor.borrow_mut();
|
||||
if *cursor >= events.total_len() {
|
||||
None
|
||||
} else {
|
||||
let e = atomic_refcell::AtomicRef::map(events,
|
||||
|e| e.get(*cursor).unwrap());
|
||||
*cursor += 1;
|
||||
Some(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventWriter<T: Event> {
|
||||
events: Arc<AtomicRefCell<WaterfallVec<T>>>,
|
||||
}
|
||||
|
||||
impl<T: Event> EventWriter<T> {
|
||||
pub fn write(&self, event: T) {
|
||||
let mut events = self.events.borrow_mut();
|
||||
events.push(event);
|
||||
}
|
||||
}
|
||||
|
||||
/// Clean events of event type `T` every 2 ticks.
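///
/// A sketch of how this system is wired up; this is what [`App::register_event`] does
/// for you (`MyEvent` is a hypothetical event type):
/// ```nobuild
/// app.world.add_resource_default::<Events<MyEvent>>();
/// app.add_system_to_stage(GameStages::First, "myevent_cleaner_system", event_cleaner_system::<MyEvent>, &[]);
/// ```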
|
||||
pub fn event_cleaner_system<T>(tick: WorldTick, mut events: ResMut<Events<T>>) -> anyhow::Result<()>
|
||||
where
|
||||
T: Event
|
||||
{
|
||||
let last_tick = *events.last_cleared_at;
|
||||
let world_tick = **tick;
|
||||
|
||||
if last_tick + 2 < world_tick {
|
||||
events.last_cleared_at = *tick;
|
||||
events.reset_cursor = true;
|
||||
|
||||
let mut events = events.events.borrow_mut();
|
||||
events.clear_oldest();
|
||||
} else {
|
||||
events.reset_cursor = false;
|
||||
}
|
||||
|
||||
let mut events = events.events.borrow_mut();
|
||||
events.waterfall();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl<T: Event> FnArgFetcher for EventReader<T> {
|
||||
type State = Arc<AtomicRefCell<usize>>;
|
||||
|
||||
type Arg<'a, 'state> = EventReader<T>;
|
||||
|
||||
fn create_state(_: std::ptr::NonNull<lyra_ecs::World>) -> Self::State {
|
||||
Arc::new(AtomicRefCell::new(0))
|
||||
}
|
||||
|
||||
unsafe fn get<'a, 'state>(state: &'state mut Self::State, world: std::ptr::NonNull<lyra_ecs::World>) -> Self::Arg<'a, 'state> {
|
||||
let world = world.as_ref();
|
||||
let events = world.get_resource::<Events<T>>()
|
||||
.unwrap_or_else(|| panic!("world missing Events<{}> resource", std::any::type_name::<T>()));
|
||||
|
||||
if events.reset_cursor {
|
||||
let mut state_num = state.borrow_mut();
|
||||
*state_num = 0;
|
||||
}
|
||||
|
||||
let reader = EventReader {
|
||||
events: events.events.clone(),
|
||||
cursor: state.clone(),
|
||||
};
|
||||
|
||||
reader
|
||||
}
|
||||
|
||||
fn apply_deferred(_: Self::State, _: std::ptr::NonNull<lyra_ecs::World>) { }
|
||||
}
|
||||
|
||||
impl<T: Event> FnArgFetcher for EventWriter<T> {
|
||||
type State = ();
|
||||
|
||||
type Arg<'a, 'state> = EventWriter<T>;
|
||||
|
||||
fn create_state(_: std::ptr::NonNull<lyra_ecs::World>) -> Self::State {
|
||||
()
|
||||
}
|
||||
|
||||
unsafe fn get<'a, 'state>(_: &'state mut Self::State, world: std::ptr::NonNull<lyra_ecs::World>) -> Self::Arg<'a, 'state> {
|
||||
let world = world.as_ref();
|
||||
let events = world.get_resource::<Events<T>>()
|
||||
.unwrap_or_else(|| panic!("world missing Events<{}> resource", std::any::type_name::<T>()));
|
||||
events.writer()
|
||||
}
|
||||
|
||||
fn apply_deferred(_: Self::State, _: std::ptr::NonNull<lyra_ecs::World>) { }
|
||||
}
|
|
@ -1,235 +0,0 @@
|
|||
use std::{cell::OnceCell, collections::VecDeque, ptr::NonNull};
|
||||
|
||||
use lyra_ecs::{system::{IntoSystem, System}, ResourceObject, World};
|
||||
use lyra_math::IVec2;
|
||||
use tracing::{error, info, Level};
|
||||
use tracing_appender::non_blocking;
|
||||
use tracing_subscriber::{
|
||||
layer::SubscriberExt,
|
||||
filter,
|
||||
util::SubscriberInitExt, fmt,
|
||||
};
|
||||
|
||||
use crate::{event_cleaner_system, plugin::Plugin, render::renderer::Renderer, Event, Events, Stage, StagedExecutor};
|
||||
|
||||
#[derive(Clone, Copy, Hash, Debug)]
|
||||
pub enum GameStages {
|
||||
/// This stage runs before all other stages.
|
||||
First,
|
||||
/// This stage runs before `Update`.
|
||||
PreUpdate,
|
||||
/// This stage is where most game logic would be.
|
||||
Update,
|
||||
/// This stage runs after `Update`.
|
||||
PostUpdate,
|
||||
/// This stage runs after all other stages.
|
||||
Last,
|
||||
}
|
||||
|
||||
impl Stage for GameStages {}
|
||||
|
||||
pub struct Controls<'a> {
|
||||
pub world: &'a mut World,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default)]
|
||||
pub struct WindowState {
|
||||
/// Indicates if the window is currently focused.
|
||||
pub focused: bool,
|
||||
/// Indicates if the window is currently occluded.
|
||||
pub occluded: bool,
|
||||
/// Indicates if the cursor is inside of the window.
|
||||
pub cursor_inside_window: bool,
|
||||
pub position: IVec2,
|
||||
}
|
||||
|
||||
impl WindowState {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct App {
|
||||
pub(crate) renderer: OnceCell<Box<dyn Renderer>>,
|
||||
pub world: World,
|
||||
plugins: VecDeque<Box<dyn Plugin>>,
|
||||
startup_systems: VecDeque<Box<dyn System>>,
|
||||
staged_exec: StagedExecutor,
|
||||
run_fn: OnceCell<Box<dyn FnOnce(App)>>,
|
||||
}
|
||||
|
||||
impl App {
|
||||
pub fn new() -> Self {
|
||||
// init logging
|
||||
let (stdout_layer, stdout_nb) = non_blocking(std::io::stdout());
|
||||
{
|
||||
let t = tracing_subscriber::registry()
|
||||
.with(fmt::layer().with_writer(stdout_layer));
|
||||
|
||||
#[cfg(feature = "tracy")]
|
||||
let t = t.with(tracing_tracy::TracyLayer::default());
|
||||
|
||||
t.with(filter::Targets::new()
|
||||
// done by prefix, so it includes all lyra subpackages
|
||||
.with_target("lyra", Level::DEBUG)
|
||||
.with_target("wgsl_preprocessor", Level::INFO)
|
||||
.with_target("wgpu", Level::WARN)
|
||||
.with_target("winit", Level::DEBUG)
|
||||
.with_default(Level::INFO))
|
||||
.init();
|
||||
}
|
||||
|
||||
// store the logger worker guard to ensure logging still happens
|
||||
let mut world = World::new();
|
||||
world.add_resource(stdout_nb);
|
||||
|
||||
// initialize ecs system stages
|
||||
let mut staged = StagedExecutor::new();
|
||||
staged.add_stage(GameStages::First);
|
||||
staged.add_stage_after(GameStages::First, GameStages::PreUpdate);
|
||||
staged.add_stage_after(GameStages::PreUpdate, GameStages::Update);
|
||||
staged.add_stage_after(GameStages::Update, GameStages::PostUpdate);
|
||||
staged.add_stage_after(GameStages::PostUpdate, GameStages::Last);
|
||||
|
||||
Self {
|
||||
renderer: OnceCell::new(),
|
||||
world,
|
||||
plugins: Default::default(),
|
||||
startup_systems: Default::default(),
|
||||
staged_exec: staged,
|
||||
run_fn: OnceCell::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update(&mut self) {
|
||||
self.world.tick();
|
||||
let wptr = NonNull::from(&self.world);
|
||||
|
||||
if let Err(e) = self.staged_exec.execute(wptr, true) {
|
||||
error!("Error when executing staged systems: '{}'", e);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn on_resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
|
||||
self.renderer.get_mut()
|
||||
.expect("renderer was not initialized")
|
||||
.on_resize(&mut self.world, new_size);
|
||||
}
|
||||
|
||||
pub(crate) fn on_exit(&mut self) {
|
||||
info!("On exit!");
|
||||
}
|
||||
|
||||
pub fn add_resource<T: ResourceObject>(&mut self, data: T) {
|
||||
self.world.add_resource(data);
|
||||
}
|
||||
|
||||
/// Add a system to the ECS world's `Update` stage.
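///
/// A minimal sketch (`movement_system` and `attack_system` are hypothetical systems):
/// ```nobuild
/// app.with_system("movement", movement_system, &[]);
/// // a system can depend on other systems by name:
/// app.with_system("attack", attack_system, &["movement"]);
/// ```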
|
||||
pub fn with_system<S, A>(&mut self, name: &str, system: S, depends: &[&str]) -> &mut Self
|
||||
where
|
||||
S: IntoSystem<A>,
|
||||
<S as IntoSystem<A>>::System: 'static
|
||||
{
|
||||
self.staged_exec.add_system_to_stage(GameStages::Update, name, system.into_system(), depends);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a stage.
|
||||
///
|
||||
/// This stage could run at any moment if nothing is dependent on it.
|
||||
pub fn add_stage<T: Stage>(&mut self, stage: T) -> &mut Self {
|
||||
self.staged_exec.add_stage(stage);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a stage that executes after another one.
|
||||
///
|
||||
/// Parameters:
|
||||
/// * `before` - The stage that will run before `after`.
|
||||
/// * `after` - The stage that will run after `before`.
|
||||
pub fn add_stage_after<T: Stage, U: Stage>(&mut self, before: T, after: U) -> &mut Self {
|
||||
self.staged_exec.add_stage_after(before, after);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a system to an already existing stage.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the stage was not already added to the executor
|
||||
pub fn add_system_to_stage<T, S, A>(&mut self, stage: T,
|
||||
name: &str, system: S, depends: &[&str]) -> &mut Self
|
||||
where
|
||||
T: Stage,
|
||||
S: IntoSystem<A>,
|
||||
<S as IntoSystem<A>>::System: 'static
|
||||
{
|
||||
self.staged_exec.add_system_to_stage(stage, name, system.into_system(), depends);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a startup system that will be run right after plugins are set up.
|
||||
/// They will only be run once.
|
||||
pub fn with_startup_system<S>(&mut self, system: S) -> &mut Self
|
||||
where
|
||||
S: System + 'static
|
||||
{
|
||||
self.startup_systems.push_back(Box::new(system));
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a plugin to the game. These are executed as they are added.
|
||||
pub fn with_plugin<P>(&mut self, mut plugin: P) -> &mut Self
|
||||
where
|
||||
P: Plugin + 'static
|
||||
{
|
||||
plugin.setup(self);
|
||||
let plugin = Box::new(plugin);
|
||||
self.plugins.push_back(plugin);
|
||||
|
||||
self
|
||||
}
|
||||
|
||||
/// Override the default (empty) world
|
||||
///
|
||||
/// This isn't recommended; instead, create a startup system and add it with [`App::with_startup_system`].
|
||||
pub fn with_world(&mut self, world: World) -> &mut Self {
|
||||
self.world = world;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn set_run_fn<F>(&self, f: F)
|
||||
where
|
||||
F: FnOnce(App) + 'static
|
||||
{
|
||||
// ignore if a runner function was already set
|
||||
let _ = self.run_fn.set(Box::new(f));
|
||||
}
|
||||
|
||||
pub fn run(mut self) {
|
||||
let f = self.run_fn.take()
|
||||
.expect("No run function set");
|
||||
f(self);
|
||||
}
|
||||
|
||||
pub fn register_event<T: Event>(&mut self) {
|
||||
let world = &mut self.world;
|
||||
// only register the event if it isn't already registered.
|
||||
if !world.has_resource::<Events<T>>() {
|
||||
world.add_resource_default::<Events<T>>();
|
||||
let sys_name = format!("{}_event_cleaner_system", std::any::type_name::<T>().to_lowercase());
|
||||
self.add_system_to_stage(GameStages::First, &sys_name, event_cleaner_system::<T>, &[]);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn push_event<T: Event>(&mut self, event: T) {
|
||||
let world = &mut self.world;
|
||||
let mut events = world.get_resource_mut::<Events<T>>()
|
||||
.expect("missing events for event type! Must use `App::register_event` first");
|
||||
events.push_event(event);
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,138 +0,0 @@
|
|||
use std::ops::Deref;
|
||||
|
||||
use glam::Vec2;
|
||||
use lyra_ecs::query::ResMut;
|
||||
use winit::{event::{MouseScrollDelta, WindowEvent}, keyboard::PhysicalKey};
|
||||
|
||||
use crate::{game::GameStages, plugin::Plugin, winit::DeviceEventPair, EventReader, EventWriter};
|
||||
|
||||
use super::{events::*, InputButtons, KeyCode};
|
||||
|
||||
fn write_scroll_delta(mouse_scroll_ev: &mut EventWriter<MouseScroll>, delta: &MouseScrollDelta) {
|
||||
let event = match delta {
|
||||
MouseScrollDelta::LineDelta(x, y) => MouseScroll {
|
||||
unit: MouseScrollUnit::Line(Vec2::new(*x, *y)),
|
||||
},
|
||||
MouseScrollDelta::PixelDelta(delta) => MouseScroll {
|
||||
unit: MouseScrollUnit::Pixel(Vec2::new(delta.x as f32, delta.y as f32)),
|
||||
},
|
||||
};
|
||||
|
||||
mouse_scroll_ev.write(event);
|
||||
}
|
||||
|
||||
fn write_key_event(key_buttons: &mut ResMut<InputButtons<KeyCode>>, physical_key: PhysicalKey, state: winit::event::ElementState) {
|
||||
if let PhysicalKey::Code(code) = physical_key {
|
||||
key_buttons.add_input_from_winit(KeyCode::from(code), state);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn input_system(
|
||||
mut key_code_res: ResMut<InputButtons<KeyCode>>,
|
||||
mut mouse_btn_res: ResMut<InputButtons<MouseButton>>,
|
||||
mut touches_res: ResMut<Touches>,
|
||||
window_ev: EventReader<WindowEvent>,
|
||||
device_ev: EventReader<DeviceEventPair>,
|
||||
mut mouse_scroll_ev: EventWriter<MouseScroll>,
|
||||
mouse_btn_ev: EventWriter<MouseButton>,
|
||||
mouse_exact_ev: EventWriter<MouseExact>,
|
||||
mouse_entered_ev: EventWriter<CursorEnteredWindow>,
|
||||
mouse_left_ev: EventWriter<CursorLeftWindow>,
|
||||
mouse_motion_ev: EventWriter<MouseMotion>,
|
||||
) -> anyhow::Result<()> {
|
||||
while let Some(event) = window_ev.read() {
|
||||
match event.deref() {
|
||||
WindowEvent::KeyboardInput { event, .. } => {
|
||||
write_key_event(&mut key_code_res, event.physical_key, event.state);
|
||||
},
|
||||
WindowEvent::CursorMoved { position, .. } => {
|
||||
let exact = MouseExact {
|
||||
pos: Vec2::new(position.x as f32, position.y as f32)
|
||||
};
|
||||
|
||||
mouse_exact_ev.write(exact);
|
||||
},
|
||||
WindowEvent::CursorEntered { .. } => {
|
||||
mouse_entered_ev.write(CursorEnteredWindow);
|
||||
},
|
||||
WindowEvent::CursorLeft { .. } => {
|
||||
mouse_left_ev.write(CursorLeftWindow);
|
||||
},
|
||||
WindowEvent::MouseWheel { delta, .. } => {
|
||||
write_scroll_delta(&mut mouse_scroll_ev, delta);
|
||||
},
|
||||
WindowEvent::MouseInput { button, state, .. } => {
|
||||
let button_event = match button {
|
||||
winit::event::MouseButton::Left => MouseButton::Left,
|
||||
winit::event::MouseButton::Right => MouseButton::Right,
|
||||
winit::event::MouseButton::Middle => MouseButton::Middle,
|
||||
winit::event::MouseButton::Back => MouseButton::Back,
|
||||
winit::event::MouseButton::Forward => MouseButton::Forward,
|
||||
winit::event::MouseButton::Other(v) => MouseButton::Other(*v),
|
||||
};
|
||||
|
||||
mouse_btn_ev.write(button_event);
|
||||
mouse_btn_res.add_input_from_winit(button_event, *state);
|
||||
},
|
||||
WindowEvent::Touch(t) => {
|
||||
let touch = Touch {
|
||||
phase: TouchPhase::from(t.phase),
|
||||
location: Vec2::new(t.location.x as f32, t.location.y as f32),
|
||||
force: t.force.map(Force::from),
|
||||
finger_id: t.id,
|
||||
};
|
||||
|
||||
touches_res.touches.push(touch);
|
||||
},
|
||||
_ => {},
|
||||
}
|
||||
}
|
||||
|
||||
while let Some(device) = device_ev.read() {
|
||||
match &device.event {
|
||||
winit::event::DeviceEvent::Motion { .. } => {
|
||||
// TODO: handle device motion events
|
||||
// A todo! isn't used since these are triggered alongside MouseMotion events
|
||||
}
|
||||
winit::event::DeviceEvent::MouseMotion { delta } => {
|
||||
let delta = MouseMotion {
|
||||
delta: Vec2::new(delta.0 as f32, delta.1 as f32)
|
||||
};
|
||||
|
||||
mouse_motion_ev.write(delta);
|
||||
},
|
||||
winit::event::DeviceEvent::MouseWheel { delta } => {
|
||||
write_scroll_delta(&mut mouse_scroll_ev, delta);
|
||||
},
|
||||
winit::event::DeviceEvent::Key(key) => {
|
||||
write_key_event(&mut key_code_res, key.physical_key, key.state);
|
||||
},
|
||||
_ => {
|
||||
todo!("unhandled device event: {:?}", device.event);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Plugin that sets up input resources and events and runs [`input_system`].
|
||||
#[derive(Default)]
|
||||
pub struct InputPlugin;
|
||||
|
||||
impl Plugin for InputPlugin {
|
||||
fn setup(&mut self, app: &mut crate::game::App) {
|
||||
app.add_resource(InputButtons::<KeyCode>::default());
|
||||
app.add_resource(InputButtons::<MouseButton>::default());
|
||||
app.add_resource(Touches::default());
|
||||
|
||||
app.register_event::<MouseScroll>();
|
||||
app.register_event::<MouseButton>();
|
||||
app.register_event::<MouseMotion>();
|
||||
app.register_event::<MouseExact>();
|
||||
app.register_event::<CursorEnteredWindow>();
|
||||
app.register_event::<CursorLeftWindow>();
|
||||
|
||||
app.add_system_to_stage(GameStages::PreUpdate, "input", input_system, &[]);
|
||||
}
|
||||
}
|
|
@ -1,297 +0,0 @@
|
|||
use std::{alloc::Layout, cmp, marker::PhantomData, mem};
|
||||
|
||||
use std::{alloc, ptr};
|
||||
use unique::Unique;
|
||||
|
||||
/// A [`Vec`] with its elements aligned to a runtime alignment value.
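///
/// A minimal sketch, storing `u16` elements in slots aligned to 8 bytes:
/// ```nobuild
/// let mut v = AVec::<u16>::new(8);
/// v.push(5);
/// v.push(7);
/// assert_eq!(v.len(), 2);
/// assert_eq!(v.align(), 8);
/// ```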
|
||||
pub struct AVec<T> {
|
||||
buf: Unique<u8>,
|
||||
cap: usize,
|
||||
len: usize,
|
||||
align: usize,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> AVec<T> {
|
||||
// Tiny Vecs are dumb. Skip to:
|
||||
// - 8 if the element size is 1, because any heap allocators are likely
|
||||
// to round up a request of less than 8 bytes to at least 8 bytes.
|
||||
// - 4 if elements are moderate-sized (<= 1 KiB).
|
||||
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
|
||||
//
|
||||
// Taken from Rust's standard library RawVec
|
||||
pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
|
||||
8
|
||||
} else if mem::size_of::<T>() <= 1024 {
|
||||
4
|
||||
} else {
|
||||
1
|
||||
};
|
||||
|
||||
#[inline]
|
||||
pub fn new(alignment: usize) -> Self {
|
||||
debug_assert!(mem::size_of::<T>() > 0, "ZSTs not yet supported");
|
||||
|
||||
Self {
|
||||
buf: Unique::dangling(),
|
||||
cap: 0,
|
||||
len: 0,
|
||||
align: alignment,
|
||||
_marker: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
/// Constructs a new, empty `AVec` with at least the specified capacity.
|
||||
///
|
||||
/// The aligned vector will be able to hold at least `capacity` elements without reallocating.
|
||||
/// This method may allocate for more elements than `capacity`. If `capacity` is zero,
|
||||
/// the vector will not allocate.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
pub fn with_capacity(alignment: usize, capacity: usize) -> Self {
|
||||
let mut s = Self::new(alignment);
|
||||
|
||||
if capacity > 0 {
|
||||
unsafe {
|
||||
s.grow_amortized(0, capacity);
|
||||
}
|
||||
}
|
||||
|
||||
s
|
||||
}
|
||||
|
||||
/// Calculates the size of the 'slot' for a single **aligned** item.
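///
/// For example, with a runtime alignment of 16, `a = 15` and the computed value is rounded
/// up to the next multiple of 16: `(10 + 15) & !15 == 16` and `(17 + 15) & !15 == 32`.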
|
||||
#[inline(always)]
|
||||
fn slot_size(&self) -> usize {
|
||||
let a = self.align - 1;
|
||||
(mem::align_of::<T>() + (a)) & !a
|
||||
}
|
||||
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
unsafe fn grow_amortized(&mut self, len: usize, additional: usize) {
|
||||
debug_assert!(additional > 0);
|
||||
|
||||
let required_cap = len.checked_add(additional)
|
||||
.expect("Capacity overflow");
|
||||
|
||||
let cap = cmp::max(self.cap * 2, required_cap);
|
||||
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
|
||||
|
||||
let new_layout = Layout::from_size_align_unchecked(cap * self.slot_size(), self.align);
|
||||
|
||||
let ptr = alloc::alloc(new_layout);
|
||||
self.buf = Unique::new_unchecked(ptr);
|
||||
self.cap = cap;
|
||||
}
|
||||
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
unsafe fn grow_exact(&mut self, len: usize, additional: usize) {
|
||||
debug_assert!(additional > 0);
|
||||
|
||||
let cap = len.checked_add(additional)
|
||||
.expect("Capacity overflow");
|
||||
|
||||
let new_layout = Layout::from_size_align_unchecked(cap * self.slot_size(), self.align);
|
||||
|
||||
let ptr = alloc::alloc(new_layout);
|
||||
self.buf = Unique::new_unchecked(ptr);
|
||||
self.cap = cap;
|
||||
}
|
||||
|
||||
/// Reserves capacity for at least `additional` more elements.
|
||||
///
|
||||
/// The collection may reserve more space to speculatively avoid frequent reallocations.
|
||||
/// After calling `reserve`, capacity will be greater than or equal to
|
||||
/// `self.len() + additional`. Does nothing if capacity is already sufficient.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
pub fn reserve(&mut self, additional: usize) {
|
||||
debug_assert!(additional > 0);
|
||||
|
||||
let remaining = self.capacity().wrapping_sub(self.len);
|
||||
|
||||
if additional > remaining {
|
||||
unsafe { self.grow_amortized(self.len, additional) };
|
||||
}
|
||||
}
|
||||
|
||||
/// Reserves capacity for `additional` more elements.
|
||||
///
|
||||
/// Unlike [`reserve`], this will not over-allocate to speculatively avoid frequent
|
||||
/// reallocations. After calling `reserve_exact`, capacity will be equal to
|
||||
/// `self.len() + additional`. Does nothing if the capacity is already sufficient.
|
||||
///
|
||||
/// Prefer [`reserve`] if future insertions are expected.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
pub fn reserve_exact(&mut self, additional: usize) {
|
||||
let remaining = self.capacity().wrapping_sub(self.len);
|
||||
|
||||
if additional > remaining {
|
||||
unsafe { self.grow_exact(self.len, additional) };
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends an element to the back of the collection.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
|
||||
#[inline]
|
||||
pub fn push(&mut self, val: T) {
|
||||
if self.len == self.cap {
|
||||
self.reserve(self.slot_size());
|
||||
}
|
||||
|
||||
unsafe {
|
||||
// SAFETY: the length is ensured to be less than the capacity.
|
||||
self.set_at_unchecked(self.len, val);
|
||||
}
|
||||
|
||||
self.len += 1;
|
||||
}
|
||||
|
||||
/// Sets an element at position `idx` within the vector to `val`.
|
||||
///
|
||||
/// # Safety
|
||||
///
|
||||
/// If `idx >= self.len`, bytes past the length of the vector will be written to, potentially
|
||||
/// also writing past the capacity of the vector.
|
||||
#[inline(always)]
|
||||
unsafe fn set_at_unchecked(&mut self, idx: usize, val: T) {
|
||||
let ptr = self.buf
|
||||
.as_ptr()
|
||||
.add(idx * self.slot_size());
|
||||
|
||||
std::ptr::write(ptr.cast::<T>(), val);
|
||||
}
|
||||
|
||||
/// Sets an element at position `idx` within the vector to `val`.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if `idx >= self.len`.
|
||||
#[inline(always)]
|
||||
pub fn set_at(&mut self, idx: usize, val: T) {
|
||||
assert!(self.len > idx);
|
||||
|
||||
unsafe {
|
||||
self.set_at_unchecked(idx, val);
|
||||
}
|
||||
}
|
||||
|
||||
/// Shortens the vector, keeping the first `len` elements and dropping the rest.
|
||||
///
|
||||
/// If `len` is greater than or equal to the vector’s current length, this has no effect.
|
||||
#[inline]
|
||||
pub fn truncate(&mut self, len: usize) {
|
||||
if len > self.len {
|
||||
return;
|
||||
}
|
||||
|
||||
unsafe {
|
||||
// drop each element past the new length
|
||||
for i in len..self.len {
|
||||
let ptr = self.buf.as_ptr()
|
||||
.add(i * self.slot_size())
|
||||
.cast::<T>();
|
||||
|
||||
ptr::drop_in_place(ptr);
|
||||
}
|
||||
}
|
||||
|
||||
self.len = len;
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn as_ptr(&self) -> *const u8 {
|
||||
self.buf.as_ptr()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn as_mut_ptr(&self) -> *mut u8 {
|
||||
self.buf.as_ptr()
|
||||
}
|
||||
|
||||
/// Returns the alignment of the elements in the vector.
|
||||
#[inline(always)]
|
||||
pub fn align(&self) -> usize {
|
||||
self.align
|
||||
}
|
||||
|
||||
/// Returns the length of the vector.
|
||||
#[inline(always)]
|
||||
pub fn len(&self) -> usize {
|
||||
self.len
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.len == 0
|
||||
}
|
||||
|
||||
/// Returns the capacity of the vector.
|
||||
///
|
||||
/// The capacity is the number of elements that the vector can store without reallocating.
|
||||
#[inline(always)]
|
||||
pub fn capacity(&self) -> usize {
|
||||
self.cap
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone> AVec<T> {
|
||||
/// Resizes the `AVec` in place so that `len` is equal to `new_len`.
|
||||
///
|
||||
/// If `new_len` is greater than `len`, the `AVec` is extended by the difference, and
|
||||
/// each additional slot is filled with `value`. If `new_len` is less than `len`,
|
||||
/// the `AVec` will be truncated to `new_len`.
|
||||
///
|
||||
/// This method requires `T` to implement [`Clone`] in order to clone the passed value.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the new capacity exceeds `usize::MAX` bytes.
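/// # Example
///
/// A minimal sketch:
/// ```nobuild
/// let mut v = AVec::<u32>::with_capacity(16, 4);
/// v.resize(3, 7);
/// assert_eq!(v.len(), 3);
/// v.resize(1, 7);
/// assert_eq!(v.len(), 1);
/// ```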
|
||||
#[inline]
|
||||
pub fn resize(&mut self, new_len: usize, value: T) {
|
||||
if new_len > self.len {
|
||||
self.reserve(new_len - self.len);
|
||||
|
||||
unsafe {
|
||||
let mut ptr = self.buf
|
||||
.as_ptr().add(self.len * self.slot_size());
|
||||
|
||||
// write all elements besides the last one
|
||||
for _ in 1..new_len {
|
||||
std::ptr::write(ptr.cast::<T>(), value.clone());
|
||||
ptr = ptr.add(self.slot_size());
|
||||
self.len += 1;
|
||||
}
|
||||
|
||||
if new_len > 0 {
|
||||
// the last element can be written without cloning
|
||||
std::ptr::write(ptr.cast::<T>(), value.clone());
|
||||
self.len += 1;
|
||||
}
|
||||
|
||||
self.len = new_len;
|
||||
}
|
||||
} else {
|
||||
self.truncate(new_len);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,101 +0,0 @@
|
|||
use std::{collections::VecDeque, sync::Arc};
|
||||
|
||||
use tracing::instrument;
|
||||
|
||||
use super::{RenderGraphLabel, RenderGraphLabelValue};
|
||||
|
||||
/// A queued write to a GPU buffer targeting a graph slot.
|
||||
pub(crate) struct GraphBufferWrite {
|
||||
/// The label of the slot that holds the buffer that will be written to.
|
||||
pub(crate) target_slot: RenderGraphLabelValue,
|
||||
pub(crate) offset: u64,
|
||||
pub(crate) bytes: Vec<u8>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct RenderGraphContext<'a> {
|
||||
/// The [`wgpu::CommandEncoder`] used to encode GPU operations.
|
||||
///
|
||||
/// This is `None` during the `prepare` stage.
|
||||
pub encoder: Option<wgpu::CommandEncoder>,
|
||||
/// The gpu device that is being used.
|
||||
pub device: Arc<wgpu::Device>,
|
||||
pub queue: Arc<wgpu::Queue>,
|
||||
pub(crate) buffer_writes: VecDeque<GraphBufferWrite>,
|
||||
renderpass_desc: Vec<wgpu::RenderPassDescriptor<'a>>,
|
||||
/// The label of this Node.
|
||||
pub label: RenderGraphLabelValue,
|
||||
}
|
||||
|
||||
impl<'a> RenderGraphContext<'a> {
|
||||
pub(crate) fn new(device: Arc<wgpu::Device>, queue: Arc<wgpu::Queue>, encoder: Option<wgpu::CommandEncoder>, label: RenderGraphLabelValue) -> Self {
|
||||
Self {
|
||||
encoder,
|
||||
device,
|
||||
queue,
|
||||
buffer_writes: Default::default(),
|
||||
renderpass_desc: vec![],
|
||||
label,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn begin_render_pass(
|
||||
&'a mut self,
|
||||
desc: wgpu::RenderPassDescriptor<'a>,
|
||||
) -> wgpu::RenderPass<'a> {
|
||||
self.encoder
|
||||
.as_mut()
|
||||
.expect(
|
||||
"RenderGraphContext is missing a command encoder. This is likely \
|
||||
because you are trying to run render commands in the prepare stage.",
|
||||
)
|
||||
.begin_render_pass(&desc)
|
||||
}
|
||||
|
||||
pub fn begin_compute_pass(&mut self, desc: &wgpu::ComputePassDescriptor) -> wgpu::ComputePass {
|
||||
self.encoder
|
||||
.as_mut()
|
||||
.expect(
|
||||
"RenderGraphContext is missing a command encoder. This is likely \
|
||||
because you are trying to run render commands in the prepare stage.",
|
||||
)
|
||||
.begin_compute_pass(desc)
|
||||
}
|
||||
|
||||
/// Queue a data write to a buffer that is contained in `target_slot`.
|
||||
///
|
||||
/// This does not submit the data to the GPU immediately, nor add it to the `wgpu::Queue`. The
|
||||
/// data will be submitted to the GPU queue right after the prepare stage for all passes
|
||||
/// has run.
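///
/// A minimal sketch from a node's `prepare` stage (`LightBufferSlot` is a hypothetical
/// slot label):
/// ```nobuild
/// let uniform = [0.0f32; 4];
/// ctx.queue_buffer_write(LightBufferSlot, 0, bytemuck::cast_slice(&uniform));
/// ```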
|
||||
#[instrument(skip(self, bytes), level="trace", fields(size = bytes.len()))]
|
||||
pub fn queue_buffer_write(&mut self, target_slot: impl RenderGraphLabel, offset: u64, bytes: &[u8]) {
|
||||
self.buffer_writes.push_back(GraphBufferWrite {
|
||||
target_slot: target_slot.into(),
|
||||
offset,
|
||||
bytes: bytes.to_vec(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Queue a data write of a typed value to a buffer that is contained in `target_slot`.
|
||||
#[instrument(skip(self, bytes), level="trace", fields(size = std::mem::size_of::<T>()))]
|
||||
pub fn queue_buffer_write_with<T: bytemuck::NoUninit>(
|
||||
&mut self,
|
||||
target_slot: impl RenderGraphLabel,
|
||||
offset: u64,
|
||||
bytes: T,
|
||||
) {
|
||||
self.queue_buffer_write(target_slot, offset, bytemuck::bytes_of(&bytes));
|
||||
}
|
||||
|
||||
/// Submit the encoder to the gpu queue.
|
||||
///
|
||||
/// The `encoder` of this context will be `None` until the next node is executed, then another
|
||||
/// one will be made. You likely don't need to run this yourself unless you are manually
|
||||
/// presenting a surface texture.
|
||||
pub fn submit_encoder(&mut self) {
|
||||
let en = self.encoder.take()
|
||||
.unwrap()
|
||||
.finish();
|
||||
self.queue.submit(std::iter::once(en));
|
||||
}
|
||||
}
|
|
@ -1,572 +0,0 @@
|
|||
mod node;
|
||||
use std::{
|
||||
cell::{Ref, RefCell, RefMut}, collections::VecDeque, fmt::Debug, hash::Hash, rc::Rc, sync::Arc
|
||||
};
|
||||
|
||||
use lyra_ecs::World;
|
||||
pub use node::*;
|
||||
|
||||
mod passes;
|
||||
pub use passes::*;
|
||||
|
||||
mod slot_desc;
|
||||
pub use slot_desc::*;
|
||||
|
||||
mod context;
|
||||
pub use context::*;
|
||||
|
||||
mod render_target;
|
||||
pub use render_target::*;
|
||||
|
||||
use rustc_hash::FxHashMap;
|
||||
use tracing::{debug_span, instrument, trace, warn};
|
||||
use wgpu::CommandEncoder;
|
||||
|
||||
use super::resource::{ComputePipeline, Pass, Pipeline, RenderPipeline};
|
||||
|
||||
/// A trait that represents the label of a resource, slot, or node in the [`RenderGraph`].
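///
/// Labels are typically small unit structs; `lyra-game-derive` provides a derive macro for
/// this trait (`BasePassLabel` is a hypothetical label):
/// ```nobuild
/// #[derive(Debug, Clone, Copy, Hash, RenderGraphLabel)]
/// pub struct BasePassLabel;
/// ```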
|
||||
pub trait RenderGraphLabel: Debug + 'static {
|
||||
fn rc_clone(&self) -> Rc<dyn RenderGraphLabel>;
|
||||
fn as_label_hash(&self) -> u64;
|
||||
|
||||
fn label_eq_rc(&self, other: &Rc<dyn RenderGraphLabel>) -> bool {
|
||||
self.as_label_hash() == other.as_label_hash()
|
||||
}
|
||||
|
||||
fn label_eq(&self, other: &dyn RenderGraphLabel) -> bool {
|
||||
self.as_label_hash() == other.as_label_hash()
|
||||
}
|
||||
}
|
||||
|
||||
/// An owned [`RenderGraphLabel`].
|
||||
#[derive(Clone)]
|
||||
pub struct RenderGraphLabelValue(Rc<dyn RenderGraphLabel>);
|
||||
|
||||
impl Debug for RenderGraphLabelValue {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
self.0.fmt(f)
|
||||
}
|
||||
}
|
||||
|
||||
impl<L: RenderGraphLabel> From<L> for RenderGraphLabelValue {
|
||||
fn from(value: L) -> Self {
|
||||
Self(Rc::new(value))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Rc<dyn RenderGraphLabel>> for RenderGraphLabelValue {
|
||||
fn from(value: Rc<dyn RenderGraphLabel>) -> Self {
|
||||
Self(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&Rc<dyn RenderGraphLabel>> for RenderGraphLabelValue {
|
||||
fn from(value: &Rc<dyn RenderGraphLabel>) -> Self {
|
||||
Self(value.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Hash for RenderGraphLabelValue {
|
||||
fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
|
||||
state.write_u64(self.0.as_label_hash());
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialEq for RenderGraphLabelValue {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.0.label_eq_rc(&other.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl Eq for RenderGraphLabelValue {}
|
||||
|
||||
struct NodeEntry {
|
||||
/// The Node
|
||||
inner: Arc<RefCell<dyn Node>>,
|
||||
/// The Node descriptor
|
||||
desc: Rc<RefCell<NodeDesc>>,
|
||||
/// The index of the node in the execution graph
|
||||
graph_index: petgraph::matrix_graph::NodeIndex<usize>,
|
||||
/// The Node's optional pipeline
|
||||
pipeline: Rc<RefCell<Option<Pipeline>>>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
struct BindGroupEntry {
|
||||
label: RenderGraphLabelValue,
|
||||
/// BindGroup
|
||||
bg: Arc<wgpu::BindGroup>,
|
||||
/// BindGroupLayout
|
||||
layout: Option<Arc<wgpu::BindGroupLayout>>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[derive(Clone)]
|
||||
struct ResourceSlot {
|
||||
label: RenderGraphLabelValue,
|
||||
ty: SlotType,
|
||||
value: SlotValue,
|
||||
}
|
||||
|
||||
pub struct RenderGraph {
|
||||
device: Arc<wgpu::Device>,
|
||||
queue: Arc<wgpu::Queue>,
|
||||
slots: FxHashMap<RenderGraphLabelValue, ResourceSlot>,
|
||||
nodes: FxHashMap<RenderGraphLabelValue, NodeEntry>,
|
||||
sub_graphs: FxHashMap<RenderGraphLabelValue, RenderGraph>,
|
||||
bind_groups: FxHashMap<RenderGraphLabelValue, BindGroupEntry>,
|
||||
/// A directed graph used to determine dependencies of nodes.
|
||||
node_graph: petgraph::matrix_graph::DiMatrix<RenderGraphLabelValue, (), Option<()>, usize>,
|
||||
view_target: Rc<RefCell<ViewTarget>>,
|
||||
shader_prepoc: wgsl_preprocessor::Processor,
|
||||
}
|
||||
|
||||
impl RenderGraph {
|
||||
pub fn new(device: Arc<wgpu::Device>, queue: Arc<wgpu::Queue>, view_target: Rc<RefCell<ViewTarget>>) -> Self {
|
||||
Self {
|
||||
device,
|
||||
queue,
|
||||
slots: Default::default(),
|
||||
nodes: Default::default(),
|
||||
sub_graphs: Default::default(),
|
||||
bind_groups: Default::default(),
|
||||
node_graph: Default::default(),
|
||||
view_target,
|
||||
shader_prepoc: wgsl_preprocessor::Processor::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn device(&self) -> &wgpu::Device {
|
||||
&self.device
|
||||
}
|
||||
|
||||
/// Add a [`Node`] to the RenderGraph.
|
||||
///
|
||||
/// When the node is added, its [`Node::desc`] method will be executed.
|
||||
///
|
||||
/// Additionally, all [`Slot`](node::NodeSlot)s of the node will be iterated over:
/// 1. Ensuring that there are no two slots of the same name with different value types.
/// 2. Changing the id of input slots to match the id of the output slot of the same name.
///    * This means that the ids of input slots **ARE NOT STABLE**. **DO NOT** rely on them to
///      not change. The IDs of output slots do stay the same.
/// 3. Ensuring that no two slots share the same ID when the names do not match.
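///
/// # Example
///
/// A minimal sketch; `MyPassLabel` and `MyNode` are hypothetical stand-ins for a real label
/// and [`Node`] implementation:
/// ```nobuild
/// graph.add_node(MyPassLabel, MyNode::new());
/// // schedule it after the base pass
/// graph.add_edge(BasePassLabel, MyPassLabel);
/// ```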
|
||||
#[instrument(skip(self, node), level = "debug")]
|
||||
pub fn add_node<P: Node>(&mut self, label: impl RenderGraphLabel, mut node: P) {
|
||||
let mut desc = node.desc(self);
|
||||
|
||||
// collect all the slots of the node
|
||||
for slot in &mut desc.slots {
|
||||
if let Some(other) = self
|
||||
.slots
|
||||
.get_mut(&slot.label)
|
||||
{
|
||||
debug_assert_eq!(
|
||||
slot.ty, other.ty,
|
||||
"slot {:?} in node {:?} does not match existing slot of same name",
|
||||
slot.label, label
|
||||
);
|
||||
} else {
|
||||
debug_assert!(!self.slots.contains_key(&slot.label),
|
||||
"Reuse of id detected in render graph! Node: {:?}, slot: {:?}",
|
||||
label, slot.label,
|
||||
);
|
||||
|
||||
let res_slot = ResourceSlot {
|
||||
label: slot.label.clone(),
|
||||
ty: slot.ty,
|
||||
value: slot.value.clone().unwrap_or(SlotValue::None),
|
||||
};
|
||||
|
||||
self.slots.insert(slot.label.clone(), res_slot);
|
||||
}
|
||||
}
|
||||
|
||||
// get clones of the bind groups and layouts
|
||||
for (label, bg, bgl) in &desc.bind_groups {
|
||||
self.bind_groups.insert(label.clone(), BindGroupEntry {
|
||||
label: label.clone(),
|
||||
bg: bg.clone(),
|
||||
layout: bgl.clone(),
|
||||
});
|
||||
}
|
||||
|
||||
let label: RenderGraphLabelValue = label.into();
|
||||
let index = self.node_graph.add_node(label.clone());
|
||||
|
||||
self.nodes.insert(
|
||||
label,
|
||||
NodeEntry {
|
||||
inner: Arc::new(RefCell::new(node)),
|
||||
desc: Rc::new(RefCell::new(desc)),
|
||||
graph_index: index,
|
||||
pipeline: Rc::new(RefCell::new(None)),
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
/// Creates all pipelines required for the nodes.
///
/// This only needs to be run when the [`Node`]s in the graph change, or when nodes are
/// removed or added.
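///
/// A sketch of the expected call order (not lifted from the engine's runtime code):
/// ```nobuild
/// graph.setup(&device);      // after nodes are added or changed
/// // every frame:
/// graph.prepare(&mut world);
/// graph.render();
/// ```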
|
||||
#[instrument(skip(self, device))]
|
||||
pub fn setup(&mut self, device: &wgpu::Device) {
|
||||
// For all nodes, create their pipelines
|
||||
for node in self.nodes.values_mut() {
|
||||
let desc = (*node.desc).borrow();
|
||||
if let Some(pipeline_desc) = &desc.pipeline_desc {
|
||||
let pipeline = match desc.ty {
|
||||
NodeType::Render => Pipeline::Render(RenderPipeline::create(
|
||||
device,
|
||||
pipeline_desc
|
||||
.as_render_pipeline_descriptor()
|
||||
.expect("got compute pipeline descriptor in a render node"),
|
||||
)),
|
||||
NodeType::Compute => Pipeline::Compute(ComputePipeline::create(
|
||||
device,
|
||||
pipeline_desc
|
||||
.as_compute_pipeline_descriptor()
|
||||
.expect("got render pipeline descriptor in a compute node"),
|
||||
)),
|
||||
NodeType::Presenter | NodeType::Node | NodeType::Graph => {
|
||||
panic!("Present or Node RenderGraph nodes should not have a pipeline descriptor!");
|
||||
},
|
||||
};
|
||||
|
||||
drop(desc);
|
||||
|
||||
let mut node_pipeline = node.pipeline.borrow_mut();
|
||||
*node_pipeline = Some(pipeline);
|
||||
}
|
||||
}
|
||||
|
||||
for sub in self.sub_graphs.values_mut() {
|
||||
sub.setup(device);
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, world))]
|
||||
pub fn prepare(&mut self, world: &mut World) {
|
||||
let mut buffer_writes = VecDeque::<GraphBufferWrite>::new();
|
||||
// Reserve capacity for some buffer writes. Not all nodes write, so half the number of nodes
// is probably fine.
|
||||
buffer_writes.reserve(self.nodes.len() / 2);
|
||||
|
||||
let mut sorted: VecDeque<RenderGraphLabelValue> = petgraph::algo::toposort(&self.node_graph, None)
|
||||
.expect("RenderGraph had cycled!")
|
||||
.iter()
|
||||
.map(|i| self.node_graph[*i].clone())
|
||||
.collect();
|
||||
|
||||
while let Some(node_label) = sorted.pop_front() {
|
||||
let node = self.nodes.get(&node_label).unwrap();
|
||||
let device = self.device.clone();
|
||||
let queue = self.queue.clone();
|
||||
|
||||
let inner = node.inner.clone();
|
||||
let mut inner = inner.borrow_mut();
|
||||
|
||||
let mut context = RenderGraphContext::new(device, queue, None, node_label.clone());
|
||||
inner.prepare(self, world, &mut context);
|
||||
buffer_writes.append(&mut context.buffer_writes);
|
||||
}
|
||||
|
||||
{
|
||||
// Queue all buffer writes to the gpu
|
||||
let s = debug_span!("queue_buffer_writes");
|
||||
let _e = s.enter();
|
||||
|
||||
while let Some(bufwr) = buffer_writes.pop_front() {
|
||||
let slot = self
|
||||
.slots
|
||||
.get(&bufwr.target_slot)
|
||||
.unwrap_or_else(|| panic!("Failed to find slot '{:?}' for buffer write",
|
||||
bufwr.target_slot));
|
||||
let buf = slot
|
||||
.value
|
||||
.as_buffer()
|
||||
.unwrap_or_else(|| panic!("Slot '{:?}' is not a buffer", bufwr.target_slot));
|
||||
|
||||
self.queue.write_buffer(buf, bufwr.offset, &bufwr.bytes);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn create_encoder(&self) -> CommandEncoder {
|
||||
self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
|
||||
label: Some("graph encoder"),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn render(&mut self) {
|
||||
let mut sorted: VecDeque<RenderGraphLabelValue> = petgraph::algo::toposort(&self.node_graph, None)
|
||||
.expect("RenderGraph had cycled!")
|
||||
.iter()
|
||||
.map(|i| self.node_graph[*i].clone())
|
||||
.collect();
|
||||
|
||||
// A bit of 'encoder hot potato' is played using this.
// Although the encoder is an Option, it's only an Option so that ownership of it can be given
// to the context for the duration of the node's execution.
// After the node is executed, the encoder is taken back. If the node is a presenter node,
// the encoder will be submitted and a new one will be made.
|
||||
let mut encoder = Some(self.create_encoder());
|
||||
|
||||
while let Some(node_label) = sorted.pop_front() {
|
||||
let node = self.nodes.get(&node_label).unwrap();
|
||||
let node_inn = node.inner.clone();
|
||||
|
||||
let node_desc = node.desc.clone();
|
||||
let node_desc = (*node_desc).borrow();
|
||||
|
||||
// clones of the Arcs are required to appease the borrow checker
|
||||
let device = self.device.clone();
|
||||
let queue = self.queue.clone();
|
||||
|
||||
// create a new encoder if the last one was submitted
|
||||
if encoder.is_none() {
|
||||
encoder = Some(self.create_encoder());
|
||||
}
|
||||
|
||||
let mut context = RenderGraphContext::new(device, queue, encoder.take(), node_label.clone());
|
||||
|
||||
trace!("Executing {:?}", node_label.0);
|
||||
let mut inner = node_inn.borrow_mut();
|
||||
inner.execute(self, &node_desc, &mut context);
|
||||
|
||||
// take back the encoder from the context
|
||||
encoder = context.encoder;
|
||||
}
|
||||
|
||||
if let Some(encoder) = encoder {
|
||||
self.queue.submit(std::iter::once(encoder.finish()));
|
||||
}
|
||||
}
|
||||
|
||||
pub fn slot_value<L: Into<RenderGraphLabelValue>>(&self, label: L) -> Option<&SlotValue> {
|
||||
self.slots.get(&label.into()).map(|s| &s.value)
|
||||
}
|
||||
|
||||
pub fn slot_value_mut<L: Into<RenderGraphLabelValue>>(&mut self, label: L) -> Option<&mut SlotValue> {
|
||||
self.slots.get_mut(&label.into()).map(|s| &mut s.value)
|
||||
}
|
||||
|
||||
pub fn node_desc<L: Into<RenderGraphLabelValue>>(&self, label: L) -> Option<Ref<NodeDesc>> {
|
||||
self.nodes.get(&label.into()).map(|s| (*s.desc).borrow())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn pipeline<L: Into<RenderGraphLabelValue>>(&self, label: L) -> Option<Ref<Pipeline>> {
|
||||
self.nodes.get(&label.into())
|
||||
.and_then(|p| {
|
||||
let v = p.pipeline.borrow();
|
||||
|
||||
#[allow(clippy::manual_map)]
|
||||
match &*v {
|
||||
Some(_) => Some(Ref::map(v, |p| p.as_ref().unwrap())),
|
||||
None => None,
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn try_bind_group<L: Into<RenderGraphLabelValue>>(&self, label: L) -> Option<&Arc<wgpu::BindGroup>> {
|
||||
self.bind_groups.get(&label.into()).map(|e| &e.bg)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn bind_group<L: Into<RenderGraphLabelValue>>(&self, label: L) -> &Arc<wgpu::BindGroup> {
|
||||
let l = label.into();
|
||||
self.try_bind_group(l.clone()).unwrap_or_else(|| panic!("Unknown label '{:?}' for bind group", l))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn try_bind_group_layout<L: Into<RenderGraphLabelValue>>(&self, label: L) -> Option<&Arc<wgpu::BindGroupLayout>> {
|
||||
self.bind_groups.get(&label.into()).and_then(|e| e.layout.as_ref())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn bind_group_layout<L: Into<RenderGraphLabelValue>>(&self, label: L) -> &Arc<wgpu::BindGroupLayout> {
|
||||
let l = label.into();
|
||||
self.try_bind_group_layout(l.clone())
|
||||
.unwrap_or_else(|| panic!("Unknown label '{:?}' for bind group layout", l.clone()))
|
||||
}
|
||||
|
||||
pub fn add_edge(&mut self, from: impl RenderGraphLabel, to: impl RenderGraphLabel)
|
||||
{
|
||||
let from = RenderGraphLabelValue::from(from);
|
||||
let to = RenderGraphLabelValue::from(to);
|
||||
|
||||
let from_idx = self
|
||||
.nodes
|
||||
.iter()
|
||||
.find(|p| *p.0 == from)
|
||||
.map(|p| p.1.graph_index)
|
||||
.expect("Failed to find from node");
|
||||
let to_idx = self
|
||||
.nodes
|
||||
.iter()
|
||||
.find(|p| *p.0 == to)
|
||||
.map(|p| p.1.graph_index)
|
||||
.expect("Failed to find to node");
|
||||
|
||||
debug_assert_ne!(from_idx, to_idx, "cannot add edges between the same node");
|
||||
|
||||
self.node_graph.add_edge(from_idx, to_idx, ());
|
||||
}
|
||||
|
||||
/// Utility method for setting the bind groups for a node.
///
/// The `bind_groups` parameter pairs the label of a bind group with the index of that bind
/// group in the node's pipeline. If a bind group with the provided label is not found in the
/// graph, a panic will occur.
|
||||
///
|
||||
/// # Example:
|
||||
/// ```nobuild
|
||||
/// graph.set_bind_groups(
|
||||
/// &mut pass,
|
||||
/// &[
|
||||
/// // retrieves the `BasePassSlots::DepthTexture` bind group and sets the index 0 in the
|
||||
/// // node to it.
|
||||
/// (&BaseNodeSlots::DepthTexture, 0),
|
||||
/// (&BaseNodeSlots::Camera, 1),
|
||||
/// (&LightBaseNodeSlots::Lights, 2),
|
||||
/// (&LightCullComputeNodeSlots::LightIndicesGridGroup, 3),
|
||||
/// (&BaseNodeSlots::ScreenSize, 4),
|
||||
/// ],
|
||||
/// );
|
||||
/// ```
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if a bind group of a provided name is not found.
|
||||
pub fn set_bind_groups<'a, P: Pass<'a>>(
|
||||
&'a self,
|
||||
pass: &mut P,
|
||||
bind_groups: &[(&dyn RenderGraphLabel, u32)],
|
||||
) {
|
||||
for (label, index) in bind_groups {
|
||||
let bg = self
|
||||
.bind_group(label.rc_clone());
|
||||
|
||||
pass.set_bind_group(*index, bg, &[]);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn sub_graph_mut<L: Into<RenderGraphLabelValue>>(&mut self, label: L) -> Option<&mut RenderGraph> {
|
||||
self.sub_graphs.get_mut(&label.into())
|
||||
}
|
||||
|
||||
/// Add a sub graph.
///
/// > Note: the sub graph is not run unless you add a node that executes it. See [`SubGraphNode`].
|
||||
pub fn add_sub_graph<L: Into<RenderGraphLabelValue>>(&mut self, label: L, sub: RenderGraph) {
|
||||
self.sub_graphs.insert(label.into(), sub);
|
||||
}
|
||||
|
||||
/// Clone rendering resources (slots, bind groups, etc.) to a sub graph.
|
||||
fn clone_resources_to_sub(&mut self, sub_graph: RenderGraphLabelValue, slots: Vec<RenderGraphLabelValue>) {
|
||||
// Instead of inserting the slots into the sub graph as they are extracted from the parent
// graph, this is done in two separate steps to make the borrow checker happy. If it is not,
// the borrow checker complains about multiple mutable borrows (or an immutable borrow
// while mutably borrowing) of self, caused by borrowing both the sub graph and self.slots
// from self at the same time.
|
||||
let mut collected_slots = VecDeque::new();
|
||||
let mut collected_bind_groups = VecDeque::new();
|
||||
|
||||
for slot in slots.iter() {
|
||||
let mut found_res = false;
|
||||
|
||||
// Since slots and bind groups may go by the same label,
|
||||
// there must be a way to collect both of them. A flag variable is used to detect
|
||||
// if neither was found.
|
||||
|
||||
if let Some(slot_res) = self.slots.get(slot) {
|
||||
collected_slots.push_back(slot_res.clone());
|
||||
found_res = true;
|
||||
}
|
||||
|
||||
if let Some(bg_res) = self.bind_groups.get(slot) {
|
||||
collected_bind_groups.push_back(bg_res.clone());
|
||||
found_res = true;
|
||||
}
|
||||
|
||||
if !found_res {
|
||||
panic!("sub graph is missing {:?} input slot or bind group", slot);
|
||||
}
|
||||
}
|
||||
|
||||
let sg = self.sub_graph_mut(sub_graph.clone()).unwrap();
|
||||
while let Some(res) = collected_slots.pop_front() {
|
||||
sg.slots.insert(res.label.clone(), res);
|
||||
}
|
||||
|
||||
while let Some(bg) = collected_bind_groups.pop_front() {
|
||||
sg.bind_groups.insert(bg.label.clone(), bg);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn view_target(&self) -> Ref<ViewTarget> {
|
||||
self.view_target.borrow()
|
||||
}
|
||||
|
||||
pub fn view_target_mut(&self) -> RefMut<ViewTarget> {
|
||||
self.view_target.borrow_mut()
|
||||
}
|
||||
|
||||
/// Register a shader with the preprocessor.
///
/// This step also parses the shader and will return an error if parsing fails.
///
/// Returns the shader module's import path if the module specified one.
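///
/// A minimal sketch; the shader path is illustrative:
/// ```nobuild
/// let import_path = graph.register_shader(include_str!("../../shaders/fxaa.wgsl"))?;
/// let source = graph.preprocess_shader(&import_path.unwrap())?;
/// ```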
|
||||
#[inline(always)]
|
||||
pub fn register_shader(&mut self, shader_src: &str) -> Result<Option<String>, wgsl_preprocessor::Error> {
|
||||
self.shader_prepoc.parse_module(shader_src)
|
||||
}
|
||||
|
||||
/// Preprocess a shader, returning the source.
|
||||
#[inline(always)]
|
||||
pub fn preprocess_shader(&mut self, shader_path: &str) -> Result<String, wgsl_preprocessor::Error> {
|
||||
self.shader_prepoc.preprocess_module(shader_path)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SubGraphNode {
|
||||
subg: RenderGraphLabelValue,
|
||||
slots: Vec<RenderGraphLabelValue>,
|
||||
}
|
||||
|
||||
impl SubGraphNode {
|
||||
pub fn new<L: Into<RenderGraphLabelValue>>(sub_label: L, slot_labels: Vec<RenderGraphLabelValue>) -> Self {
|
||||
Self {
|
||||
subg: sub_label.into(),
|
||||
slots: slot_labels,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for SubGraphNode {
|
||||
fn desc(&mut self, _: &mut RenderGraph) -> NodeDesc {
|
||||
NodeDesc::new(NodeType::Graph, None, vec![])
|
||||
}
|
||||
|
||||
fn prepare(&mut self, graph: &mut RenderGraph, world: &mut World, _: &mut RenderGraphContext) {
|
||||
graph.clone_resources_to_sub(self.subg.clone(), self.slots.clone());
|
||||
|
||||
let sg = graph.sub_graph_mut(self.subg.clone())
|
||||
.unwrap_or_else(|| panic!("failed to find sub graph for SubGraphNode: {:?}", self.subg));
|
||||
sg.prepare(world);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut RenderGraph,
|
||||
_: &NodeDesc,
|
||||
_: &mut RenderGraphContext,
|
||||
) {
|
||||
graph.clone_resources_to_sub(self.subg.clone(), self.slots.clone());
|
||||
|
||||
let sg = graph.sub_graph_mut(self.subg.clone())
|
||||
.unwrap_or_else(|| panic!("failed to find sub graph for SubGraphNode: {:?}", self.subg));
|
||||
sg.render();
|
||||
}
|
||||
}
|
|
@ -1,387 +0,0 @@
|
|||
use std::{cell::{Ref, RefCell, RefMut}, num::NonZeroU32, rc::Rc, sync::Arc};
|
||||
|
||||
use bind_match::bind_match;
|
||||
use lyra_ecs::World;
|
||||
|
||||
use crate::render::resource::PipelineDescriptor;
|
||||
|
||||
use super::{Frame, RenderGraph, RenderGraphContext, RenderGraphLabel, RenderGraphLabelValue, RenderTarget};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default)]
|
||||
pub enum NodeType {
|
||||
/// A node that doesn't render, compute, or present anything. This likely means it injects data into the graph.
|
||||
#[default]
|
||||
Node,
|
||||
/// A Compute pass node type.
|
||||
Compute,
|
||||
/// A render pass node type.
|
||||
Render,
|
||||
/// A node that presents render results to a render target.
|
||||
Presenter,
|
||||
/// A node that represents a sub-graph.
|
||||
Graph,
|
||||
}
|
||||
|
||||
impl NodeType {
|
||||
/// Returns a boolean indicating if the node should have a [`Pipeline`](crate::render::resource::Pipeline).
|
||||
pub fn should_have_pipeline(&self) -> bool {
|
||||
match self {
|
||||
NodeType::Node => false,
|
||||
NodeType::Compute => true,
|
||||
NodeType::Render => true,
|
||||
NodeType::Presenter => false,
|
||||
NodeType::Graph => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// The type of data that is stored in a [`Node`] slot.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum SlotType {
|
||||
TextureView,
|
||||
Sampler,
|
||||
Texture,
|
||||
Buffer,
|
||||
RenderTarget,
|
||||
Frame,
|
||||
}
|
||||
|
||||
/// The value of a slot in a [`Node`].
|
||||
#[derive(Clone)]
|
||||
pub enum SlotValue {
|
||||
/// This slot doesn't have any value.
None,
/// The value will be set during a later phase of the render graph. To see the type of value
/// this will be set to, see the slot's type.
Lazy,
|
||||
TextureView(Arc<wgpu::TextureView>),
|
||||
Sampler(Rc<wgpu::Sampler>),
|
||||
Texture(Arc<wgpu::Texture>),
|
||||
Buffer(Arc<wgpu::Buffer>),
|
||||
RenderTarget(Rc<RefCell<RenderTarget>>),
|
||||
Frame(Rc<RefCell<Option<Frame>>>),
|
||||
}
|
||||
|
||||
impl SlotValue {
|
||||
pub fn is_none(&self) -> bool {
|
||||
matches!(self, Self::None)
|
||||
}
|
||||
|
||||
pub fn is_lazy(&self) -> bool {
|
||||
matches!(self, Self::Lazy)
|
||||
}
|
||||
|
||||
pub fn as_texture_view(&self) -> Option<&Arc<wgpu::TextureView>> {
|
||||
bind_match!(self, Self::TextureView(v) => v)
|
||||
}
|
||||
|
||||
pub fn as_sampler(&self) -> Option<&Rc<wgpu::Sampler>> {
|
||||
bind_match!(self, Self::Sampler(v) => v)
|
||||
}
|
||||
|
||||
pub fn as_texture(&self) -> Option<&Arc<wgpu::Texture>> {
|
||||
bind_match!(self, Self::Texture(v) => v)
|
||||
}
|
||||
|
||||
pub fn as_buffer(&self) -> Option<&Arc<wgpu::Buffer>> {
|
||||
bind_match!(self, Self::Buffer(v) => v)
|
||||
}
|
||||
|
||||
pub fn as_render_target(&self) -> Option<Ref<RenderTarget>> {
|
||||
bind_match!(self, Self::RenderTarget(v) => v.borrow())
|
||||
}
|
||||
|
||||
pub fn as_render_target_mut(&mut self) -> Option<RefMut<RenderTarget>> {
|
||||
bind_match!(self, Self::RenderTarget(v) => v.borrow_mut())
|
||||
}
|
||||
|
||||
pub fn as_frame(&self) -> Option<Ref<Option<Frame>>> {
|
||||
bind_match!(self, Self::Frame(v) => v.borrow())
|
||||
}
|
||||
|
||||
pub fn as_frame_mut(&mut self) -> Option<RefMut<Option<Frame>>> {
|
||||
bind_match!(self, Self::Frame(v) => v.borrow_mut())
|
||||
}
|
||||
}
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum SlotAttribute {
|
||||
/// This slot inputs a value into the node, expecting another node to `Output` it.
|
||||
Input,
|
||||
/// This slot outputs a value from the node, providing the value to other nodes that
/// `Input` it.
|
||||
Output,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct NodeSlot {
|
||||
/// The type of the value that this slot inputs/outputs.
|
||||
pub ty: SlotType,
|
||||
/// The way this slot uses the value. Defines if this slot is an output or input.
|
||||
pub attribute: SlotAttribute,
|
||||
/// The identifying label of this slot.
|
||||
pub label: RenderGraphLabelValue,
|
||||
/// The value of the slot.
|
||||
/// This is `None` if the slot is a `SlotAttribute::Input` type.
|
||||
pub value: Option<SlotValue>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PipelineShaderDesc {
|
||||
pub label: Option<String>,
|
||||
pub source: String,
|
||||
pub entry_point: String,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct RenderGraphPipelineInfo {
|
||||
/// Debug label of the pipeline. This will show up in graphics debuggers for easy identification.
|
||||
pub label: Option<String>,
|
||||
/// The layout of bind groups for this pipeline.
|
||||
pub bind_group_layouts: Vec<Rc<wgpu::BindGroupLayout>>,
|
||||
/// The descriptor of the vertex shader.
|
||||
pub vertex: PipelineShaderDesc,
|
||||
/// The properties of the pipeline at the primitive assembly and rasterization level.
|
||||
pub primitive: wgpu::PrimitiveState,
|
||||
/// The effect of draw calls on the depth and stencil aspects of the output target, if any.
|
||||
pub depth_stencil: Option<wgpu::DepthStencilState>,
|
||||
/// The multi-sampling properties of the pipeline.
|
||||
pub multisample: wgpu::MultisampleState,
|
||||
/// The compiled fragment stage, its entry point, and the color targets.
|
||||
pub fragment: Option<PipelineShaderDesc>,
|
||||
/// If the pipeline will be used with a multiview render pass, this indicates how many array
|
||||
/// layers the attachments will have.
|
||||
pub multiview: Option<NonZeroU32>,
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
impl RenderGraphPipelineInfo {
|
||||
pub fn new(
|
||||
label: &str,
|
||||
bind_group_layouts: Vec<wgpu::BindGroupLayout>,
|
||||
vertex: PipelineShaderDesc,
|
||||
primitive: wgpu::PrimitiveState,
|
||||
depth_stencil: Option<wgpu::DepthStencilState>,
|
||||
multisample: wgpu::MultisampleState,
|
||||
fragment: Option<PipelineShaderDesc>,
|
||||
multiview: Option<NonZeroU32>,
|
||||
) -> Self {
|
||||
Self {
|
||||
label: Some(label.to_string()),
|
||||
bind_group_layouts: bind_group_layouts
|
||||
.into_iter()
|
||||
.map(Rc::new)
|
||||
.collect(),
|
||||
vertex,
|
||||
primitive,
|
||||
depth_stencil,
|
||||
multisample,
|
||||
fragment,
|
||||
multiview,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Descriptor of a Node in a [`RenderGraph`].
|
||||
pub struct NodeDesc {
|
||||
/// The [`NodeType`] of the node.
|
||||
pub ty: NodeType,
|
||||
/// The slots that the Node uses.
|
||||
/// This defines the resources that the node uses and creates in the graph.
|
||||
pub slots: Vec<NodeSlot>,
|
||||
//slot_label_lookup: HashMap<RenderGraphLabelValue, u64>,
|
||||
/// An optional pipeline descriptor for the Node.
|
||||
/// This is `None` if the Node type is not a node that requires a pipeline
|
||||
/// (see [`NodeType::should_have_pipeline`]).
|
||||
pub pipeline_desc: Option<PipelineDescriptor>,
|
||||
/// The bind groups that this Node creates.
|
||||
/// This makes the bind groups accessible to other Nodes.
|
||||
pub bind_groups: Vec<(
|
||||
RenderGraphLabelValue,
|
||||
Arc<wgpu::BindGroup>,
|
||||
Option<Arc<wgpu::BindGroupLayout>>,
|
||||
)>,
|
||||
}
|
||||
|
||||
impl NodeDesc {
|
||||
/// Create a new node descriptor.
|
||||
pub fn new(
|
||||
pass_type: NodeType,
|
||||
pipeline_desc: Option<PipelineDescriptor>,
|
||||
bind_groups: Vec<(&dyn RenderGraphLabel, Arc<wgpu::BindGroup>, Option<Arc<wgpu::BindGroupLayout>>)>,
|
||||
) -> Self {
|
||||
Self {
|
||||
ty: pass_type,
|
||||
slots: vec![],
|
||||
pipeline_desc,
|
||||
bind_groups: bind_groups
|
||||
.into_iter()
|
||||
.map(|bg| (bg.0.rc_clone().into(), bg.1, bg.2))
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a slot to the descriptor.
|
||||
///
|
||||
/// In debug builds, there is an assert that triggers if the slot is an input slot and has
|
||||
/// a value set.
|
||||
pub fn add_slot(&mut self, slot: NodeSlot) {
|
||||
debug_assert!(
|
||||
!(slot.attribute == SlotAttribute::Input && slot.value.is_some()),
|
||||
"input slots should not have values"
|
||||
);
|
||||
|
||||
self.slots.push(slot);
|
||||
}
|
||||
|
||||
/// Add a buffer slot to the descriptor.
|
||||
///
|
||||
/// In debug builds, there is an assert that triggers if the slot is an input slot and has
|
||||
/// a value set. There is also an assert that is triggered if this slot value is not `None`,
|
||||
/// `SlotValue::Lazy` or a `Buffer`.
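///
/// A minimal sketch; the label and buffer are hypothetical:
/// ```nobuild
/// desc.add_buffer_slot(
///     MyPassSlots::Uniform,
///     SlotAttribute::Output,
///     Some(SlotValue::Buffer(Arc::new(uniform_buf))),
/// );
/// ```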
|
||||
#[inline(always)]
|
||||
pub fn add_buffer_slot<L: RenderGraphLabel>(
|
||||
&mut self,
|
||||
label: L,
|
||||
attribute: SlotAttribute,
|
||||
value: Option<SlotValue>,
|
||||
) {
|
||||
debug_assert!(
|
||||
matches!(value, None | Some(SlotValue::Lazy) | Some(SlotValue::Buffer(_))),
|
||||
"slot value is not a buffer"
|
||||
);
|
||||
|
||||
let slot = NodeSlot {
|
||||
label: label.into(),
|
||||
ty: SlotType::Buffer,
|
||||
attribute,
|
||||
value,
|
||||
};
|
||||
self.add_slot(slot);
|
||||
}
|
||||
|
||||
/// Add a slot that stores a [`wgpu::Texture`] to the descriptor.
|
||||
///
|
||||
/// In debug builds, there is an assert that triggers if the slot is an input slot and has
|
||||
/// a value set. There is also an assert that is triggered if this slot value is not `None`,
|
||||
/// `SlotValue::Lazy` or a `SlotValue::Texture`.
|
||||
#[inline(always)]
|
||||
pub fn add_texture_slot<L: RenderGraphLabel>(
|
||||
&mut self,
|
||||
label: L,
|
||||
attribute: SlotAttribute,
|
||||
value: Option<SlotValue>,
|
||||
) {
|
||||
debug_assert!(
|
||||
matches!(value, None | Some(SlotValue::Lazy) | Some(SlotValue::Texture(_))),
|
||||
"slot value is not a texture"
|
||||
);
|
||||
|
||||
let slot = NodeSlot {
|
||||
label: label.into(),
|
||||
ty: SlotType::Texture,
|
||||
attribute,
|
||||
value,
|
||||
};
|
||||
self.add_slot(slot);
|
||||
}
|
||||
|
||||
/// Add a slot that stores a [`wgpu::TextureView`] to the descriptor.
|
||||
///
|
||||
/// In debug builds, there is an assert that triggers if the slot is an input slot and has
|
||||
/// a value set. There is also an assert that is triggered if this slot value is not `None`,
|
||||
/// `SlotValue::Lazy` or a `SlotValue::TextureView`.
|
||||
#[inline(always)]
|
||||
pub fn add_texture_view_slot<L: RenderGraphLabel>(
|
||||
&mut self,
|
||||
label: L,
|
||||
attribute: SlotAttribute,
|
||||
value: Option<SlotValue>,
|
||||
) {
|
||||
debug_assert!(
|
||||
matches!(value, None | Some(SlotValue::Lazy) | Some(SlotValue::TextureView(_))),
|
||||
"slot value is not a texture view"
|
||||
);
|
||||
|
||||
let slot = NodeSlot {
|
||||
label: label.into(),
|
||||
ty: SlotType::TextureView,
|
||||
attribute,
|
||||
value,
|
||||
};
|
||||
self.add_slot(slot);
|
||||
}
|
||||
|
||||
/// Add a slot that stores a [`wgpu::Sampler`] to the descriptor.
|
||||
///
|
||||
/// In debug builds, there is an assert that triggers if the slot is an input slot and has
|
||||
/// a value set. There is also an assert that is triggered if this slot value is not `None`,
|
||||
/// `SlotValue::Lazy` or a `SlotValue::Sampler`.
|
||||
#[inline(always)]
|
||||
pub fn add_sampler_slot<L: RenderGraphLabel>(
|
||||
&mut self,
|
||||
label: L,
|
||||
attribute: SlotAttribute,
|
||||
value: Option<SlotValue>,
|
||||
) {
|
||||
debug_assert!(
|
||||
matches!(value, None | Some(SlotValue::Lazy) | Some(SlotValue::Sampler(_))),
|
||||
"slot value is not a sampler"
|
||||
);
|
||||
|
||||
let slot = NodeSlot {
|
||||
label: label.into(),
|
||||
ty: SlotType::Sampler,
|
||||
attribute,
|
||||
value,
|
||||
};
|
||||
self.add_slot(slot);
|
||||
}
|
||||
|
||||
/// Returns all input slots that the descriptor defines.
|
||||
pub fn input_slots(&self) -> Vec<&NodeSlot> {
|
||||
self.slots
|
||||
.iter()
|
||||
.filter(|s| s.attribute == SlotAttribute::Input)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Returns all output slots that the descriptor defines.
|
||||
pub fn output_slots(&self) -> Vec<&NodeSlot> {
|
||||
self.slots
|
||||
.iter()
|
||||
.filter(|s| s.attribute == SlotAttribute::Output)
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
/// A node that can be executed and scheduled in a [`RenderGraph`].
|
||||
///
|
||||
/// A node can be used for rendering, computing data on the GPU, collecting data from the main
|
||||
/// world and writing it to GPU buffers, or presenting renders to a surface.
|
||||
///
|
||||
/// The [`RenderGraph`] is run in phases. The first phase is `prepare`, then `execute`. When a node
/// is first added to a RenderGraph, its [`Node::desc`] function will be run. The descriptor
/// describes all resources the node requires for execution during the `execute` phase.
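///
/// A minimal sketch of a node that does nothing but could inject data into the graph
/// (the type is hypothetical):
/// ```nobuild
/// struct MyNode;
///
/// impl Node for MyNode {
///     fn desc(&mut self, _graph: &mut RenderGraph) -> NodeDesc {
///         NodeDesc::new(NodeType::Node, None, vec![])
///     }
///
///     fn prepare(&mut self, _graph: &mut RenderGraph, _world: &mut World, _context: &mut RenderGraphContext) {}
///
///     fn execute(&mut self, _graph: &mut RenderGraph, _desc: &NodeDesc, _context: &mut RenderGraphContext) {}
/// }
/// ```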
|
||||
pub trait Node: 'static {
|
||||
/// Retrieve a descriptor of the Node.
|
||||
fn desc(&mut self, graph: &mut RenderGraph) -> NodeDesc;
|
||||
|
||||
/// Prepare the node for rendering.
|
||||
///
|
||||
/// This phase runs before `execute` and is meant to be used to collect data from the World
|
||||
/// and write to GPU buffers.
|
||||
fn prepare(&mut self, graph: &mut RenderGraph, world: &mut World, context: &mut RenderGraphContext);
|
||||
|
||||
/// Execute the node.
|
||||
///
|
||||
/// Parameters:
|
||||
/// * `graph` - The RenderGraph that this node is a part of. Can be used to get bind groups and bind to them.
|
||||
/// * `desc` - The descriptor of this node.
|
||||
/// * `context` - The rendering graph context.
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut RenderGraph,
|
||||
desc: &NodeDesc,
|
||||
context: &mut RenderGraphContext,
|
||||
);
|
||||
}
|
|
@ -1,151 +0,0 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use glam::UVec2;
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use tracing::warn;
|
||||
use winit::dpi::PhysicalSize;
|
||||
|
||||
use crate::{
|
||||
render::{
|
||||
camera::{CameraUniform, RenderCamera},
|
||||
graph::{
|
||||
Node, NodeDesc, NodeType, RenderGraph, RenderGraphContext, SlotAttribute, SlotValue
|
||||
},
|
||||
render_buffer::BufferWrapper, texture::RenderTexture,
|
||||
},
|
||||
scene::CameraComponent,
|
||||
};
|
||||
|
||||
#[derive(Debug, Hash, Clone, Default, PartialEq, RenderGraphLabel)]
|
||||
pub struct BasePassLabel;
|
||||
|
||||
#[derive(Debug, Hash, Clone, PartialEq, RenderGraphLabel)]
|
||||
pub enum BasePassSlots {
|
||||
DepthTexture,
|
||||
ScreenSize,
|
||||
Camera,
|
||||
DepthTextureView,
|
||||
}
|
||||
|
||||
/// Supplies some basic things other passes need:
/// a screen size buffer, a camera buffer, and a depth texture.
|
||||
#[derive(Default)]
|
||||
pub struct BasePass {
|
||||
screen_size: UVec2,
|
||||
}
|
||||
|
||||
impl BasePass {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for BasePass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
let vt = graph.view_target();
|
||||
self.screen_size = vt.size();
|
||||
|
||||
let (screen_size_bgl, screen_size_bg, screen_size_buf, _) = BufferWrapper::builder()
|
||||
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
|
||||
.label_prefix("ScreenSize")
|
||||
.visibility(wgpu::ShaderStages::COMPUTE)
|
||||
.buffer_dynamic_offset(false)
|
||||
.contents(&[self.screen_size])
|
||||
.finish_parts(graph.device());
|
||||
let screen_size_bgl = Arc::new(screen_size_bgl);
|
||||
let screen_size_bg = Arc::new(screen_size_bg);
|
||||
|
||||
let (camera_bgl, camera_bg, camera_buf, _) = BufferWrapper::builder()
|
||||
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
|
||||
.label_prefix("camera")
|
||||
.visibility(wgpu::ShaderStages::all())
|
||||
.buffer_dynamic_offset(false)
|
||||
.contents(&[CameraUniform::default()])
|
||||
.finish_parts(graph.device());
|
||||
let camera_bgl = Arc::new(camera_bgl);
|
||||
let camera_bg = Arc::new(camera_bg);
|
||||
|
||||
// create the depth texture using the utility struct, then take all the required fields
|
||||
let mut depth_texture = RenderTexture::create_depth_texture(graph.device(), self.screen_size, "depth_texture");
|
||||
depth_texture.create_bind_group(&graph.device);
|
||||
|
||||
let dt_bg_pair = depth_texture.bindgroup_pair.unwrap();
|
||||
let depth_texture_bg = Arc::new(dt_bg_pair.bindgroup);
|
||||
let depth_texture_bgl = dt_bg_pair.layout;
|
||||
let depth_texture_view = Arc::new(depth_texture.view);
|
||||
|
||||
let mut desc = NodeDesc::new(
|
||||
NodeType::Node,
|
||||
None,
|
||||
vec![
|
||||
// TODO: Make this a trait maybe?
|
||||
// Could impl it for (RenderGraphLabel, wgpu::BindGroup) and also
|
||||
// (RenderGraphLabel, wgpu::BindGroup, wgpu::BindGroupLabel) AND
|
||||
// (RenderGraphLabel, wgpu::BindGroup, Option<wgpu::BindGroupLabel>)
|
||||
//
|
||||
// This could make it slightly easier to create this
|
||||
(&BasePassSlots::DepthTexture, depth_texture_bg, Some(depth_texture_bgl)),
|
||||
(&BasePassSlots::ScreenSize, screen_size_bg, Some(screen_size_bgl)),
|
||||
(&BasePassSlots::Camera, camera_bg, Some(camera_bgl)),
|
||||
],
|
||||
);
|
||||
|
||||
desc.add_texture_view_slot(
|
||||
BasePassSlots::DepthTextureView,
|
||||
SlotAttribute::Output,
|
||||
Some(SlotValue::TextureView(depth_texture_view)),
|
||||
);
|
||||
desc.add_buffer_slot(
|
||||
BasePassSlots::ScreenSize,
|
||||
SlotAttribute::Output,
|
||||
Some(SlotValue::Buffer(Arc::new(screen_size_buf))),
|
||||
);
|
||||
desc.add_buffer_slot(
|
||||
BasePassSlots::Camera,
|
||||
SlotAttribute::Output,
|
||||
Some(SlotValue::Buffer(Arc::new(camera_buf))),
|
||||
);
|
||||
|
||||
desc
|
||||
}
|
||||
|
||||
fn prepare(&mut self, graph: &mut RenderGraph, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
|
||||
if let Some(camera) = world.view_iter::<&mut CameraComponent>().next() {
|
||||
let screen_size = graph.view_target().size();
|
||||
|
||||
let mut render_cam =
|
||||
RenderCamera::new(PhysicalSize::new(screen_size.x, screen_size.y));
|
||||
let uniform = render_cam.calc_view_projection(&camera);
|
||||
|
||||
context.queue_buffer_write_with(BasePassSlots::Camera, 0, uniform)
|
||||
} else {
|
||||
warn!("Missing camera!");
|
||||
}
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
_desc: &crate::render::graph::NodeDesc,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let mut vt = graph.view_target_mut();
|
||||
vt.primary.create_frame();
|
||||
vt.primary.create_frame_view();
|
||||
/* debug_assert!(
|
||||
!rt.current_texture.is_some(),
|
||||
"main render target surface was not presented!"
|
||||
); */
|
||||
|
||||
// update the screen size buffer if the size changed.
|
||||
let rt_size = vt.size();
|
||||
if rt_size != self.screen_size {
|
||||
self.screen_size = rt_size;
|
||||
context.queue_buffer_write_with(BasePassSlots::ScreenSize, 0, self.screen_size)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,173 +0,0 @@
|
|||
use std::{collections::HashMap, rc::Rc, sync::Arc};
|
||||
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
|
||||
use crate::render::{
|
||||
graph::{Node, NodeDesc, NodeType},
|
||||
resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState},
|
||||
};
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
pub struct FxaaPassLabel;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct FxaaPass {
|
||||
target_sampler: Option<wgpu::Sampler>,
|
||||
bgl: Option<Arc<wgpu::BindGroupLayout>>,
|
||||
/// Store bind groups for the input textures.
|
||||
/// The texture may change due to resizes, or changes to the view target chain
|
||||
/// from other nodes.
|
||||
bg_cache: HashMap<wgpu::Id<wgpu::TextureView>, wgpu::BindGroup>,
|
||||
}
|
||||
|
||||
impl FxaaPass {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for FxaaPass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
let device = &graph.device;
|
||||
|
||||
let bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("fxaa_bgl"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
multisampled: false,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
let bgl = Arc::new(bgl);
|
||||
self.bgl = Some(bgl.clone());
|
||||
self.target_sampler = Some(device.create_sampler(&wgpu::SamplerDescriptor {
|
||||
label: Some("fxaa sampler"),
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
mipmap_filter: wgpu::FilterMode::Linear,
|
||||
..Default::default()
|
||||
}));
|
||||
|
||||
let shader = Rc::new(Shader {
|
||||
label: Some("fxaa_shader".into()),
|
||||
source: include_str!("../../shaders/fxaa.wgsl").to_string(),
|
||||
});
|
||||
|
||||
let vt = graph.view_target();
|
||||
|
||||
NodeDesc::new(
|
||||
NodeType::Render,
|
||||
Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
|
||||
label: Some("fxaa_pass".into()),
|
||||
layouts: vec![bgl.clone()],
|
||||
push_constant_ranges: vec![],
|
||||
vertex: VertexState {
|
||||
module: shader.clone(),
|
||||
entry_point: "vs_main".into(),
|
||||
buffers: vec![],
|
||||
},
|
||||
fragment: Some(FragmentState {
|
||||
module: shader,
|
||||
entry_point: "fs_main".into(),
|
||||
targets: vec![Some(wgpu::ColorTargetState {
|
||||
format: vt.format(),
|
||||
blend: Some(wgpu::BlendState::REPLACE),
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
})],
|
||||
}),
|
||||
depth_stencil: None,
|
||||
primitive: wgpu::PrimitiveState::default(),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
})),
|
||||
vec![],
|
||||
)
|
||||
}
|
||||
|
||||
fn prepare(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
_: &mut lyra_ecs::World,
|
||||
_: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
//todo!()
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let pipeline = graph
|
||||
.pipeline(context.label.clone())
|
||||
.expect("Failed to find pipeline for FxaaPass");
|
||||
|
||||
let mut vt = graph.view_target_mut();
|
||||
let chain = vt.get_chain();
|
||||
let source_view = chain.source.frame_view.as_ref().unwrap();
|
||||
let dest_view = chain.dest.frame_view.as_ref().unwrap();
|
||||
|
||||
let bg = self
|
||||
.bg_cache
|
||||
.entry(source_view.global_id())
|
||||
.or_insert_with(|| {
|
||||
graph
|
||||
.device()
|
||||
.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("fxaa_bg"),
|
||||
layout: self.bgl.as_ref().unwrap(),
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(source_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(
|
||||
self.target_sampler.as_ref().unwrap(),
|
||||
),
|
||||
},
|
||||
],
|
||||
})
|
||||
});
|
||||
|
||||
{
|
||||
let encoder = context.encoder.as_mut().unwrap();
|
||||
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("fxaa_pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: dest_view,
|
||||
resolve_target: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Load,
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: None,
|
||||
timestamp_writes: None,
|
||||
occlusion_query_set: None, // TODO: occlusion queries
|
||||
});
|
||||
pass.set_pipeline(pipeline.as_render());
|
||||
|
||||
pass.set_bind_group(0, bg, &[]);
|
||||
pass.draw(0..3, 0..1);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
use lyra_game_derive::RenderGraphLabel;
|
||||
|
||||
use crate::render::graph::{Node, NodeDesc, NodeSlot, NodeType};
|
||||
|
||||
#[derive(Debug, Default, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
pub struct InitNodeLabel;
|
||||
|
||||
pub struct InitNode {
|
||||
slots: Vec<NodeSlot>,
|
||||
}
|
||||
|
||||
impl Node for InitNode {
|
||||
fn desc(&mut self, _: &mut crate::render::graph::RenderGraph) -> crate::render::graph::NodeDesc {
|
||||
let mut desc = NodeDesc::new(NodeType::Node, None, vec![]);
|
||||
// the slots can just be cloned since the slot attribute doesn't really matter much.
|
||||
desc.slots = self.slots.clone();
|
||||
|
||||
desc
|
||||
}
|
||||
|
||||
fn prepare(&mut self, _: &mut crate::render::graph::RenderGraph, _: &mut lyra_ecs::World, _: &mut crate::render::graph::RenderGraphContext) {
|
||||
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
_: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
|
||||
}
|
||||
}
|
|
@ -1,73 +0,0 @@
|
|||
use lyra_game_derive::RenderGraphLabel;
|
||||
|
||||
use crate::render::{
|
||||
graph::{
|
||||
Node, NodeDesc, NodeType, RenderGraph, RenderGraphContext, SlotAttribute, SlotValue
|
||||
},
|
||||
light::LightUniformBuffers,
|
||||
};
|
||||
|
||||
#[derive(Debug, Hash, Clone, Default, PartialEq, RenderGraphLabel)]
|
||||
pub struct LightBasePassLabel;
|
||||
|
||||
#[derive(Debug, Hash, Clone, PartialEq, RenderGraphLabel)]
|
||||
pub enum LightBasePassSlots {
|
||||
Lights
|
||||
}
|
||||
|
||||
/// Supplies the light uniform buffers that other passes need.
|
||||
#[derive(Default)]
|
||||
pub struct LightBasePass {
|
||||
light_buffers: Option<LightUniformBuffers>,
|
||||
}
|
||||
|
||||
impl LightBasePass {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for LightBasePass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
let device = &graph.device;
|
||||
self.light_buffers = Some(LightUniformBuffers::new(device));
|
||||
let light_buffers = self.light_buffers.as_ref().unwrap();
|
||||
|
||||
let mut desc = NodeDesc::new(
|
||||
NodeType::Node,
|
||||
None,
|
||||
vec![(
|
||||
&LightBasePassSlots::Lights,
|
||||
light_buffers.bind_group.clone(),
|
||||
Some(light_buffers.bind_group_layout.clone()),
|
||||
)],
|
||||
);
|
||||
|
||||
desc.add_buffer_slot(
|
||||
LightBasePassSlots::Lights,
|
||||
SlotAttribute::Output,
|
||||
Some(SlotValue::Buffer(light_buffers.buffer.clone())),
|
||||
);
|
||||
|
||||
desc
|
||||
}
|
||||
|
||||
fn prepare(&mut self, _graph: &mut RenderGraph, world: &mut lyra_ecs::World, context: &mut RenderGraphContext) {
|
||||
let tick = world.current_tick();
|
||||
let lights = self.light_buffers.as_mut().unwrap();
|
||||
lights.update_lights(&context.queue, tick, world);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
_graph: &mut crate::render::graph::RenderGraph,
|
||||
_desc: &crate::render::graph::NodeDesc,
|
||||
_context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
}
|
||||
}
|
|
@ -1,275 +0,0 @@
|
|||
use std::{mem, rc::Rc, sync::Arc};
|
||||
|
||||
use glam::Vec2Swizzles;
|
||||
use lyra_ecs::World;
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
use crate::render::{
|
||||
graph::{
|
||||
Node, NodeDesc, NodeType, RenderGraph, RenderGraphContext, SlotAttribute, SlotValue
|
||||
}, renderer::ScreenSize, resource::{ComputePipeline, ComputePipelineDescriptor, Shader}
|
||||
};
|
||||
|
||||
use super::{BasePassSlots, LightBasePassSlots};
|
||||
|
||||
#[derive(Debug, Hash, Clone, Default, PartialEq, RenderGraphLabel)]
|
||||
pub struct LightCullComputePassLabel;
|
||||
|
||||
#[derive(Debug, Hash, Clone, PartialEq, RenderGraphLabel)]
|
||||
pub enum LightCullComputePassSlots {
|
||||
LightGridTexture,
|
||||
LightGridTextureView,
|
||||
IndexCounterBuffer,
|
||||
LightIndicesGridGroup,
|
||||
}
|
||||
|
||||
pub struct LightCullComputePass {
|
||||
workgroup_size: glam::UVec2,
|
||||
pipeline: Option<ComputePipeline>,
|
||||
}
|
||||
|
||||
impl LightCullComputePass {
|
||||
pub fn new(screen_size: winit::dpi::PhysicalSize<u32>) -> Self {
|
||||
Self {
|
||||
workgroup_size: glam::UVec2::new(screen_size.width, screen_size.height),
|
||||
pipeline: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for LightCullComputePass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
// initialize some buffers with empty data
|
||||
let mut contents = Vec::<u8>::new();
|
||||
let contents_len =
|
||||
self.workgroup_size.x * self.workgroup_size.y * mem::size_of::<u32>() as u32;
|
||||
contents.resize(contents_len as _, 0);
|
||||
|
||||
let device = graph.device();
|
||||
let light_indices_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("light_indices_buffer"),
|
||||
contents: &contents,
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let light_index_counter_buffer =
|
||||
device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("light_index_counter_buffer"),
|
||||
contents: bytemuck::cast_slice(&[0]),
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let light_indices_bg_layout = Arc::new(device.create_bind_group_layout(
|
||||
&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: false },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::StorageTexture {
|
||||
access: wgpu::StorageTextureAccess::ReadWrite,
|
||||
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: false },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
label: Some("light_indices_grid_bgl"),
|
||||
},
|
||||
));
|
||||
|
||||
let size = wgpu::Extent3d {
|
||||
width: self.workgroup_size.x,
|
||||
height: self.workgroup_size.y,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
let grid_texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("light_grid_tex"),
|
||||
size,
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
|
||||
usage: wgpu::TextureUsages::STORAGE_BINDING,
|
||||
view_formats: &[],
|
||||
});
|
||||
|
||||
let grid_texture_view = grid_texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
label: Some("light_grid_texview"),
|
||||
format: Some(wgpu::TextureFormat::Rg32Uint), // vec2<uint>
|
||||
dimension: Some(wgpu::TextureViewDimension::D2),
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
base_mip_level: 0,
|
||||
mip_level_count: None,
|
||||
base_array_layer: 0,
|
||||
array_layer_count: None,
|
||||
});
|
||||
|
||||
let light_indices_bg = Arc::new(device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &light_indices_bg_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: &light_indices_buffer,
|
||||
offset: 0,
|
||||
size: None, // the entire light buffer is needed
|
||||
}),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::TextureView(&grid_texture_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: &light_index_counter_buffer,
|
||||
offset: 0,
|
||||
size: None, // the entire counter buffer is needed
|
||||
}),
|
||||
},
|
||||
],
|
||||
label: Some("light_indices_grid_bind_group"),
|
||||
}));
|
||||
|
||||
//drop(main_rt);
|
||||
|
||||
/* let depth_tex_bgl = graph.bind_group_layout(BasePassSlots::DepthTexture);
|
||||
let camera_bgl = graph.bind_group_layout(BasePassSlots::Camera);
|
||||
let lights_bgl = graph.bind_group_layout(LightBasePassSlots::Lights);
|
||||
let screen_size_bgl = graph.bind_group_layout(BasePassSlots::ScreenSize); */
|
||||
|
||||
let mut desc = NodeDesc::new(
|
||||
NodeType::Compute,
|
||||
/* Some(PipelineDescriptor::Compute(ComputePipelineDescriptor {
|
||||
label: Some("light_cull_pipeline".into()),
|
||||
push_constant_ranges: vec![],
|
||||
layouts: vec![
|
||||
depth_tex_bgl.clone(),
|
||||
camera_bgl.clone(),
|
||||
lights_bgl.clone(),
|
||||
light_indices_bg_layout.clone(),
|
||||
screen_size_bgl.clone(),
|
||||
],
|
||||
shader,
|
||||
shader_entry_point: "cs_main".into(),
|
||||
})), */
|
||||
None,
|
||||
vec![(
|
||||
&LightCullComputePassSlots::LightIndicesGridGroup,
|
||||
light_indices_bg,
|
||||
Some(light_indices_bg_layout),
|
||||
)],
|
||||
);
|
||||
|
||||
desc.add_buffer_slot(
|
||||
BasePassSlots::ScreenSize,
|
||||
SlotAttribute::Input,
|
||||
None,
|
||||
);
|
||||
desc.add_buffer_slot(BasePassSlots::Camera, SlotAttribute::Input, None);
|
||||
desc.add_buffer_slot(
|
||||
LightCullComputePassSlots::IndexCounterBuffer,
|
||||
SlotAttribute::Output,
|
||||
Some(SlotValue::Buffer(Arc::new(light_index_counter_buffer))),
|
||||
);
|
||||
|
||||
desc
|
||||
}
|
||||
|
||||
fn prepare(&mut self, graph: &mut RenderGraph, world: &mut World, context: &mut RenderGraphContext) {
|
||||
context.queue_buffer_write_with(LightCullComputePassSlots::IndexCounterBuffer, 0, 0);
|
||||
|
||||
let screen_size = world.get_resource::<ScreenSize>()
|
||||
.expect("world missing ScreenSize resource");
|
||||
if screen_size.xy() != self.workgroup_size {
|
||||
self.workgroup_size = screen_size.xy();
|
||||
todo!("Resize buffers and other resources");
|
||||
}
|
||||
|
||||
if self.pipeline.is_none() {
|
||||
let device = graph.device();
|
||||
|
||||
let depth_tex_bgl = graph.bind_group_layout(BasePassSlots::DepthTexture);
|
||||
let camera_bgl = graph.bind_group_layout(BasePassSlots::Camera);
|
||||
let lights_bgl = graph.bind_group_layout(LightBasePassSlots::Lights);
|
||||
let screen_size_bgl = graph.bind_group_layout(BasePassSlots::ScreenSize);
|
||||
let light_indices_bg_layout = graph.bind_group_layout(LightCullComputePassSlots::LightIndicesGridGroup);
|
||||
|
||||
let shader = Rc::new(Shader {
|
||||
label: Some("light_cull_comp_shader".into()),
|
||||
source: include_str!("../../shaders/light_cull.comp.wgsl").to_string(),
|
||||
});
|
||||
|
||||
let pipeline = ComputePipeline::create(device, &ComputePipelineDescriptor {
|
||||
label: Some("light_cull_pipeline".into()),
|
||||
push_constant_ranges: vec![],
|
||||
layouts: vec![
|
||||
depth_tex_bgl.clone(),
|
||||
camera_bgl.clone(),
|
||||
lights_bgl.clone(),
|
||||
light_indices_bg_layout.clone(),
|
||||
screen_size_bgl.clone(),
|
||||
],
|
||||
shader,
|
||||
shader_entry_point: "cs_main".into(),
|
||||
cache: None,
|
||||
compilation_options: Default::default(),
|
||||
});
|
||||
|
||||
self.pipeline = Some(pipeline);
|
||||
}
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
context: &mut RenderGraphContext,
|
||||
) {
|
||||
let pipeline = self.pipeline.as_mut().unwrap();
|
||||
|
||||
let mut pass = context.begin_compute_pass(&wgpu::ComputePassDescriptor {
|
||||
label: Some("light_cull_pass"),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
pass.set_pipeline(pipeline);
|
||||
|
||||
graph.set_bind_groups(
|
||||
&mut pass,
|
||||
&[
|
||||
(&BasePassSlots::DepthTexture, 0),
|
||||
(&BasePassSlots::Camera, 1),
|
||||
(&LightBasePassSlots::Lights, 2),
|
||||
(&LightCullComputePassSlots::LightIndicesGridGroup, 3),
|
||||
(&BasePassSlots::ScreenSize, 4),
|
||||
],
|
||||
);
|
||||
|
||||
pass.dispatch_workgroups(self.workgroup_size.x, self.workgroup_size.y, 1);
|
||||
}
|
||||
}
|
|
@ -1,667 +0,0 @@
|
|||
use std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
ops::{Deref, DerefMut},
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use glam::{UVec2, Vec3};
|
||||
use image::GenericImageView;
|
||||
use itertools::izip;
|
||||
use lyra_ecs::{
|
||||
query::{filter::Or, Entities, ResMut, TickOf},
|
||||
Entity, ResourceObject, World,
|
||||
};
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use lyra_resource::ResHandle;
|
||||
use lyra_gltf::Mesh;
|
||||
use lyra_scene::SceneGraph;
|
||||
use rustc_hash::FxHashMap;
|
||||
use tracing::{debug, instrument};
|
||||
use uuid::Uuid;
|
||||
use wgpu::util::DeviceExt;
|
||||
|
||||
use crate::render::{
|
||||
graph::{Node, NodeDesc, NodeType},
|
||||
render_buffer::BufferStorage,
|
||||
render_job::RenderJob,
|
||||
texture::{res_filter_to_wgpu, res_wrap_to_wgpu},
|
||||
transform_buffer_storage::TransformIndex,
|
||||
vertex::Vertex,
|
||||
};
|
||||
|
||||
type MeshHandle = ResHandle<Mesh>;
|
||||
type SceneHandle = ResHandle<SceneGraph>;
|
||||
|
||||
pub struct MeshBufferStorage {
|
||||
pub buffer_vertex: BufferStorage,
|
||||
pub buffer_indices: Option<(wgpu::IndexFormat, BufferStorage)>,
|
||||
|
||||
// maybe this should just be a Uuid and the material can be retrieved through
|
||||
// MeshPass's `material_buffers` field?
|
||||
pub material: Option<Arc<GpuMaterial>>,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
pub struct MeshPrepNodeLabel;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MeshPrepNode {
|
||||
pub material_bgl: Arc<wgpu::BindGroupLayout>,
|
||||
}
|
||||
|
||||
impl MeshPrepNode {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let bgl = GpuMaterial::create_bind_group_layout(device);
|
||||
|
||||
Self { material_bgl: bgl }
|
||||
}
|
||||
|
||||
/// Checks if the mesh buffers in the GPU need to be updated.
|
||||
#[instrument(skip(self, device, mesh_buffers, queue, mesh_han))]
|
||||
fn check_mesh_buffers(
|
||||
&mut self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
mesh_buffers: &mut FxHashMap<uuid::Uuid, MeshBufferStorage>,
|
||||
mesh_han: &ResHandle<Mesh>,
|
||||
) {
|
||||
let mesh_uuid = mesh_han.uuid();
|
||||
|
||||
if let (Some(mesh), Some(buffers)) = (mesh_han.data_ref(), mesh_buffers.get_mut(&mesh_uuid))
|
||||
{
|
||||
// check if the buffer sizes don't match. If they don't, completely remake the buffers
|
||||
let vertices = mesh.position().unwrap();
|
||||
if buffers.buffer_vertex.count() != vertices.len() {
|
||||
debug!("Recreating buffers for mesh {}", mesh_uuid.to_string());
|
||||
let (vert, idx) = self.create_vertex_index_buffers(device, &mesh);
|
||||
|
||||
// have to re-get buffers because of borrow checker
|
||||
let buffers = mesh_buffers.get_mut(&mesh_uuid).unwrap();
|
||||
buffers.buffer_indices = idx;
|
||||
buffers.buffer_vertex = vert;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// update vertices
|
||||
let vertex_buffer = buffers.buffer_vertex.buffer();
|
||||
let vertices = vertices.as_slice();
|
||||
// align the vertices to 4 bytes (u32 is 4 bytes, which is wgpu::COPY_BUFFER_ALIGNMENT)
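// bytemuck::pod_align_to splits the slice into (unaligned prefix, aligned middle, unaligned suffix);
// only the aligned middle is uploaded, which covers the whole slice here since Vec3 and u32 share 4-byte alignment.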
|
||||
let (_, vertices, _) = bytemuck::pod_align_to::<Vec3, u32>(vertices);
|
||||
queue.write_buffer(vertex_buffer, 0, bytemuck::cast_slice(vertices));
|
||||
|
||||
// update the indices if they're given
|
||||
if let Some(index_buffer) = buffers.buffer_indices.as_ref() {
|
||||
let aligned_indices = match mesh.indices.as_ref().unwrap() {
|
||||
// U16 indices need to be aligned to u32 (4 bytes) for wgpu.
|
||||
lyra_gltf::MeshIndices::U16(v) => {
|
||||
bytemuck::pod_align_to::<u16, u32>(v).1
|
||||
}
|
||||
lyra_gltf::MeshIndices::U32(v) => {
|
||||
bytemuck::pod_align_to::<u32, u32>(v).1
|
||||
}
|
||||
};
|
||||
|
||||
let index_buffer = index_buffer.1.buffer();
|
||||
queue.write_buffer(index_buffer, 0, bytemuck::cast_slice(aligned_indices));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, device, mesh))]
|
||||
fn create_vertex_index_buffers(
|
||||
&mut self,
|
||||
device: &wgpu::Device,
|
||||
mesh: &Mesh,
|
||||
) -> (BufferStorage, Option<(wgpu::IndexFormat, BufferStorage)>) {
|
||||
let positions = mesh.position().unwrap();
|
||||
let tex_coords: Vec<glam::Vec2> = mesh
|
||||
.tex_coords()
|
||||
.cloned()
|
||||
.unwrap_or_else(|| vec![glam::Vec2::new(0.0, 0.0); positions.len()]);
|
||||
let normals = mesh.normals().unwrap();
|
||||
|
||||
assert!(positions.len() == tex_coords.len() && positions.len() == normals.len());
|
||||
|
||||
let mut vertex_inputs = vec![];
|
||||
for (v, t, n) in izip!(positions.iter(), tex_coords.iter(), normals.iter()) {
|
||||
vertex_inputs.push(Vertex::new(*v, *t, *n));
|
||||
}
|
||||
|
||||
let vertex_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Vertex Buffer"),
|
||||
contents: bytemuck::cast_slice(vertex_inputs.as_slice()), //vertex_combined.as_slice(),
|
||||
usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
let vertex_buffer = BufferStorage::new(vertex_buffer, 0, vertex_inputs.len());
|
||||
|
||||
let indices = match mesh.indices.as_ref() {
|
||||
Some(indices) => {
|
||||
let (idx_type, len, contents) = match indices {
|
||||
lyra_gltf::MeshIndices::U16(v) => {
|
||||
(wgpu::IndexFormat::Uint16, v.len(), bytemuck::cast_slice(v))
|
||||
}
|
||||
lyra_gltf::MeshIndices::U32(v) => {
|
||||
(wgpu::IndexFormat::Uint32, v.len(), bytemuck::cast_slice(v))
|
||||
}
|
||||
};
|
||||
|
||||
let index_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("Index Buffer"),
|
||||
contents,
|
||||
usage: wgpu::BufferUsages::INDEX | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let buffer_indices = BufferStorage::new(index_buffer, 0, len);
|
||||
|
||||
Some((idx_type, buffer_indices))
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
|
||||
(vertex_buffer, indices)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, device, queue, material_buffers, mesh))]
|
||||
fn create_mesh_buffers(
|
||||
&mut self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
material_buffers: &mut RenderAssets<Arc<GpuMaterial>>,
|
||||
mesh: &Mesh,
|
||||
) -> MeshBufferStorage {
|
||||
let (vertex_buffer, buffer_indices) = self.create_vertex_index_buffers(device, mesh);
|
||||
|
||||
let material = mesh
|
||||
.material
|
||||
.as_ref()
|
||||
.expect("Material resource not loaded yet");
|
||||
let material_ref = material.data_ref().unwrap();
|
||||
|
||||
let material = material_buffers.entry(material.uuid()).or_insert_with(|| {
|
||||
debug!(
|
||||
uuid = material.uuid().to_string(),
|
||||
"Sending material to gpu"
|
||||
);
|
||||
Arc::new(GpuMaterial::from_resource(
|
||||
device,
|
||||
queue,
|
||||
&self.material_bgl,
|
||||
&material_ref,
|
||||
))
|
||||
});
|
||||
|
||||
MeshBufferStorage {
|
||||
buffer_vertex: vertex_buffer,
|
||||
buffer_indices,
|
||||
material: Some(material.clone()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Processes the mesh for the renderer, storing and creating buffers as needed. Returns true if a new mesh was processed.
|
||||
#[instrument(skip(
|
||||
self,
|
||||
device,
|
||||
queue,
|
||||
mesh_buffers,
|
||||
material_buffers,
|
||||
entity_meshes,
|
||||
mesh,
|
||||
entity
|
||||
))]
|
||||
fn process_mesh(
|
||||
&mut self,
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
mesh_buffers: &mut RenderAssets<MeshBufferStorage>,
|
||||
material_buffers: &mut RenderAssets<Arc<GpuMaterial>>,
|
||||
entity_meshes: &mut FxHashMap<Entity, uuid::Uuid>,
|
||||
entity: Entity,
|
||||
mesh: &Mesh,
|
||||
mesh_uuid: Uuid,
|
||||
) -> bool {
|
||||
#[allow(clippy::map_entry)]
|
||||
if !mesh_buffers.contains_key(&mesh_uuid) {
|
||||
// create the mesh's buffers
|
||||
let buffers = self.create_mesh_buffers(device, queue, material_buffers, mesh);
|
||||
mesh_buffers.insert(mesh_uuid, buffers);
|
||||
entity_meshes.insert(entity, mesh_uuid);
|
||||
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// If the resource does not exist in the world, add the default
|
||||
fn try_init_resource<T: ResourceObject + Default>(world: &mut World) {
|
||||
if !world.has_resource::<T>() {
|
||||
world.add_resource_default::<T>();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for MeshPrepNode {
|
||||
fn desc(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
NodeDesc::new(NodeType::Node, None, vec![])
|
||||
}
|
||||
|
||||
fn prepare(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
world: &mut lyra_ecs::World,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let device = &context.device;
|
||||
let queue = &context.queue;
|
||||
|
||||
let last_epoch = world.current_tick();
|
||||
let mut alive_entities = HashSet::new();
|
||||
|
||||
{
|
||||
// prepare the world with resources
|
||||
Self::try_init_resource::<RenderMeshes>(world);
|
||||
Self::try_init_resource::<RenderAssets<MeshBufferStorage>>(world);
|
||||
Self::try_init_resource::<RenderAssets<Arc<GpuMaterial>>>(world);
|
||||
Self::try_init_resource::<FxHashMap<Entity, uuid::Uuid>>(world);
|
||||
|
||||
let mut render_meshes = world
|
||||
.get_resource_mut::<RenderMeshes>()
|
||||
.expect("world missing RenderMeshes resource");
|
||||
render_meshes.clear();
|
||||
}
|
||||
|
||||
let view = world.view_iter::<(
|
||||
Entities,
|
||||
&TransformIndex,
|
||||
Or<(&MeshHandle, TickOf<MeshHandle>), (&SceneHandle, TickOf<SceneHandle>)>,
|
||||
ResMut<RenderMeshes>,
|
||||
ResMut<RenderAssets<MeshBufferStorage>>,
|
||||
ResMut<RenderAssets<Arc<GpuMaterial>>>,
|
||||
ResMut<FxHashMap<Entity, uuid::Uuid>>,
|
||||
)>();
|
||||
|
||||
// iterate over entities with meshes or scenes, uploading their buffers and queueing render jobs
|
||||
for (
|
||||
entity,
|
||||
transform_index,
|
||||
(mesh_pair, scene_pair),
|
||||
mut render_meshes,
|
||||
mut mesh_buffers,
|
||||
mut material_buffers,
|
||||
mut entity_meshes,
|
||||
) in view
|
||||
{
|
||||
alive_entities.insert(entity);
|
||||
|
||||
if let Some((mesh_han, mesh_epoch)) = mesh_pair {
|
||||
if let Some(mesh) = mesh_han.data_ref() {
|
||||
// if process mesh did not just create a new mesh, and the epoch
|
||||
// shows that the scene has changed, verify that the mesh buffers
|
||||
// don't need to be re-sent to the GPU.
|
||||
if !self.process_mesh(
|
||||
device,
|
||||
queue,
|
||||
&mut mesh_buffers,
|
||||
&mut material_buffers,
|
||||
&mut entity_meshes,
|
||||
entity,
|
||||
&mesh,
|
||||
mesh_han.uuid(),
|
||||
) && mesh_epoch == last_epoch
|
||||
{
|
||||
self.check_mesh_buffers(device, queue, &mut mesh_buffers, &mesh_han);
|
||||
}
|
||||
|
||||
let material = mesh.material.as_ref().unwrap().data_ref().unwrap();
|
||||
let shader = material.shader_uuid.unwrap_or(0);
|
||||
let job = RenderJob::new(entity, shader, mesh_han.uuid(), *transform_index);
|
||||
render_meshes.push_back(job);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some((scene_han, scene_epoch)) = scene_pair {
|
||||
if let Some(scene) = scene_han.data_ref() {
|
||||
for (mesh_han, transform_index) in
|
||||
scene.world().view_iter::<(&MeshHandle, &TransformIndex)>()
|
||||
{
|
||||
if let Some(mesh) = mesh_han.data_ref() {
|
||||
// if process mesh did not just create a new mesh, and the epoch
|
||||
// shows that the scene has changed, verify that the mesh buffers
|
||||
// don't need to be re-sent to the GPU.
|
||||
if !self.process_mesh(
|
||||
device,
|
||||
queue,
|
||||
&mut mesh_buffers,
|
||||
&mut material_buffers,
|
||||
&mut entity_meshes,
|
||||
entity,
|
||||
&mesh,
|
||||
mesh_han.uuid(),
|
||||
) && scene_epoch == last_epoch
|
||||
{
|
||||
self.check_mesh_buffers(
|
||||
device,
|
||||
queue,
|
||||
&mut mesh_buffers,
|
||||
&mesh_han,
|
||||
);
|
||||
}
|
||||
|
||||
let material = mesh.material.as_ref().unwrap().data_ref().unwrap();
|
||||
let shader = material.shader_uuid.unwrap_or(0);
|
||||
let job =
|
||||
RenderJob::new(entity, shader, mesh_han.uuid(), *transform_index);
|
||||
render_meshes.push_back(job);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
_: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(transparent)]
|
||||
pub struct RenderAssets<T>(FxHashMap<Uuid, T>);
|
||||
|
||||
impl<T> Deref for RenderAssets<T> {
|
||||
type Target = FxHashMap<Uuid, T>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> DerefMut for RenderAssets<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for RenderAssets<T> {
|
||||
fn default() -> Self {
|
||||
Self(Default::default())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> RenderAssets<T> {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
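// Usage sketch (added for illustration, not part of the original file): `RenderAssets<T>`
// derefs to `FxHashMap<Uuid, T>`, so callers treat it as a UUID-keyed cache of GPU-side data.
// The `insert_once` helper below is hypothetical and only restates the pattern used by
// `process_mesh` and `create_mesh_buffers` above.
fn insert_once<T>(assets: &mut RenderAssets<T>, uuid: Uuid, make: impl FnOnce() -> T) -> &T {
    assets.entry(uuid).or_insert_with(make)
}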
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct GpuMaterial {
|
||||
pub bind_group: Arc<wgpu::BindGroup>,
|
||||
bind_group_layout: Arc<wgpu::BindGroupLayout>,
|
||||
material_properties_buffer: wgpu::Buffer,
|
||||
diffuse_texture: wgpu::Texture,
|
||||
diffuse_texture_sampler: wgpu::Sampler,
|
||||
/* specular_texture: wgpu::Texture,
|
||||
specular_texture_sampler: wgpu::Sampler, */
|
||||
}
|
||||
|
||||
impl GpuMaterial {
|
||||
fn create_bind_group_layout(device: &wgpu::Device) -> Arc<wgpu::BindGroupLayout> {
|
||||
Arc::new(
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("bgl_material"),
|
||||
entries: &[
|
||||
// material properties
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None, /* Some(
|
||||
NonZeroU64::new(mem::size_of::<MaterialPropertiesUniform>() as _)
|
||||
.unwrap(),
|
||||
) */
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// diffuse texture
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
multisampled: false,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// diffuse texture sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
// specular texture
|
||||
/* wgpu::BindGroupLayoutEntry {
|
||||
binding: 3,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: false },
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
multisampled: false,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
// specular texture sampler
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 4,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
|
||||
count: None,
|
||||
}, */
|
||||
],
|
||||
}),
|
||||
)
|
||||
}
|
||||
|
||||
fn texture_desc(label: &str, size: UVec2) -> wgpu::TextureDescriptor {
|
||||
//debug!("Texture desc size: {:?}", size);
|
||||
wgpu::TextureDescriptor {
|
||||
label: Some(label),
|
||||
size: wgpu::Extent3d {
|
||||
width: size.x,
|
||||
height: size.y,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1, // TODO
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: wgpu::TextureFormat::Rgba8UnormSrgb,
|
||||
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
|
||||
view_formats: &[],
|
||||
}
|
||||
}
|
||||
|
||||
fn write_texture(queue: &wgpu::Queue, texture: &wgpu::Texture, img: &lyra_resource::Image) {
|
||||
let dim = img.dimensions();
|
||||
//debug!("Write texture size: {:?}", dim);
|
||||
queue.write_texture(
|
||||
wgpu::ImageCopyTexture {
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
texture: &texture,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
},
|
||||
&img.to_rgba8(),
|
||||
wgpu::ImageDataLayout {
|
||||
offset: 0,
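// 4 bytes per pixel: the texture is created as Rgba8UnormSrgb by `texture_desc` and the
// image is converted with `to_rgba8`, so each row is `4 * width` bytes.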
|
||||
bytes_per_row: Some(4 * dim.0),
|
||||
rows_per_image: Some(dim.1),
|
||||
},
|
||||
wgpu::Extent3d {
|
||||
width: dim.0,
|
||||
height: dim.1,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
fn from_resource(
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
layout: &Arc<wgpu::BindGroupLayout>,
|
||||
mat: &lyra_gltf::Material,
|
||||
) -> Self {
|
||||
//let specular = mat.specular.as_ref().unwrap_or_default();
|
||||
//let specular_
|
||||
|
||||
let prop = MaterialPropertiesUniform {
|
||||
ambient: Vec3::ONE,
|
||||
_padding1: 0,
|
||||
diffuse: Vec3::ONE,
|
||||
shininess: 32.0,
|
||||
specular_factor: 0.0,
|
||||
_padding2: [0; 3],
|
||||
specular_color_factor: Vec3::ZERO,
|
||||
_padding3: 0,
|
||||
};
|
||||
|
||||
let properties_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("buffer_material"),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
contents: bytemuck::bytes_of(&prop),
|
||||
});
|
||||
|
||||
let diffuse_tex = mat.base_color_texture.as_ref().unwrap();
|
||||
let diffuse_tex = diffuse_tex.data_ref().unwrap();
|
||||
let diffuse_tex_img = diffuse_tex.image.data_ref().unwrap();
|
||||
let diffuse_tex_dim = diffuse_tex_img.dimensions();
|
||||
let diffuse_texture = device.create_texture(&Self::texture_desc(
|
||||
"material_diffuse_texture",
|
||||
UVec2::new(diffuse_tex_dim.0, diffuse_tex_dim.1),
|
||||
));
|
||||
let diffuse_tex_view = diffuse_texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
|
||||
let sampler_desc = match &diffuse_tex.sampler {
|
||||
Some(sampler) => {
|
||||
let magf = res_filter_to_wgpu(
|
||||
sampler
|
||||
.mag_filter
|
||||
.unwrap_or(lyra_resource::FilterMode::Linear),
|
||||
);
|
||||
let minf = res_filter_to_wgpu(
|
||||
sampler
|
||||
.min_filter
|
||||
.unwrap_or(lyra_resource::FilterMode::Nearest),
|
||||
);
|
||||
let mipf = res_filter_to_wgpu(
|
||||
sampler
|
||||
.mipmap_filter
|
||||
.unwrap_or(lyra_resource::FilterMode::Nearest),
|
||||
);
|
||||
|
||||
let wrap_u = res_wrap_to_wgpu(sampler.wrap_u);
|
||||
let wrap_v = res_wrap_to_wgpu(sampler.wrap_v);
|
||||
let wrap_w = res_wrap_to_wgpu(sampler.wrap_w);
|
||||
|
||||
wgpu::SamplerDescriptor {
|
||||
address_mode_u: wrap_u,
|
||||
address_mode_v: wrap_v,
|
||||
address_mode_w: wrap_w,
|
||||
mag_filter: magf,
|
||||
min_filter: minf,
|
||||
mipmap_filter: mipf,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
None => wgpu::SamplerDescriptor {
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Nearest,
|
||||
mipmap_filter: wgpu::FilterMode::Nearest,
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
let diffuse_sampler = device.create_sampler(&sampler_desc);
|
||||
|
||||
Self::write_texture(queue, &diffuse_texture, &diffuse_tex_img);
|
||||
|
||||
debug!("TODO: specular texture");
|
||||
|
||||
let bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("bg_material"),
|
||||
layout: &layout,
|
||||
entries: &[
|
||||
// material properties
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: &properties_buffer,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
// diffuse texture
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::TextureView(&diffuse_tex_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Sampler(&diffuse_sampler),
|
||||
},
|
||||
// TODO: specular textures
|
||||
],
|
||||
});
|
||||
|
||||
Self {
|
||||
bind_group: Arc::new(bg),
|
||||
bind_group_layout: layout.clone(),
|
||||
material_properties_buffer: properties_buffer,
|
||||
diffuse_texture,
|
||||
diffuse_texture_sampler: diffuse_sampler,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Uniform for MaterialProperties in a shader
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
pub struct MaterialPropertiesUniform {
|
||||
ambient: glam::Vec3,
|
||||
_padding1: u32,
|
||||
diffuse: glam::Vec3,
|
||||
shininess: f32,
|
||||
specular_factor: f32,
|
||||
_padding2: [u32; 3],
|
||||
specular_color_factor: glam::Vec3,
|
||||
_padding3: u32,
|
||||
}
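// Layout check (an added sketch, not part of the original): the explicit `_padding` fields
// keep this #[repr(C)] struct in step with the 16-byte alignment WGSL applies to vec3 uniform
// members, which is assumed to make the struct exactly 64 bytes. A const assertion catches drift:
const _: () = assert!(std::mem::size_of::<MaterialPropertiesUniform>() == 64);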
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct RenderMeshes(VecDeque<RenderJob>);
|
||||
|
||||
impl Deref for RenderMeshes {
|
||||
type Target = VecDeque<RenderJob>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for RenderMeshes {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
|
@ -1,507 +0,0 @@
|
|||
use std::{rc::Rc, sync::Arc};
|
||||
|
||||
use lyra_ecs::{AtomicRef, ResourceData};
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use tracing::{instrument, warn};
|
||||
|
||||
use crate::render::{
|
||||
desc_buf_lay::DescVertexBufferLayout,
|
||||
graph::{Node, NodeDesc, NodeType, RenderGraph, RenderGraphContext},
|
||||
resource::{FragmentState, RenderPipeline, RenderPipelineDescriptor, Shader, VertexState},
|
||||
texture::RenderTexture,
|
||||
transform_buffer_storage::TransformBuffers,
|
||||
vertex::Vertex,
|
||||
};
|
||||
|
||||
use super::{
|
||||
BasePassSlots, LightBasePassSlots, LightCullComputePassSlots, MeshBufferStorage, RenderAssets, RenderMeshes, ShadowMapsPassSlots
|
||||
};
|
||||
|
||||
#[derive(Debug, Hash, Clone, Default, PartialEq, RenderGraphLabel)]
|
||||
pub struct MeshesPassLabel;
|
||||
|
||||
#[derive(Debug, Hash, Clone, PartialEq, RenderGraphLabel)]
|
||||
pub enum MeshesPassSlots {
|
||||
Material,
|
||||
}
|
||||
|
||||
/// Stores the bind group and bind group layout for the shadow atlas texture
|
||||
struct ShadowsAtlasBgPair {
|
||||
layout: Arc<wgpu::BindGroupLayout>,
|
||||
bg: Arc<wgpu::BindGroup>,
|
||||
}
|
||||
|
||||
//#[derive(Default)]
|
||||
#[allow(dead_code)]
|
||||
pub struct MeshPass {
|
||||
default_texture: Option<RenderTexture>,
|
||||
|
||||
pipeline: Option<RenderPipeline>,
|
||||
material_bgl: Arc<wgpu::BindGroupLayout>,
|
||||
|
||||
// TODO: find a better way to extract these resources from the main world to be used in the
|
||||
// render stage.
|
||||
transform_buffers: Option<ResourceData>,
|
||||
render_meshes: Option<ResourceData>,
|
||||
mesh_buffers: Option<ResourceData>,
|
||||
|
||||
shadows_atlas: Option<ShadowsAtlasBgPair>,
|
||||
}
|
||||
|
||||
impl MeshPass {
|
||||
pub fn new(material_bgl: Arc<wgpu::BindGroupLayout>) -> Self {
|
||||
Self {
|
||||
default_texture: None,
|
||||
pipeline: None,
|
||||
material_bgl,
|
||||
|
||||
transform_buffers: None,
|
||||
render_meshes: None,
|
||||
mesh_buffers: None,
|
||||
|
||||
shadows_atlas: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn transform_buffers(&self) -> AtomicRef<TransformBuffers> {
|
||||
self.transform_buffers.as_ref().unwrap().get()
|
||||
}
|
||||
|
||||
fn render_meshes(&self) -> AtomicRef<RenderMeshes> {
|
||||
self.render_meshes.as_ref().unwrap().get()
|
||||
}
|
||||
|
||||
fn mesh_buffers(&self) -> AtomicRef<RenderAssets<MeshBufferStorage>> {
|
||||
self.mesh_buffers.as_ref().unwrap().get()
|
||||
}
|
||||
}
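// Note: the accessors above re-borrow the `ResourceData` handles that `prepare` clones out of
// the main world, letting `execute` read `TransformBuffers`, `RenderMeshes`, and the mesh buffer
// cache without holding `&mut World` during the render stage.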
|
||||
|
||||
impl Node for MeshPass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
// load the default texture
|
||||
//let bytes = include_bytes!("../../default_texture.png");
|
||||
//self.default_texture = Some(RenderTexture::from_bytes(device, &graph.queue, texture_bind_group_layout.clone(), bytes, "default_texture").unwrap());
|
||||
|
||||
NodeDesc::new(
|
||||
NodeType::Render,
|
||||
None,
|
||||
vec![
|
||||
//(&MeshesPassSlots::Material, material_bg, Some(material_bgl)),
|
||||
],
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(skip(self, graph, world))]
|
||||
fn prepare(
|
||||
&mut self,
|
||||
graph: &mut RenderGraph,
|
||||
world: &mut lyra_ecs::World,
|
||||
_: &mut RenderGraphContext,
|
||||
) {
|
||||
if self.pipeline.is_none() {
|
||||
let shader_mod = graph.register_shader(include_str!("../../shaders/base.wgsl"))
|
||||
.expect("failed to register shader").expect("base shader missing module");
|
||||
let shader_src = graph.preprocess_shader(&shader_mod)
|
||||
.expect("failed to preprocess shader");
|
||||
|
||||
let device = graph.device();
|
||||
let surface_config_format = graph.view_target().format();
|
||||
|
||||
let atlas_view = graph
|
||||
.slot_value(ShadowMapsPassSlots::ShadowAtlasTextureView)
|
||||
.expect("missing ShadowMapsPassSlots::ShadowAtlasTextureView")
|
||||
.as_texture_view()
|
||||
.unwrap();
|
||||
let atlas_sampler = graph
|
||||
.slot_value(ShadowMapsPassSlots::ShadowAtlasSampler)
|
||||
.expect("missing ShadowMapsPassSlots::ShadowAtlasSampler")
|
||||
.as_sampler()
|
||||
.unwrap();
|
||||
let atlas_sampler_compare = graph
|
||||
.slot_value(ShadowMapsPassSlots::ShadowAtlasSamplerComparison)
|
||||
.expect("missing ShadowMapsPassSlots::ShadowAtlasSamplerComparison")
|
||||
.as_sampler()
|
||||
.unwrap();
|
||||
let shadow_settings_buf = graph
|
||||
.slot_value(ShadowMapsPassSlots::ShadowSettingsUniform)
|
||||
.expect("missing ShadowMapsPassSlots::ShadowSettingsUniform")
|
||||
.as_buffer()
|
||||
.unwrap();
|
||||
let light_uniform_buf = graph
|
||||
.slot_value(ShadowMapsPassSlots::ShadowLightUniformsBuffer)
|
||||
.expect("missing ShadowMapsPassSlots::ShadowLightUniformsBuffer")
|
||||
.as_buffer()
|
||||
.unwrap();
|
||||
let pcf_poisson_disc = graph
|
||||
.slot_value(ShadowMapsPassSlots::PcfPoissonDiscBuffer)
|
||||
.expect("missing ShadowMapsPassSlots::PcfPoissonDiscBuffer")
|
||||
.as_buffer()
|
||||
.unwrap();
|
||||
let pcf_poisson_disc_3d = graph
|
||||
.slot_value(ShadowMapsPassSlots::PcfPoissonDiscBuffer3d)
|
||||
.expect("missing ShadowMapsPassSlots::PcfPoissonDiscBuffer3d")
|
||||
.as_buffer()
|
||||
.unwrap();
|
||||
let pcss_poisson_disc = graph
|
||||
.slot_value(ShadowMapsPassSlots::PcssPoissonDiscBuffer)
|
||||
.expect("missing ShadowMapsPassSlots::PcssPoissonDiscBuffer")
|
||||
.as_buffer()
|
||||
.unwrap();
|
||||
|
||||
let atlas_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("bgl_shadows_atlas"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Depth,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
multisampled: false,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Comparison),
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 3,
|
||||
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 4,
|
||||
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: true },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 5,
|
||||
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: true },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 6,
|
||||
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: true },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 7,
|
||||
visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: true },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
let atlas_bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("bg_shadows_atlas"),
|
||||
layout: &atlas_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(atlas_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(atlas_sampler),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Sampler(atlas_sampler_compare),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 3,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: shadow_settings_buf,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 4,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: light_uniform_buf,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 5,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: pcf_poisson_disc,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 6,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: pcf_poisson_disc_3d,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 7,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: pcss_poisson_disc,
|
||||
offset: 0,
|
||||
size: None,
|
||||
}),
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
self.shadows_atlas = Some(ShadowsAtlasBgPair {
|
||||
layout: Arc::new(atlas_layout),
|
||||
bg: Arc::new(atlas_bg),
|
||||
});
|
||||
|
||||
let camera_bgl = graph.bind_group_layout(BasePassSlots::Camera);
|
||||
let lights_bgl = graph.bind_group_layout(LightBasePassSlots::Lights);
|
||||
let light_grid_bgl =
|
||||
graph.bind_group_layout(LightCullComputePassSlots::LightIndicesGridGroup);
|
||||
let atlas_bgl = self.shadows_atlas.as_ref().unwrap().layout.clone();
|
||||
|
||||
let shader = Rc::new(Shader {
|
||||
label: Some(shader_mod.into()),
|
||||
source: shader_src,
|
||||
});
|
||||
|
||||
let transforms = world
|
||||
.get_resource_data::<TransformBuffers>()
|
||||
.expect("Missing transform buffers");
|
||||
self.transform_buffers = Some(transforms.clone());
|
||||
|
||||
let render_meshes = world
|
||||
.get_resource_data::<RenderMeshes>()
|
||||
.expect("Missing RenderMeshes resource");
|
||||
self.render_meshes = Some(render_meshes.clone());
|
||||
|
||||
let mesh_buffers = world
|
||||
.get_resource_data::<RenderAssets<MeshBufferStorage>>()
|
||||
.expect("Missing mesh buffers");
|
||||
self.mesh_buffers = Some(mesh_buffers.clone());
|
||||
|
||||
let transforms = transforms.get::<TransformBuffers>();
|
||||
|
||||
self.pipeline = Some(RenderPipeline::create(
|
||||
device,
|
||||
&RenderPipelineDescriptor {
|
||||
label: Some("meshes".into()),
|
||||
layouts: vec![
|
||||
self.material_bgl.clone(),
|
||||
transforms.bindgroup_layout.clone(),
|
||||
camera_bgl.clone(),
|
||||
lights_bgl.clone(),
|
||||
light_grid_bgl.clone(),
|
||||
atlas_bgl,
|
||||
],
|
||||
push_constant_ranges: vec![],
|
||||
vertex: VertexState {
|
||||
module: shader.clone(),
|
||||
entry_point: "vs_main".into(),
|
||||
buffers: vec![Vertex::desc().into()],
|
||||
},
|
||||
fragment: Some(FragmentState {
|
||||
module: shader,
|
||||
entry_point: "fs_main".into(),
|
||||
targets: vec![Some(wgpu::ColorTargetState {
|
||||
format: surface_config_format,
|
||||
blend: Some(wgpu::BlendState::REPLACE),
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
})],
|
||||
}),
|
||||
depth_stencil: Some(wgpu::DepthStencilState {
|
||||
format: RenderTexture::DEPTH_FORMAT,
|
||||
depth_write_enabled: true,
|
||||
depth_compare: wgpu::CompareFunction::Less,
|
||||
stencil: wgpu::StencilState::default(), // TODO: stencil buffer
|
||||
bias: wgpu::DepthBiasState::default(),
|
||||
}),
|
||||
primitive: wgpu::PrimitiveState {
|
||||
cull_mode: Some(wgpu::Face::Back),
|
||||
..Default::default()
|
||||
},
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
},
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let encoder = context.encoder.as_mut().unwrap();
|
||||
|
||||
/* let view = graph
|
||||
.slot_value(BasePassSlots::WindowTextureView)
|
||||
.unwrap()
|
||||
.as_texture_view()
|
||||
.expect("BasePassSlots::WindowTextureView was not a TextureView slot"); */
|
||||
|
||||
let vt = graph.view_target();
|
||||
let view = vt.render_view();
|
||||
|
||||
let depth_view = graph
|
||||
.slot_value(BasePassSlots::DepthTextureView)
|
||||
.unwrap()
|
||||
.as_texture_view()
|
||||
.expect("BasePassSlots::DepthTextureView was not a TextureView slot");
|
||||
|
||||
let camera_bg = graph.bind_group(BasePassSlots::Camera);
|
||||
|
||||
let lights_bg = graph.bind_group(LightBasePassSlots::Lights);
|
||||
|
||||
let light_grid_bg = graph.bind_group(LightCullComputePassSlots::LightIndicesGridGroup);
|
||||
|
||||
let shadows_atlas_bg = &self.shadows_atlas.as_ref().unwrap().bg;
|
||||
|
||||
//let material_bg = graph.bind_group(MeshesPassSlots::Material);
|
||||
|
||||
/* let pipeline = graph.pipeline(context.label.clone())
|
||||
.expect("Failed to find pipeline for MeshPass"); */
|
||||
let pipeline = self.pipeline.as_ref().unwrap();
|
||||
|
||||
let transforms = self.transform_buffers();
|
||||
let render_meshes = self.render_meshes();
|
||||
let mesh_buffers = self.mesh_buffers();
|
||||
|
||||
{
|
||||
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("Render Pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view,
|
||||
resolve_target: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(wgpu::Color {
|
||||
r: 0.1,
|
||||
g: 0.2,
|
||||
b: 0.3,
|
||||
a: 1.0,
|
||||
}),
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
// enable depth buffer
|
||||
depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
|
||||
view: depth_view,
|
||||
depth_ops: Some(wgpu::Operations {
|
||||
load: wgpu::LoadOp::Clear(1.0),
|
||||
store: wgpu::StoreOp::Store,
|
||||
}),
|
||||
stencil_ops: None,
|
||||
}),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
pass.set_pipeline(pipeline);
|
||||
|
||||
//let default_texture = self.default_texture.as_ref().unwrap();
|
||||
|
||||
for job in render_meshes.iter() {
|
||||
// get the mesh (containing vertices) and the buffers from storage
|
||||
let buffers = mesh_buffers.get(&job.mesh_uuid);
|
||||
if buffers.is_none() {
|
||||
warn!("Skipping job since its mesh is missing {:?}", job.mesh_uuid);
|
||||
continue;
|
||||
}
|
||||
let buffers = buffers.unwrap();
|
||||
|
||||
// Bind the optional texture
|
||||
/* if let Some(tex) = buffers.material.as_ref()
|
||||
.and_then(|m| m.diffuse_texture.as_ref()) {
|
||||
pass.set_bind_group(0, tex.bind_group(), &[]);
|
||||
} else {
|
||||
pass.set_bind_group(0, default_texture.bind_group(), &[]);
|
||||
}
|
||||
|
||||
if let Some(tex) = buffers.material.as_ref()
|
||||
.and_then(|m| m.specular.as_ref())
|
||||
.and_then(|s| s.texture.as_ref().or(s.color_texture.as_ref())) {
|
||||
pass.set_bind_group(5, tex.bind_group(), &[]);
|
||||
} else {
|
||||
pass.set_bind_group(5, default_texture.bind_group(), &[]);
|
||||
} */
|
||||
if let Some(mat) = buffers.material.as_ref() {
|
||||
pass.set_bind_group(0, &mat.bind_group, &[]);
|
||||
} else {
|
||||
todo!("cannot render mesh without material");
|
||||
}
|
||||
|
||||
// Get the bindgroup for job's transform and bind to it using an offset.
|
||||
let bindgroup = transforms.bind_group(job.transform_id);
|
||||
let offset = transforms.buffer_offset(job.transform_id);
|
||||
pass.set_bind_group(1, bindgroup, &[offset]);
|
||||
|
||||
pass.set_bind_group(2, camera_bg, &[]);
|
||||
pass.set_bind_group(3, lights_bg, &[]);
|
||||
//pass.set_bind_group(4, material_bg, &[]);
|
||||
|
||||
pass.set_bind_group(4, light_grid_bg, &[]);
|
||||
|
||||
pass.set_bind_group(5, shadows_atlas_bg, &[]);
|
||||
|
||||
// if this mesh uses indices, use them to draw the mesh
|
||||
if let Some((idx_type, indices)) = buffers.buffer_indices.as_ref() {
|
||||
let indices_len = indices.count() as u32;
|
||||
|
||||
pass.set_vertex_buffer(
|
||||
buffers.buffer_vertex.slot(),
|
||||
buffers.buffer_vertex.buffer().slice(..),
|
||||
);
|
||||
pass.set_index_buffer(indices.buffer().slice(..), *idx_type);
|
||||
pass.draw_indexed(0..indices_len, 0, 0..1);
|
||||
} else {
|
||||
let vertex_count = buffers.buffer_vertex.count();
|
||||
|
||||
pass.set_vertex_buffer(
|
||||
buffers.buffer_vertex.slot(),
|
||||
buffers.buffer_vertex.buffer().slice(..),
|
||||
);
|
||||
pass.draw(0..vertex_count as u32, 0..1);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
mod light_cull_compute;
|
||||
pub use light_cull_compute::*;
|
||||
|
||||
mod base;
|
||||
pub use base::*;
|
||||
|
||||
mod meshes;
|
||||
pub use meshes::*;
|
||||
|
||||
mod light_base;
|
||||
pub use light_base::*;
|
||||
|
||||
mod present_pass;
|
||||
pub use present_pass::*;
|
||||
|
||||
mod init;
|
||||
pub use init::*;
|
||||
|
||||
mod tint;
|
||||
pub use tint::*;
|
||||
|
||||
mod fxaa;
|
||||
pub use fxaa::*;
|
||||
|
||||
mod shadows;
|
||||
pub use shadows::*;
|
||||
|
||||
mod mesh_prepare;
|
||||
pub use mesh_prepare::*;
|
||||
|
||||
mod transform;
|
||||
pub use transform::*;
|
|
@ -1,44 +0,0 @@
|
|||
use std::hash::Hash;
|
||||
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
|
||||
use crate::render::graph::{Node, NodeDesc, NodeType, RenderGraph, RenderGraphContext};
|
||||
|
||||
#[derive(Debug, Clone, Hash, PartialEq, RenderGraphLabel)]
|
||||
pub struct PresentPassLabel;
|
||||
|
||||
/// Presents the final frame to the screen.
///
/// Copies the view target's result into the primary render target and presents it.
|
||||
#[derive(Default, Debug)]
|
||||
pub struct PresentPass;
|
||||
|
||||
impl PresentPass {
|
||||
pub fn new() -> Self {
|
||||
Self
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for PresentPass {
|
||||
fn desc(&mut self, _graph: &mut crate::render::graph::RenderGraph) -> crate::render::graph::NodeDesc {
|
||||
NodeDesc::new(
|
||||
NodeType::Presenter,
|
||||
None,
|
||||
vec![],
|
||||
)
|
||||
}
|
||||
|
||||
fn prepare(&mut self, _graph: &mut RenderGraph, _world: &mut lyra_ecs::World, _context: &mut RenderGraphContext) {
|
||||
|
||||
}
|
||||
|
||||
fn execute(&mut self, graph: &mut crate::render::graph::RenderGraph, _desc: &crate::render::graph::NodeDesc, context: &mut crate::render::graph::RenderGraphContext) {
|
||||
let mut vt = graph.view_target_mut();
|
||||
vt.copy_to_primary(context.encoder.as_mut().unwrap());
|
||||
context.submit_encoder();
|
||||
|
||||
let frame = vt.primary.frame.take()
|
||||
.expect("ViewTarget.primary was already presented");
|
||||
frame.present();
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,168 +0,0 @@
|
|||
use std::{collections::HashMap, rc::Rc, sync::Arc};
|
||||
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
|
||||
use crate::render::{
|
||||
graph::{Node, NodeDesc, NodeType},
|
||||
resource::{FragmentState, PipelineDescriptor, RenderPipelineDescriptor, Shader, VertexState},
|
||||
};
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
pub struct TintPassLabel;
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TintPass {
|
||||
target_sampler: Option<wgpu::Sampler>,
|
||||
bgl: Option<Arc<wgpu::BindGroupLayout>>,
|
||||
/// Store bind groups for the input textures.
|
||||
/// The texture may change due to resizes, or changes to the view target chain
|
||||
/// from other nodes.
|
||||
bg_cache: HashMap<wgpu::Id<wgpu::TextureView>, wgpu::BindGroup>,
|
||||
}
|
||||
|
||||
impl TintPass {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Node for TintPass {
|
||||
fn desc(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
let device = &graph.device;
|
||||
|
||||
let bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
label: Some("tint_bgl"),
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: false },
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
multisampled: false,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::NonFiltering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
});
|
||||
let bgl = Arc::new(bgl);
|
||||
self.bgl = Some(bgl.clone());
|
||||
self.target_sampler = Some(device.create_sampler(&wgpu::SamplerDescriptor::default()));
|
||||
|
||||
let shader = Rc::new(Shader {
|
||||
label: Some("tint_shader".into()),
|
||||
source: include_str!("../../shaders/tint.wgsl").to_string(),
|
||||
});
|
||||
|
||||
let vt = graph.view_target();
|
||||
|
||||
|
||||
NodeDesc::new(
|
||||
NodeType::Render,
|
||||
Some(PipelineDescriptor::Render(RenderPipelineDescriptor {
|
||||
label: Some("tint_pass".into()),
|
||||
layouts: vec![bgl.clone()],
|
||||
push_constant_ranges: vec![],
|
||||
vertex: VertexState {
|
||||
module: shader.clone(),
|
||||
entry_point: "vs_main".into(),
|
||||
buffers: vec![],
|
||||
},
|
||||
fragment: Some(FragmentState {
|
||||
module: shader,
|
||||
entry_point: "fs_main".into(),
|
||||
targets: vec![Some(wgpu::ColorTargetState {
|
||||
format: vt.format(),
|
||||
blend: Some(wgpu::BlendState::REPLACE),
|
||||
write_mask: wgpu::ColorWrites::ALL,
|
||||
})],
|
||||
}),
|
||||
depth_stencil: None,
|
||||
primitive: wgpu::PrimitiveState::default(),
|
||||
multisample: wgpu::MultisampleState::default(),
|
||||
multiview: None,
|
||||
})),
|
||||
vec![],
|
||||
)
|
||||
}
|
||||
|
||||
fn prepare(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
_: &mut lyra_ecs::World,
|
||||
_: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
//todo!()
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
graph: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let pipeline = graph
|
||||
.pipeline(context.label.clone())
|
||||
.expect("Failed to find pipeline for TintPass");
|
||||
|
||||
let mut vt = graph.view_target_mut();
|
||||
let chain = vt.get_chain();
|
||||
let source_view = chain.source.frame_view.as_ref().unwrap();
|
||||
let dest_view = chain.dest.frame_view.as_ref().unwrap();
|
||||
|
||||
let bg = self
|
||||
.bg_cache
|
||||
.entry(source_view.global_id())
|
||||
.or_insert_with(|| {
|
||||
graph
|
||||
.device()
|
||||
.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
label: Some("tint_bg"),
|
||||
layout: self.bgl.as_ref().unwrap(),
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(source_view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(
|
||||
self.target_sampler.as_ref().unwrap(),
|
||||
),
|
||||
},
|
||||
],
|
||||
})
|
||||
});
|
||||
|
||||
{
|
||||
let encoder = context.encoder.as_mut().unwrap();
|
||||
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
|
||||
label: Some("tint_pass"),
|
||||
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
|
||||
view: dest_view,
|
||||
resolve_target: None,
|
||||
ops: wgpu::Operations {
|
||||
load: wgpu::LoadOp::Load,
|
||||
store: wgpu::StoreOp::Store,
|
||||
},
|
||||
})],
|
||||
depth_stencil_attachment: None,
|
||||
timestamp_writes: None,
|
||||
occlusion_query_set: None,
|
||||
});
|
||||
pass.set_pipeline(pipeline.as_render());
|
||||
|
||||
pass.set_bind_group(0, bg, &[]);
|
||||
pass.draw(0..3, 0..1);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,212 +0,0 @@
|
|||
use lyra_ecs::{
|
||||
query::{
|
||||
filter::Or,
|
||||
Entities,
|
||||
},
|
||||
Component, Entity,
|
||||
};
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use lyra_math::Transform;
|
||||
use lyra_resource::ResHandle;
|
||||
use lyra_scene::{SceneGraph, WorldTransform};
|
||||
use tracing::debug;
|
||||
|
||||
use crate::{
|
||||
render::{
|
||||
graph::{Node, NodeDesc, NodeType},
|
||||
transform_buffer_storage::{TransformBuffers, TransformIndex},
|
||||
},
|
||||
DeltaTime,
|
||||
};
|
||||
|
||||
/// An interpolated transform.
|
||||
///
|
||||
/// This transform is interpolated between frames to make movement appear smoother when the
|
||||
/// transform is updated less often than rendering.
|
||||
#[derive(Clone, Debug, Component)]
|
||||
pub struct InterpTransform {
|
||||
last_transform: Transform,
|
||||
alpha: f32,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
pub struct TransformsNodeLabel;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct TransformsNode {}
|
||||
|
||||
impl TransformsNode {
|
||||
pub fn new() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
fn process_component_queue(world: &mut lyra_ecs::World, component_queue: Vec<(Entity, Option<InterpTransform>, Option<TransformIndex>)>) {
|
||||
for (en, interp, index) in component_queue {
|
||||
println!("writing index {:?} for entity {}", index, en.id().0);
|
||||
|
||||
match (interp, index) {
|
||||
(None, None) => unreachable!(),
|
||||
(None, Some(index)) => world.insert(en, index),
|
||||
(Some(interp), None) => world.insert(en, interp),
|
||||
(Some(interp), Some(index)) => world.insert(en, (interp, index)),
|
||||
}
|
||||
}
|
||||
}
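// The queue above exists because `update_transforms` collects these components while a world
// view is still borrowing the world; the inserts are deferred until the view is dropped to
// avoid a second mutable borrow.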
|
||||
|
||||
fn update_transforms(
|
||||
device: &wgpu::Device,
|
||||
queue: &wgpu::Queue,
|
||||
limits: &wgpu::Limits,
|
||||
world: &mut lyra_ecs::World,
|
||||
delta_time: DeltaTime,
|
||||
buffers: &mut TransformBuffers,
|
||||
parent_transform: Transform,
|
||||
) {
|
||||
let mut component_queue = vec![];
|
||||
|
||||
let view = world.view_iter::<(
|
||||
Entities,
|
||||
Or<&WorldTransform, &Transform>,
|
||||
Option<&mut InterpTransform>,
|
||||
Option<&TransformIndex>,
|
||||
Option<&ResHandle<SceneGraph>>,
|
||||
)>();
|
||||
|
||||
for (entity, transform, interp_tran, transform_index, scene_graph) in view {
|
||||
// expand the transform buffers if they need to be.
|
||||
if buffers.needs_expand() {
|
||||
debug!("Expanding transform buffers");
|
||||
buffers.expand_buffers(device);
|
||||
}
|
||||
|
||||
// Get the world transform of the entity, or fall back to its local transform
|
||||
let transform = match transform {
|
||||
(None, None) => unreachable!(),
|
||||
(None, Some(t)) => *t,
|
||||
(Some(wt), None) => **wt,
|
||||
// Assume world transform since it *should* be updated by world systems
|
||||
(Some(wt), Some(_)) => **wt,
|
||||
};
|
||||
// offset this transform by its parent
|
||||
let transform = transform + parent_transform;
|
||||
|
||||
// Interpolate the transform for this entity using a component.
|
||||
// If the entity does not have the component then it will be queued to be added
|
||||
// to it after all the entities are prepared for rendering.
|
||||
let transform = match interp_tran {
|
||||
Some(mut interp_transform) => {
|
||||
// found in https://youtu.be/YJB1QnEmlTs?t=472
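// Frame-rate independent smoothing: `alpha` is raised to the power of the frame's delta time,
// so longer frames push the blend factor toward 1 and the interpolated transform snaps closer
// to the latest target.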
|
||||
interp_transform.alpha = 1.0 - interp_transform.alpha.powf(*delta_time);
|
||||
|
||||
interp_transform.last_transform = interp_transform
|
||||
.last_transform
|
||||
.lerp(transform, interp_transform.alpha);
|
||||
interp_transform.last_transform
|
||||
}
|
||||
None => {
|
||||
let interp = InterpTransform {
|
||||
last_transform: transform,
|
||||
alpha: 0.5,
|
||||
};
|
||||
component_queue.push((entity, Some(interp), None));
|
||||
transform
|
||||
}
|
||||
};
|
||||
|
||||
// Get the TransformIndex from the entity, or reserve a new one if the entity doesn't have
|
||||
// the component.
|
||||
let index = match transform_index {
|
||||
Some(i) => *i,
|
||||
None => {
|
||||
let i = buffers.reserve_transform(&device);
|
||||
debug!(
|
||||
"Reserved transform index {:?} for entity {}",
|
||||
i,
|
||||
entity.id().0
|
||||
);
|
||||
|
||||
component_queue.push((entity, None, Some(i)));
|
||||
i
|
||||
}
|
||||
};
|
||||
|
||||
// TODO: only update if the transform changed.
|
||||
buffers.update(
|
||||
&queue,
|
||||
index,
|
||||
transform.calculate_mat4(),
|
||||
glam::Mat3::from_quat(transform.rotation),
|
||||
);
|
||||
|
||||
if let Some(scene) = scene_graph {
|
||||
if let Some(mut scene) = scene.data_mut() {
|
||||
|
||||
update_transforms(
|
||||
device,
|
||||
queue,
|
||||
limits,
|
||||
scene.world_mut(),
|
||||
delta_time,
|
||||
buffers,
|
||||
transform,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
process_component_queue(world, component_queue);
|
||||
}
|
||||
|
||||
impl Node for TransformsNode {
|
||||
fn desc(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
) -> crate::render::graph::NodeDesc {
|
||||
NodeDesc::new(NodeType::Node, None, vec![])
|
||||
}
|
||||
|
||||
fn prepare(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
world: &mut lyra_ecs::World,
|
||||
context: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
let device = &context.device;
|
||||
let queue = &context.queue;
|
||||
let render_limits = device.limits();
|
||||
|
||||
// prepare the world with resources
|
||||
if !world.has_resource::<TransformBuffers>() {
|
||||
let buffers = TransformBuffers::new(device);
|
||||
world.add_resource(buffers);
|
||||
}
|
||||
|
||||
// I have to do this weird garbage to borrow the `TransformBuffers`
|
||||
// without running into a borrow checker error from passing `world` as mutable.
|
||||
// This is safe since I know that the recursive function isn't accessing this
|
||||
// TransformBuffers, or any other ones in other worlds.
|
||||
let buffers = world.get_resource_data::<TransformBuffers>()
|
||||
.map(|r| r.clone()).unwrap();
|
||||
let mut buffers = buffers.get_mut();
|
||||
let dt = world.get_resource::<DeltaTime>().unwrap().clone();
|
||||
|
||||
update_transforms(
|
||||
&device,
|
||||
&queue,
|
||||
&render_limits,
|
||||
world,
|
||||
dt,
|
||||
&mut buffers,
|
||||
Transform::default(),
|
||||
);
|
||||
}
|
||||
|
||||
fn execute(
|
||||
&mut self,
|
||||
_: &mut crate::render::graph::RenderGraph,
|
||||
_: &crate::render::graph::NodeDesc,
|
||||
_: &mut crate::render::graph::RenderGraphContext,
|
||||
) {
|
||||
}
|
||||
}
|
|
@ -1,355 +0,0 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use tracing::debug;
|
||||
|
||||
use crate::math;
|
||||
|
||||
enum RenderTargetInner {
|
||||
Surface {
|
||||
/// The surface that will be rendered to.
|
||||
///
|
||||
/// You can create a new surface with a `'static` lifetime if you have an `Arc<Window>`:
|
||||
/// ```nobuild
|
||||
/// let window = Arc::new(window);
|
||||
/// let surface = instance.create_surface(Arc::clone(&window))?;
|
||||
/// ```
|
||||
surface: wgpu::Surface<'static>,
|
||||
/// The configuration of the surface render target.
|
||||
config: wgpu::SurfaceConfiguration,
|
||||
},
|
||||
Texture {
|
||||
/// The texture that will be rendered to.
|
||||
texture: Arc<wgpu::Texture>,
|
||||
}
|
||||
}
|
||||
|
||||
/// A render target that is a surface or a texture.
|
||||
#[repr(transparent)]
|
||||
pub struct RenderTarget(RenderTargetInner);
|
||||
|
||||
impl From<wgpu::Texture> for RenderTarget {
|
||||
fn from(value: wgpu::Texture) -> Self {
|
||||
Self(RenderTargetInner::Texture { texture: Arc::new(value) })
|
||||
}
|
||||
}
|
||||
|
||||
impl RenderTarget {
|
||||
pub fn from_surface(surface: wgpu::Surface<'static>, config: wgpu::SurfaceConfiguration) -> Self {
|
||||
Self(RenderTargetInner::Surface { surface, config })
|
||||
}
|
||||
|
||||
pub fn new_texture(device: &wgpu::Device, format: wgpu::TextureFormat, size: math::UVec2) -> Self {
|
||||
let tex = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: None,
|
||||
size: wgpu::Extent3d {
|
||||
width: size.x,
|
||||
height: size.y,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format,
|
||||
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_DST | wgpu::TextureUsages::COPY_SRC,
|
||||
view_formats: &[],
|
||||
});
|
||||
|
||||
Self(RenderTargetInner::Texture { texture: Arc::new(tex) })
|
||||
}
|
||||
|
||||
pub fn format(&self) -> wgpu::TextureFormat {
|
||||
match &self.0 {
|
||||
RenderTargetInner::Surface { config, .. } => config.format,
|
||||
RenderTargetInner::Texture { texture } => texture.format(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn size(&self) -> math::UVec2 {
|
||||
match &self.0 {
|
||||
RenderTargetInner::Surface { config, .. } => math::UVec2::new(config.width, config.height),
|
||||
RenderTargetInner::Texture { texture } => {
|
||||
let s = texture.size();
|
||||
math::UVec2::new(s.width, s.height)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the frame texture of the [`RenderTarget`]
|
||||
///
|
||||
/// If this target is a surface and the frame texture was already retrieved from the swap
|
||||
/// chain, a [`wgpu::SurfaceError`] error will be returned.
|
||||
pub fn frame_texture(&self) -> Result<FrameTexture, wgpu::SurfaceError> {
|
||||
match &self.0 {
|
||||
RenderTargetInner::Surface { surface, .. } => Ok(FrameTexture::Surface(surface.get_current_texture()?)),
|
||||
RenderTargetInner::Texture { texture } => Ok(FrameTexture::Texture(texture.clone())),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn resize(&mut self, device: &wgpu::Device, new_size: math::UVec2) {
|
||||
match &mut self.0 {
|
||||
RenderTargetInner::Surface { surface, config } => {
|
||||
config.width = new_size.x;
|
||||
config.height = new_size.y;
|
||||
surface.configure(device, config);
|
||||
},
|
||||
RenderTargetInner::Texture { texture } => {
|
||||
let format = texture.format();
|
||||
let size = self.size();
|
||||
|
||||
*self = Self::new_texture(device, format, size);
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Create the frame of the RenderTarget.
|
||||
///
|
||||
/// If this target is a surface and the frame texture was already retrieved from the
|
||||
/// swap chain, this currently panics (see the TODO below about returning the error to the caller).
|
||||
pub fn create_frame(&self) -> Frame {
|
||||
let texture = self.frame_texture()
|
||||
.expect("failed to create frame texture"); // TODO: should be returned to the user
|
||||
let size = self.size();
|
||||
|
||||
Frame {
|
||||
size,
|
||||
texture,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub enum FrameTexture {
|
||||
Surface(wgpu::SurfaceTexture),
|
||||
Texture(Arc<wgpu::Texture>),
|
||||
}
|
||||
|
||||
/// Represents the current frame that is being rendered to.
|
||||
//#[allow(dead_code)]
|
||||
pub struct Frame {
|
||||
pub(crate) size: math::UVec2,
|
||||
pub(crate) texture: FrameTexture,
|
||||
}
|
||||
|
||||
impl Frame {
|
||||
pub fn texture(&self) -> &wgpu::Texture {
|
||||
match &self.texture {
|
||||
FrameTexture::Surface(s) => &s.texture,
|
||||
FrameTexture::Texture(t) => t,
|
||||
}
|
||||
}
|
||||
|
||||
/// Present the frame
|
||||
///
|
||||
/// If this frame is from a surface, it will be presented; otherwise nothing will happen.
|
||||
pub fn present(self) {
|
||||
match self.texture {
|
||||
FrameTexture::Surface(s) => s.present(),
|
||||
FrameTexture::Texture(_) => {},
|
||||
}
|
||||
}
|
||||
|
||||
/// The size of the frame
|
||||
pub fn size(&self) -> math::UVec2 {
|
||||
self.size
|
||||
}
|
||||
}
|
||||
|
||||
/// Stores the current frame, and the render target it came from.
|
||||
pub struct FrameTarget {
|
||||
pub render_target: RenderTarget,
|
||||
/// None when a frame has not been created yet
|
||||
pub frame: Option<Frame>,
|
||||
/// The view to use to render to the frame.
|
||||
pub frame_view: Option<wgpu::TextureView>,
|
||||
}
|
||||
|
||||
impl FrameTarget {
|
||||
pub fn new(render_target: RenderTarget) -> Self {
|
||||
Self {
|
||||
render_target,
|
||||
frame: None,
|
||||
frame_view: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the size of the [`RenderTarget`].
|
||||
pub fn size(&self) -> math::UVec2 {
|
||||
self.render_target.size()
|
||||
}
|
||||
|
||||
/// Returns the [`wgpu::TextureFormat`] of the [`RenderTarget`].
|
||||
pub fn format(&self) -> wgpu::TextureFormat {
|
||||
self.render_target.format()
|
||||
}
|
||||
|
||||
/// Create the frame using the inner [`RenderTarget`].
|
||||
pub fn create_frame(&mut self) -> &mut Frame {
|
||||
self.frame = Some(self.render_target.create_frame());
|
||||
self.frame.as_mut().unwrap()
|
||||
}
|
||||
|
||||
/// Create the [`wgpu::TextureView`] for the [`Frame`], storing it in self and returning a reference to it.
|
||||
pub fn create_frame_view(&mut self) -> &wgpu::TextureView {
|
||||
let frame = self.frame.as_ref().expect("frame was not created, cannot create view");
|
||||
|
||||
self.frame_view = Some(frame.texture().create_view(&wgpu::TextureViewDescriptor::default()));
|
||||
self.frame_view.as_ref().unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TargetViewChain<'a> {
|
||||
pub source: &'a mut FrameTarget,
|
||||
pub dest: &'a mut FrameTarget,
|
||||
}
|
||||
|
||||
struct ViewChain {
|
||||
source: FrameTarget,
|
||||
dest: FrameTarget,
|
||||
/// tracks the target that is currently being rendered to
|
||||
active: u8,
|
||||
}
|
||||
|
||||
impl ViewChain {
|
||||
/// Returns the currently active [`FrameTarget`].
|
||||
fn active(&self) -> &FrameTarget {
|
||||
if self.active == 0 {
|
||||
&self.source
|
||||
} else if self.active == 1 {
|
||||
&self.dest
|
||||
} else {
|
||||
panic!("active chain index became invalid! ({})", self.active);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ViewTarget {
|
||||
device: Arc<wgpu::Device>,
|
||||
/// The primary RenderTarget, likely a Surface
|
||||
pub primary: FrameTarget,
|
||||
chain: Option<ViewChain>,
|
||||
}
|
||||
|
||||
impl ViewTarget {
|
||||
pub fn new(device: Arc<wgpu::Device>, primary: RenderTarget) -> Self {
|
||||
let mut s = Self {
|
||||
device,
|
||||
primary: FrameTarget::new(primary),
|
||||
chain: None,
|
||||
};
|
||||
|
||||
s.create_chain(s.primary.format(), s.primary.size());
|
||||
s
|
||||
}
|
||||
|
||||
/// Returns the size of the target.
|
||||
pub fn size(&self) -> math::UVec2 {
|
||||
self.primary.size()
|
||||
}
|
||||
|
||||
/// Returns the [`wgpu::TextureFormat`]
|
||||
pub fn format(&self) -> wgpu::TextureFormat {
|
||||
self.primary.format()
|
||||
}
|
||||
|
||||
/// Resize all the targets, causes the chain to be recreated.
|
||||
pub fn resize(&mut self, device: &wgpu::Device, size: math::UVec2) {
|
||||
if size != self.primary.size() {
|
||||
self.primary.render_target.resize(device, size);
|
||||
self.create_chain(self.primary.format(), size);
|
||||
}
|
||||
}
|
||||
|
||||
fn create_chain(&mut self, format: wgpu::TextureFormat, size: math::UVec2) {
|
||||
debug!("Creating chain with {:?} format and {:?} size", format, size);
|
||||
|
||||
let mut source = FrameTarget::new(RenderTarget::new_texture(&self.device, format, size));
|
||||
source.create_frame();
|
||||
source.create_frame_view();
|
||||
|
||||
let mut dest = FrameTarget::new(RenderTarget::new_texture(&self.device, format, size));
|
||||
dest.create_frame();
|
||||
dest.create_frame_view();
|
||||
|
||||
self.chain = Some(ViewChain {
|
||||
source,
|
||||
dest,
|
||||
active: 0,
|
||||
});
|
||||
}
|
||||
|
||||
/// Cycle the target view chain, storing it in self, and returning a mutable borrow to it.
|
||||
pub fn get_chain(&mut self) -> TargetViewChain {
|
||||
let format = self.primary.format();
|
||||
let size = self.primary.size();
|
||||
|
||||
if let Some(chain) = &self.chain {
|
||||
// check if the chain needs to be recreated
|
||||
if chain.source.format() != format || chain.source.size() != size {
|
||||
self.create_chain(format, size);
|
||||
}
|
||||
} else {
|
||||
self.create_chain(format, size);
|
||||
}
|
||||
|
||||
let chain = self.chain.as_mut().unwrap();
|
||||
|
||||
if chain.active == 0 {
|
||||
chain.active = 1;
|
||||
TargetViewChain {
|
||||
source: &mut chain.source,
|
||||
dest: &mut chain.dest,
|
||||
}
|
||||
} else if chain.active == 1 {
|
||||
chain.active = 0;
|
||||
TargetViewChain {
|
||||
source: &mut chain.dest,
|
||||
dest: &mut chain.source,
|
||||
}
|
||||
} else {
|
||||
panic!("active chain index became invalid! ({})", chain.active);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get the [`wgpu::TextureView`] to render to.
|
||||
pub fn render_view(&self) -> &wgpu::TextureView {
|
||||
let chain = self.chain.as_ref().unwrap();
|
||||
chain.active().frame_view.as_ref().unwrap()
|
||||
}
|
||||
|
||||
/// Copy the chain target to the primary target
|
||||
///
|
||||
/// The primary target must have `wgpu::TextureUsages::COPY_DST`. This also resets the active
|
||||
/// chain texture.
|
||||
pub fn copy_to_primary(&mut self, encoder: &mut wgpu::CommandEncoder) {
|
||||
let chain = self.chain.as_mut().unwrap();
|
||||
let active_tex = chain.active().frame.as_ref().unwrap().texture();
|
||||
|
||||
let active_copy = wgpu::ImageCopyTexture {
|
||||
texture: active_tex,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
};
|
||||
|
||||
let dest_tex = self.primary.frame.as_ref().unwrap().texture();
|
||||
let dest_copy = wgpu::ImageCopyTexture {
|
||||
texture: dest_tex,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
};
|
||||
|
||||
let size = self.primary.size();
|
||||
let size = wgpu::Extent3d {
|
||||
width: size.x,
|
||||
height: size.y,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
|
||||
encoder.copy_texture_to_texture(active_copy, dest_copy, size);
|
||||
|
||||
// reset active texture after a render
|
||||
// must get the chain again because of the borrow checker
|
||||
let chain = self.chain.as_mut().unwrap();
|
||||
chain.active = 0;
|
||||
}
|
||||
}
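// Hedged sketch of a single-frame flow through `ViewTarget`. The surrounding render loop,
// `device`, `queue`, and the elided passes are assumptions, not taken from this diff.
fn present_example(view_target: &mut ViewTarget, device: &wgpu::Device, queue: &wgpu::Queue) {
    // acquire the primary (surface) frame and its view for this frame
    view_target.primary.create_frame();
    view_target.primary.create_frame_view();

    {
        // cycle the ping-pong chain; post-process passes read `source` and write `dest`
        let _chain = view_target.get_chain();
    }

    let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    // ... record passes targeting `view_target.render_view()` here ...

    // blit the active chain texture into the primary target (the surface needs COPY_DST usage)
    view_target.copy_to_primary(&mut encoder);
    queue.submit(std::iter::once(encoder.finish()));

    // presenting is a no-op for texture targets and presents the swap chain for surfaces
    if let Some(frame) = view_target.primary.frame.take() {
        frame.present();
    }
}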
|
|
@ -1,210 +0,0 @@
|
|||
use std::mem;
|
||||
|
||||
#[derive(Clone, Debug, Eq, PartialEq)]
|
||||
pub struct TextureViewDescriptor {
|
||||
/// The label of the texture that this view will be created from.
|
||||
pub texture_label: String,
|
||||
/// Format of the texture view. At this time, it must be the same as the underlying format of the texture.
|
||||
pub format: Option<wgpu::TextureFormat>,
|
||||
/// The dimension of the texture view. For 1D textures, this must be `D1`. For 2D textures it must be one of
|
||||
/// `D2`, `D2Array`, `Cube`, and `CubeArray`. For 3D textures it must be `D3`
|
||||
pub dimension: Option<wgpu::TextureViewDimension>,
|
||||
/// Aspect of the texture. Color textures must be [`TextureAspect::All`].
|
||||
pub aspect: wgpu::TextureAspect,
|
||||
/// Base mip level.
|
||||
pub base_mip_level: u32,
|
||||
/// Mip level count.
|
||||
/// If `Some(count)`, `base_mip_level + count` must be less than or equal to the underlying texture mip count.
|
||||
/// If `None`, considered to include the rest of the mipmap levels, but at least 1 in total.
|
||||
pub mip_level_count: Option<u32>,
|
||||
/// Base array layer.
|
||||
pub base_array_layer: u32,
|
||||
/// Layer count.
|
||||
/// If `Some(count)`, `base_array_layer + count` must be less than or equal to the underlying array count.
|
||||
/// If `None`, considered to include the rest of the array layers, but at least 1 in total.
|
||||
pub array_layer_count: Option<u32>,
|
||||
}
|
||||
|
||||
impl TextureViewDescriptor {
|
||||
pub fn default_view(texture_label: &str) -> Self {
|
||||
let d = wgpu::TextureViewDescriptor::default();
|
||||
|
||||
Self {
|
||||
texture_label: texture_label.to_string(),
|
||||
format: d.format,
|
||||
dimension: d.dimension,
|
||||
aspect: d.aspect,
|
||||
base_array_layer: d.base_array_layer,
|
||||
base_mip_level: d.base_mip_level,
|
||||
mip_level_count: d.mip_level_count,
|
||||
array_layer_count: d.array_layer_count,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_wgpu<'a>(&self, label: Option<&'a str>) -> wgpu::TextureViewDescriptor<'a> {
|
||||
wgpu::TextureViewDescriptor {
|
||||
label,
|
||||
format: self.format,
|
||||
dimension: self.dimension,
|
||||
aspect: self.aspect,
|
||||
base_mip_level: self.base_mip_level,
|
||||
mip_level_count: self.mip_level_count,
|
||||
base_array_layer: self.base_array_layer,
|
||||
array_layer_count: self.array_layer_count,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub struct SamplerDescriptor {
|
||||
/// The label of the texture that this sampler will be created from.
|
||||
pub texture_label: String,
|
||||
/// How to deal with out of bounds accesses in the u (i.e. x) direction
|
||||
pub address_mode_u: wgpu::AddressMode,
|
||||
/// How to deal with out of bounds accesses in the v (i.e. y) direction
|
||||
pub address_mode_v: wgpu::AddressMode,
|
||||
/// How to deal with out of bounds accesses in the w (i.e. z) direction
|
||||
pub address_mode_w: wgpu::AddressMode,
|
||||
/// How to filter the texture when it needs to be magnified (made larger)
|
||||
pub mag_filter: wgpu::FilterMode,
|
||||
/// How to filter the texture when it needs to be minified (made smaller)
|
||||
pub min_filter: wgpu::FilterMode,
|
||||
/// How to filter between mip map levels
|
||||
pub mipmap_filter: wgpu::FilterMode,
|
||||
/// Minimum level of detail (i.e. mip level) to use
|
||||
pub lod_min_clamp: f32,
|
||||
/// Maximum level of detail (i.e. mip level) to use
|
||||
pub lod_max_clamp: f32,
|
||||
/// If this is enabled, this is a comparison sampler using the given comparison function.
|
||||
pub compare: Option<wgpu::CompareFunction>,
|
||||
/// Valid values: 1, 2, 4, 8, and 16.
|
||||
pub anisotropy_clamp: u16,
|
||||
/// Border color to use when address_mode is [`AddressMode::ClampToBorder`]
|
||||
pub border_color: Option<wgpu::SamplerBorderColor>,
|
||||
}
|
||||
|
||||
impl SamplerDescriptor {
|
||||
pub fn default_sampler(texture_label: &str) -> Self {
|
||||
let d = wgpu::SamplerDescriptor::default();
|
||||
|
||||
Self {
|
||||
texture_label: texture_label.to_string(),
|
||||
address_mode_u: d.address_mode_u,
|
||||
address_mode_v: d.address_mode_v,
|
||||
address_mode_w: d.address_mode_w,
|
||||
mag_filter: d.mag_filter,
|
||||
min_filter: d.min_filter,
|
||||
mipmap_filter: d.mipmap_filter,
|
||||
lod_min_clamp: d.lod_min_clamp,
|
||||
lod_max_clamp: d.lod_max_clamp,
|
||||
compare: d.compare,
|
||||
anisotropy_clamp: d.anisotropy_clamp,
|
||||
border_color: d.border_color,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct TextureDescriptor {
|
||||
/// Size of the texture. All components must be greater than zero. For a
|
||||
/// regular 1D/2D texture, the unused sizes will be 1. For 2DArray textures,
|
||||
/// Z is the number of 2D textures in that array.
|
||||
pub size: wgpu::Extent3d,
|
||||
/// Mip count of texture. For a texture with no extra mips, this must be 1.
|
||||
pub mip_level_count: u32,
|
||||
/// Sample count of texture. If this is not 1, texture must have [`BindingType::Texture::multisampled`] set to true.
|
||||
pub sample_count: u32,
|
||||
/// Dimensions of the texture.
|
||||
pub dimension: wgpu::TextureDimension,
|
||||
/// Format of the texture.
|
||||
pub format: wgpu::TextureFormat,
|
||||
/// Allowed usages of the texture. If used in other ways, the operation will panic.
|
||||
pub usage: wgpu::TextureUsages,
|
||||
/// Specifies what view formats will be allowed when calling create_view() on this texture.
|
||||
///
|
||||
/// View formats of the same format as the texture are always allowed.
|
||||
///
|
||||
/// Note: currently, only the srgb-ness is allowed to change. (ex: Rgba8Unorm texture + Rgba8UnormSrgb view)
|
||||
pub view_formats: Vec<wgpu::TextureFormat>,
|
||||
}
|
||||
|
||||
impl TextureDescriptor {
|
||||
pub fn as_wgpu<'a>(&'a self, label: Option<&'a str>) -> wgpu::TextureDescriptor<'a> {
|
||||
wgpu::TextureDescriptor {
|
||||
label,
|
||||
size: self.size,
|
||||
mip_level_count: self.mip_level_count,
|
||||
sample_count: self.sample_count,
|
||||
dimension: self.dimension,
|
||||
format: self.format,
|
||||
usage: self.usage,
|
||||
view_formats: &self.view_formats,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct BufferDescriptor {
|
||||
/// Size of a buffer.
|
||||
pub size: wgpu::BufferAddress,
|
||||
/// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
|
||||
/// will panic.
|
||||
pub usage: wgpu::BufferUsages,
|
||||
/// Allows a buffer to be mapped immediately after it is created. It does not have to be [`BufferUsages::MAP_READ`] or
/// [`BufferUsages::MAP_WRITE`]; all buffers are allowed to be mapped at creation.
|
||||
///
|
||||
/// If this is `true`, [`size`](#structfield.size) must be a multiple of
|
||||
/// [`COPY_BUFFER_ALIGNMENT`].
|
||||
pub mapped_at_creation: bool,
|
||||
}
|
||||
|
||||
impl BufferDescriptor {
|
||||
pub fn new<T: Sized>(usage: wgpu::BufferUsages, mapped_at_creation: bool) -> Self {
|
||||
Self {
|
||||
size: mem::size_of::<T>() as _,
|
||||
usage,
|
||||
mapped_at_creation,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_wgpu<'a>(&self, label: Option<&'a str>) -> wgpu::BufferDescriptor<'a> {
|
||||
wgpu::BufferDescriptor {
|
||||
label,
|
||||
size: self.size,
|
||||
usage: self.usage,
|
||||
mapped_at_creation: self.mapped_at_creation,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct BufferInitDescriptor {
|
||||
/// Debug label of a buffer. This will show up in graphics debuggers for easy identification.
|
||||
pub label: Option<String>,
|
||||
/// Contents of a buffer on creation.
|
||||
pub contents: Vec<u8>,
|
||||
/// Usages of a buffer. If the buffer is used in any way that isn't specified here, the operation
|
||||
/// will panic.
|
||||
pub usage: wgpu::BufferUsages,
|
||||
}
|
||||
|
||||
impl BufferInitDescriptor {
|
||||
pub fn new<T: bytemuck::Pod>(label: Option<&str>, data: &T, usage: wgpu::BufferUsages) -> Self {
|
||||
Self {
|
||||
label: label.map(|s| s.to_string()),
|
||||
contents: bytemuck::bytes_of(data).to_vec(),
|
||||
usage,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn as_wgpu<'a>(&'a self, label: Option<&'a str>) -> wgpu::util::BufferInitDescriptor<'a> {
|
||||
wgpu::util::BufferInitDescriptor {
|
||||
label,
|
||||
contents: &self.contents,
|
||||
usage: self.usage,
|
||||
}
|
||||
}
|
||||
}
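// Minimal sketch showing how the engine-side `BufferInitDescriptor` maps onto
// `wgpu::util::DeviceExt::create_buffer_init`. The helper name, uniform value and
// label are made up for the example; `device` is an assumed parameter.
fn upload_uniform_example<T: bytemuck::Pod>(device: &wgpu::Device, value: &T) -> wgpu::Buffer {
    use wgpu::util::DeviceExt;

    let desc = BufferInitDescriptor::new(
        Some("B_ExampleUniform"),
        value,
        wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
    );
    device.create_buffer_init(&desc.as_wgpu(desc.label.as_deref()))
}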
|
|
@ -1,18 +0,0 @@
|
|||
use lyra_ecs::Component;
|
||||
|
||||
#[derive(Debug, Clone, Component)]
|
||||
pub struct DirectionalLight {
|
||||
pub enabled: bool,
|
||||
pub color: glam::Vec3,
|
||||
pub intensity: f32,
|
||||
}
|
||||
|
||||
impl Default for DirectionalLight {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: true,
|
||||
color: glam::Vec3::new(1.0, 1.0, 1.0),
|
||||
intensity: 1.0,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,359 +0,0 @@
|
|||
pub mod directional;
|
||||
pub mod point;
|
||||
pub mod spotlight;
|
||||
|
||||
use lyra_ecs::{Entity, Tick, World};
|
||||
pub use point::*;
|
||||
pub use spotlight::*;
|
||||
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
marker::PhantomData,
|
||||
mem,
|
||||
sync::Arc,
|
||||
};
|
||||
|
||||
use crate::math::Transform;
|
||||
|
||||
use self::directional::DirectionalLight;
|
||||
|
||||
use super::graph::LightShadowMapId;
|
||||
|
||||
const MAX_LIGHT_COUNT: usize = 16;
|
||||
|
||||
/// A struct that manages the slots of a list of lights that is uploaded to a wgpu::Buffer.
|
||||
pub struct LightBuffer<U: Default + bytemuck::Pod + bytemuck::Zeroable> {
|
||||
_phantom: PhantomData<U>,
|
||||
/// The max amount of light casters that could fit in this buffer.
|
||||
pub max_count: usize,
|
||||
/// The amount of light casters that are taking up space in the buffer.
|
||||
///
|
||||
/// This means that a light may be inactive in the buffer, having been replaced
/// with a default caster so it does not affect lighting. It's easier this way than
/// recreating the array and removing the gaps.
|
||||
pub buffer_count: usize,
|
||||
/// The buffer index for a specific entity/caster.
|
||||
used_indexes: HashMap<Entity, usize>,
|
||||
/// Indexes that were being used but are no longer needed.
|
||||
dead_indexes: VecDeque<usize>,
|
||||
}
|
||||
|
||||
impl<U: Default + bytemuck::Pod + bytemuck::Zeroable> LightBuffer<U> {
|
||||
pub fn new(max_count: usize) -> Self {
|
||||
Self {
|
||||
_phantom: PhantomData,
|
||||
max_count,
|
||||
buffer_count: 0,
|
||||
used_indexes: HashMap::new(),
|
||||
dead_indexes: VecDeque::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn has_light(&self, entity: Entity) -> bool {
|
||||
self.used_indexes.contains_key(&entity)
|
||||
}
|
||||
|
||||
/// Update an existing light in the light buffer.
|
||||
pub fn update_light(
|
||||
&mut self,
|
||||
lights_buffer: &mut [U; MAX_LIGHT_COUNT],
|
||||
entity: Entity,
|
||||
light: U,
|
||||
) {
|
||||
let buffer_idx = *self
|
||||
.used_indexes
|
||||
.get(&entity)
|
||||
.expect("Entity for Light is not in buffer!");
|
||||
|
||||
lights_buffer[buffer_idx] = light;
|
||||
}
|
||||
|
||||
/// Add a new light to the light buffer.
|
||||
pub fn add_light(
|
||||
&mut self,
|
||||
lights_buffer: &mut [U; MAX_LIGHT_COUNT],
|
||||
entity: Entity,
|
||||
light: U,
|
||||
) {
|
||||
let buffer_idx = match self.dead_indexes.pop_front() {
|
||||
Some(i) => i,
|
||||
None => {
|
||||
let i = self.buffer_count;
|
||||
self.buffer_count += 1;
|
||||
|
||||
// If this assert triggers, you are trying to exceed
// the maximum number of lights
|
||||
assert!(self.buffer_count <= self.max_count);
|
||||
|
||||
i
|
||||
}
|
||||
};
|
||||
|
||||
self.used_indexes.insert(entity, buffer_idx);
|
||||
self.update_light(lights_buffer, entity, light);
|
||||
}
|
||||
|
||||
/// Update, or add a new caster, to the light buffer.
|
||||
pub fn update_or_add(
|
||||
&mut self,
|
||||
lights_buffer: &mut [U; MAX_LIGHT_COUNT],
|
||||
entity: Entity,
|
||||
light: U,
|
||||
) {
|
||||
if self.used_indexes.contains_key(&entity) {
|
||||
self.update_light(lights_buffer, entity, light);
|
||||
} else {
|
||||
self.add_light(lights_buffer, entity, light);
|
||||
}
|
||||
}
|
||||
|
||||
/// Remove a caster from the buffer, returns true if it was removed.
|
||||
pub fn remove_light(
|
||||
&mut self,
|
||||
lights_buffer: &mut [U; MAX_LIGHT_COUNT],
|
||||
entity: Entity,
|
||||
) -> bool {
|
||||
if let Some(removed_idx) = self.used_indexes.remove(&entity) {
|
||||
self.dead_indexes.push_back(removed_idx);
|
||||
//self.current_count -= 1;
|
||||
lights_buffer[removed_idx] = U::default();
|
||||
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
}
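// Sketch of how `LightBuffer` is intended to be driven. The `entity` comes from the ECS
// and `gpu_side` is the CPU copy that later gets uploaded; both are assumptions here.
fn light_buffer_example(entity: Entity) {
    let mut gpu_side = [LightUniform::default(); MAX_LIGHT_COUNT];
    let mut lights = LightBuffer::<LightUniform>::new(MAX_LIGHT_COUNT);

    // insert or refresh this entity's light...
    lights.update_or_add(&mut gpu_side, entity, LightUniform::default());
    assert!(lights.has_light(entity));

    // ...and later free its slot (the slot is zeroed, not compacted)
    lights.remove_light(&mut gpu_side, entity);
}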
|
||||
|
||||
pub(crate) struct LightUniformBuffers {
|
||||
pub buffer: Arc<wgpu::Buffer>,
|
||||
pub bind_group: Arc<wgpu::BindGroup>,
|
||||
pub bind_group_layout: Arc<wgpu::BindGroupLayout>,
|
||||
max_light_count: u64,
|
||||
}
|
||||
|
||||
impl LightUniformBuffers {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let limits = device.limits();
|
||||
// TODO: ensure we don't write over this limit
|
||||
let max_buffer_sizes = (limits.max_uniform_buffer_binding_size as u64) / 2;
|
||||
|
||||
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label: Some("UBO_Lights"),
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
|
||||
size: max_buffer_sizes,
|
||||
mapped_at_creation: false,
|
||||
});
|
||||
|
||||
let bindgroup_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT | wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage { read_only: true },
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
}],
|
||||
label: Some("BGL_Lights"),
|
||||
});
|
||||
|
||||
let bindgroup = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &bindgroup_layout,
|
||||
entries: &[wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(wgpu::BufferBinding {
|
||||
buffer: &buffer,
|
||||
offset: 0,
|
||||
size: None, // use the full buffer
|
||||
}),
|
||||
}],
|
||||
label: Some("BG_Lights"),
|
||||
});
|
||||
|
||||
Self {
|
||||
buffer: Arc::new(buffer),
|
||||
bind_group: Arc::new(bindgroup),
|
||||
bind_group_layout: Arc::new(bindgroup_layout),
|
||||
max_light_count: max_buffer_sizes / mem::size_of::<LightUniform>() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_lights(&mut self, queue: &wgpu::Queue, world_tick: Tick, world: &World) {
|
||||
let _ = world_tick;
|
||||
let mut lights = vec![];
|
||||
|
||||
for (point_light, transform, shadow_map_id) in
|
||||
world.view_iter::<(&PointLight, &Transform, Option<&LightShadowMapId>)>()
|
||||
{
|
||||
let shadow_map_id = shadow_map_id.cloned();
|
||||
let uniform =
|
||||
LightUniform::from_point_light_bundle(&point_light, &transform, shadow_map_id);
|
||||
lights.push(uniform);
|
||||
}
|
||||
|
||||
for (spot_light, transform, shadow_map_id) in
|
||||
world.view_iter::<(&SpotLight, &Transform, Option<&LightShadowMapId>)>()
|
||||
{
|
||||
let shadow_map_id = shadow_map_id.cloned();
|
||||
let uniform =
|
||||
LightUniform::from_spot_light_bundle(&spot_light, &transform, shadow_map_id);
|
||||
lights.push(uniform);
|
||||
}
|
||||
|
||||
for (dir_light, transform, shadow_map_id) in
|
||||
world.view_iter::<(&DirectionalLight, &Transform, Option<&LightShadowMapId>)>()
|
||||
{
|
||||
let shadow_map_id = shadow_map_id.cloned();
|
||||
let uniform =
|
||||
LightUniform::from_directional_bundle(&dir_light, &transform, shadow_map_id);
|
||||
lights.push(uniform);
|
||||
}
|
||||
|
||||
assert!(lights.len() < self.max_light_count as usize); // ensure we don't overwrite the buffer
|
||||
|
||||
// write the number of lights to the buffer, and right after that the list of lights.
queue.write_buffer(&self.buffer, 0, bytemuck::cast_slice(&[lights.len() as u32]));
// the size of u32 is multiplied by 4 because of GPU alignment requirements
|
||||
queue.write_buffer(
|
||||
&self.buffer,
|
||||
mem::size_of::<u32>() as u64 * 4,
|
||||
bytemuck::cast_slice(lights.as_slice()),
|
||||
);
|
||||
}
|
||||
}
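// Hedged layout note for the `UBO_Lights` buffer written above, inferred from the two
// `write_buffer` calls (WGSL aligns the following array to 16 bytes, hence the 4x u32 offset).
// The constant names are hypothetical, added only for illustration:
//   bytes 0..4  -> light count (u32)
//   bytes 16..  -> packed array of `LightUniform`
const LIGHT_COUNT_OFFSET: wgpu::BufferAddress = 0;
const LIGHT_ARRAY_OFFSET: wgpu::BufferAddress = (std::mem::size_of::<u32>() * 4) as wgpu::BufferAddress;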
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Default, Debug, Copy, Clone)]
|
||||
pub(crate) enum LightType {
|
||||
#[default]
|
||||
Directional = 0,
|
||||
Point = 1,
|
||||
Spotlight = 2,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Default, Debug, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
pub(crate) struct LightUniform {
|
||||
pub position: glam::Vec3,
|
||||
pub light_type: u32, // enum LightType
|
||||
pub direction: glam::Vec3,
|
||||
pub enabled: u32, // bool
|
||||
pub color: glam::Vec3,
|
||||
// no padding is needed here since range acts as the padding
|
||||
// that would usually be needed for the vec3
|
||||
pub range: f32,
|
||||
pub intensity: f32,
|
||||
pub smoothness: f32,
|
||||
|
||||
pub spot_cutoff_rad: f32,
|
||||
pub spot_outer_cutoff_rad: f32,
|
||||
pub light_shadow_uniform_index: [i32; 6],
|
||||
_padding: [u32; 2],
|
||||
}
|
||||
|
||||
impl LightUniform {
|
||||
pub fn from_point_light_bundle(
|
||||
light: &PointLight,
|
||||
transform: &Transform,
|
||||
map_id: Option<LightShadowMapId>,
|
||||
) -> Self {
|
||||
Self {
|
||||
light_type: LightType::Point as u32,
|
||||
enabled: light.enabled as u32,
|
||||
position: transform.translation,
|
||||
direction: transform.forward(),
|
||||
color: light.color,
|
||||
|
||||
range: light.range,
|
||||
intensity: light.intensity,
|
||||
smoothness: light.smoothness,
|
||||
|
||||
spot_cutoff_rad: 0.0,
|
||||
spot_outer_cutoff_rad: 0.0,
|
||||
light_shadow_uniform_index: map_id
|
||||
.map(|m| {
|
||||
[
|
||||
m.uniform_index(0) as i32,
|
||||
m.uniform_index(1) as i32,
|
||||
m.uniform_index(2) as i32,
|
||||
m.uniform_index(3) as i32,
|
||||
m.uniform_index(4) as i32,
|
||||
m.uniform_index(5) as i32,
|
||||
]
|
||||
})
|
||||
.unwrap_or([-1; 6]),
|
||||
_padding: [0; 2],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_directional_bundle(
|
||||
light: &DirectionalLight,
|
||||
transform: &Transform,
|
||||
map_id: Option<LightShadowMapId>,
|
||||
) -> Self {
|
||||
Self {
|
||||
light_type: LightType::Directional as u32,
|
||||
enabled: light.enabled as u32,
|
||||
position: transform.translation,
|
||||
direction: transform.forward(),
|
||||
color: light.color,
|
||||
|
||||
range: 0.0,
|
||||
intensity: light.intensity,
|
||||
smoothness: 0.0,
|
||||
|
||||
spot_cutoff_rad: 0.0,
|
||||
spot_outer_cutoff_rad: 0.0,
|
||||
light_shadow_uniform_index: map_id
|
||||
.map(|m| {
|
||||
[
|
||||
m.uniform_index(0) as i32,
|
||||
m.uniform_index(1) as i32,
|
||||
m.uniform_index(2) as i32,
|
||||
m.uniform_index(3) as i32,
|
||||
m.uniform_index(4) as i32,
|
||||
m.uniform_index(5) as i32,
|
||||
]
|
||||
})
|
||||
.unwrap_or([-1; 6]),
|
||||
_padding: [0; 2],
|
||||
}
|
||||
}
|
||||
|
||||
/// Create the [`LightUniform`] from a spot light ECS bundle.
|
||||
pub fn from_spot_light_bundle(
|
||||
light: &SpotLight,
|
||||
transform: &Transform,
|
||||
map_id: Option<LightShadowMapId>,
|
||||
) -> Self {
|
||||
Self {
|
||||
light_type: LightType::Spotlight as u32,
|
||||
enabled: light.enabled as u32,
|
||||
position: transform.translation,
|
||||
direction: transform.forward(),
|
||||
color: light.color,
|
||||
|
||||
range: light.range,
|
||||
intensity: light.intensity,
|
||||
smoothness: light.smoothness,
|
||||
|
||||
spot_cutoff_rad: light.cutoff.to_radians(),
|
||||
spot_outer_cutoff_rad: light.outer_cutoff.to_radians(),
|
||||
light_shadow_uniform_index: map_id
|
||||
.map(|m| {
|
||||
[
|
||||
m.uniform_index(0) as i32,
|
||||
m.uniform_index(1) as i32,
|
||||
m.uniform_index(2) as i32,
|
||||
m.uniform_index(3) as i32,
|
||||
m.uniform_index(4) as i32,
|
||||
m.uniform_index(5) as i32,
|
||||
]
|
||||
})
|
||||
.unwrap_or([-1; 6]),
|
||||
_padding: [0; 2],
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
use lyra_ecs::Component;
|
||||
|
||||
#[derive(Debug, Clone, Component)]
|
||||
pub struct PointLight {
|
||||
pub enabled: bool,
|
||||
pub color: glam::Vec3,
|
||||
pub range: f32,
|
||||
pub intensity: f32,
|
||||
pub smoothness: f32,
|
||||
}
|
||||
|
||||
impl Default for PointLight {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: true,
|
||||
color: glam::Vec3::new(1.0, 1.0, 1.0),
|
||||
range: 1.0,
|
||||
intensity: 1.0,
|
||||
smoothness: 0.75,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,29 +0,0 @@
|
|||
use lyra_ecs::Component;
|
||||
|
||||
use crate::math;
|
||||
|
||||
#[derive(Debug, Clone, Component)]
|
||||
pub struct SpotLight {
|
||||
pub enabled: bool,
|
||||
pub color: glam::Vec3,
|
||||
pub range: f32,
|
||||
pub intensity: f32,
|
||||
pub smoothness: f32,
|
||||
pub cutoff: math::Angle,
|
||||
pub outer_cutoff: math::Angle,
|
||||
}
|
||||
|
||||
impl Default for SpotLight {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enabled: true,
|
||||
color: glam::Vec3::new(1.0, 1.0, 1.0),
|
||||
range: 1.0,
|
||||
intensity: 1.0,
|
||||
smoothness: 0.75,
|
||||
|
||||
cutoff: math::Angle::Degrees(45.0),
|
||||
outer_cutoff: math::Angle::Degrees(45.0),
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,247 +0,0 @@
|
|||
use std::{borrow::Cow, mem, rc::Rc, sync::Arc};
|
||||
|
||||
use glam::UVec2;
|
||||
use tracing::instrument;
|
||||
use wgpu::{util::DeviceExt, ComputePipeline};
|
||||
use winit::dpi::PhysicalSize;
|
||||
|
||||
use super::{light::LightUniformBuffers, render_buffer::{BindGroupPair, BufferWrapper}, texture::RenderTexture};
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) struct LightIndicesGridBuffer {
|
||||
index_counter_buffer: wgpu::Buffer,
|
||||
indices_buffer: wgpu::Buffer,
|
||||
grid_texture: wgpu::Texture,
|
||||
grid_texture_view: wgpu::TextureView,
|
||||
pub bg_pair: BindGroupPair,
|
||||
}
|
||||
|
||||
pub(crate) struct LightCullCompute {
|
||||
device: Arc<wgpu::Device>,
|
||||
queue: Arc<wgpu::Queue>,
|
||||
pipeline: ComputePipeline,
|
||||
pub light_indices_grid: LightIndicesGridBuffer,
|
||||
screen_size_buffer: BufferWrapper,
|
||||
workgroup_size: glam::UVec2,
|
||||
}
|
||||
|
||||
impl LightCullCompute {
|
||||
/// Create the [`LightIndicesGridBuffer`] object
|
||||
fn create_grid(device: &wgpu::Device, workgroup_size: glam::UVec2) -> LightIndicesGridBuffer {
|
||||
let mut contents = Vec::<u8>::new();
|
||||
let contents_len = workgroup_size.x * workgroup_size.y * 200 * mem::size_of::<u32>() as u32;
|
||||
contents.resize(contents_len as _, 0);
|
||||
|
||||
let light_indices_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("B_LightIndices"),
|
||||
contents: &contents,
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let light_index_counter_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
|
||||
label: Some("B_LightIndexCounter"),
|
||||
contents: &bytemuck::cast_slice(&[0]),
|
||||
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
|
||||
});
|
||||
|
||||
let light_indices_bg_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage {
|
||||
read_only: false
|
||||
},
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::StorageTexture {
|
||||
access: wgpu::StorageTextureAccess::ReadWrite,
|
||||
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
|
||||
view_dimension: wgpu::TextureViewDimension::D2
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 2,
|
||||
visibility: wgpu::ShaderStages::COMPUTE,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Storage {
|
||||
read_only: false
|
||||
},
|
||||
has_dynamic_offset: false,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
}
|
||||
],
|
||||
label: Some("BGL_LightIndicesGrid"),
|
||||
});
|
||||
|
||||
let size = wgpu::Extent3d {
|
||||
width: workgroup_size.x,
|
||||
height: workgroup_size.y,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
let grid_texture = device.create_texture(
|
||||
&wgpu::TextureDescriptor {
|
||||
label: Some("Tex_LightGrid"),
|
||||
size,
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: wgpu::TextureFormat::Rg32Uint, // vec2<uint>
|
||||
usage: wgpu::TextureUsages::STORAGE_BINDING,
|
||||
view_formats: &[],
|
||||
}
|
||||
);
|
||||
|
||||
let grid_texture_view = grid_texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
label: Some("TexV_LightGrid"),
|
||||
format: Some(wgpu::TextureFormat::Rg32Uint), // vec2<uint>
|
||||
dimension: Some(wgpu::TextureViewDimension::D2),
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
base_mip_level: 0,
|
||||
mip_level_count: None,
|
||||
base_array_layer: 0,
|
||||
array_layer_count: None,
|
||||
});
|
||||
|
||||
let light_indices_bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &light_indices_bg_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(
|
||||
wgpu::BufferBinding {
|
||||
buffer: &light_indices_buffer,
|
||||
offset: 0,
|
||||
size: None, // the entire light buffer is needed
|
||||
}
|
||||
)
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::TextureView(&grid_texture_view)
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 2,
|
||||
resource: wgpu::BindingResource::Buffer(
|
||||
wgpu::BufferBinding {
|
||||
buffer: &light_index_counter_buffer,
|
||||
offset: 0,
|
||||
size: None, // the entire light buffer is needed
|
||||
}
|
||||
)
|
||||
},
|
||||
],
|
||||
label: Some("BG_LightIndicesGrid"),
|
||||
});
|
||||
|
||||
LightIndicesGridBuffer {
|
||||
index_counter_buffer: light_index_counter_buffer,
|
||||
indices_buffer: light_indices_buffer,
|
||||
grid_texture,
|
||||
grid_texture_view,
|
||||
bg_pair: BindGroupPair::new(light_indices_bg, light_indices_bg_layout),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(device: Arc<wgpu::Device>, queue: Arc<wgpu::Queue>, screen_size: PhysicalSize<u32>, lights_buffers: &LightUniformBuffers, camera_buffers: &BufferWrapper, depth_texture: &mut RenderTexture) -> Self {
|
||||
let screen_size_buffer = BufferWrapper::builder()
|
||||
.buffer_usage(wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST)
|
||||
.label_prefix("ScreenSize")
|
||||
.visibility(wgpu::ShaderStages::COMPUTE)
|
||||
.buffer_dynamic_offset(false)
|
||||
.contents(&[UVec2::new(screen_size.width, screen_size.height)])
|
||||
.finish(&device);
|
||||
|
||||
let shader_src = include_str!("shaders/light_cull.comp.wgsl");
|
||||
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: Some("LightCullCompute"),
|
||||
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(shader_src)),
|
||||
});
|
||||
|
||||
let workgroup_size = glam::UVec2::new((screen_size.width as f32 / 16.0).ceil() as u32,
|
||||
(screen_size.height as f32 / 16.0).ceil() as u32);
|
||||
let light_grid = Self::create_grid(&device, workgroup_size);
|
||||
|
||||
let depth_tex_pair = depth_texture.create_bind_group(&device);
|
||||
|
||||
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: Some("PipeLay_LightCull"),
|
||||
bind_group_layouts: &[
|
||||
&depth_tex_pair.layout,
|
||||
&camera_buffers.bindgroup_layout().unwrap(),
|
||||
&lights_buffers.bind_group_pair.layout,
|
||||
&light_grid.bg_pair.layout,
|
||||
screen_size_buffer.bindgroup_layout().unwrap(),
|
||||
],
|
||||
push_constant_ranges: &[],
|
||||
});
|
||||
|
||||
let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
|
||||
label: Some("Pipe_LightCull"),
|
||||
layout: Some(&layout),
|
||||
module: &shader,
|
||||
entry_point: "cs_main",
|
||||
});
|
||||
|
||||
Self {
|
||||
device,
|
||||
queue,
|
||||
pipeline,
|
||||
light_indices_grid: light_grid,
|
||||
screen_size_buffer,
|
||||
workgroup_size,
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn update_screen_size(&mut self, size: PhysicalSize<u32>) {
|
||||
self.screen_size_buffer.write_buffer(&self.queue, 0,
|
||||
&[UVec2::new(size.width, size.height)]);
|
||||
self.workgroup_size = glam::UVec2::new((size.width as f32 / 16.0).ceil() as u32,
|
||||
(size.height as f32 / 16.0).ceil() as u32);
|
||||
|
||||
// I hate that the entire bind group is recreated on a resize but it's the only way :(
|
||||
self.light_indices_grid = Self::create_grid(&self.device, self.workgroup_size);
|
||||
}
|
||||
|
||||
#[instrument(skip(self, camera_buffers, lights_buffers, depth_texture))]
|
||||
pub fn compute(&mut self, camera_buffers: &BufferWrapper, lights_buffers: &LightUniformBuffers, depth_texture: &RenderTexture) {
|
||||
self.cleanup();
|
||||
let mut encoder = self.device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
|
||||
label: Some("LightCullCompute"),
|
||||
});
|
||||
|
||||
{
|
||||
let mut pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
|
||||
label: Some("Pass_LightCull"),
|
||||
});
|
||||
|
||||
pass.set_pipeline(&self.pipeline);
|
||||
|
||||
pass.set_bind_group(0, depth_texture.bind_group(), &[]);
|
||||
pass.set_bind_group(1, &camera_buffers.bindgroup(), &[]);
|
||||
pass.set_bind_group(2, &lights_buffers.bind_group_pair.bindgroup, &[]);
|
||||
pass.set_bind_group(3, &self.light_indices_grid.bg_pair.bindgroup, &[]);
|
||||
pass.set_bind_group(4, self.screen_size_buffer.bindgroup(), &[]);
|
||||
|
||||
pass.dispatch_workgroups(self.workgroup_size.x, self.workgroup_size.y, 1);
|
||||
}
|
||||
|
||||
self.queue.submit(std::iter::once(encoder.finish()));
|
||||
//self.device.poll(wgpu::Maintain::Wait);
|
||||
}
|
||||
|
||||
pub fn cleanup(&mut self) {
|
||||
self.queue.write_buffer(&self.light_indices_grid.index_counter_buffer, 0, &bytemuck::cast_slice(&[0]));
|
||||
}
|
||||
}
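// Worked example of the tile/workgroup sizing used above: the screen is divided into
// 16x16 pixel tiles, rounded up so edge pixels are covered. The helper name is made up.
fn tile_count_example(width: u32, height: u32) -> glam::UVec2 {
    glam::UVec2::new(
        (width as f32 / 16.0).ceil() as u32,
        (height as f32 / 16.0).ceil() as u32,
    )
}
// e.g. a 1920x1080 surface dispatches 120 x 68 workgroups in `compute()`.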
|
|
@ -1,292 +0,0 @@
|
|||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
use std::ops::{Deref, DerefMut};
|
||||
use std::rc::Rc;
|
||||
use std::sync::Arc;
|
||||
|
||||
use lyra_ecs::World;
|
||||
use lyra_game_derive::RenderGraphLabel;
|
||||
use tracing::{debug, instrument, warn};
|
||||
use winit::window::Window;
|
||||
|
||||
use crate::render::graph::{BasePass, BasePassLabel, BasePassSlots, FxaaPass, FxaaPassLabel, LightBasePass, LightBasePassLabel, LightCullComputePass, LightCullComputePassLabel, MeshPass, MeshPrepNode, MeshPrepNodeLabel, MeshesPassLabel, PresentPass, PresentPassLabel, RenderGraphLabelValue, RenderTarget, ShadowMapsPass, ShadowMapsPassLabel, SubGraphNode, TransformsNode, TransformsNodeLabel, ViewTarget};
|
||||
|
||||
use super::graph::RenderGraph;
|
||||
use super::{resource::RenderPipeline, render_job::RenderJob};
|
||||
|
||||
use crate::math;
|
||||
|
||||
#[derive(Clone, Copy, Debug)]
|
||||
pub struct ScreenSize(glam::UVec2);
|
||||
|
||||
impl Deref for ScreenSize {
|
||||
type Target = glam::UVec2;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for ScreenSize {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, Hash, RenderGraphLabel)]
|
||||
struct TestSubGraphLabel;
|
||||
|
||||
pub trait Renderer {
|
||||
fn prepare(&mut self, main_world: &mut World);
|
||||
fn render(&mut self) -> Result<(), wgpu::SurfaceError>;
|
||||
fn on_resize(&mut self, world: &mut World, new_size: winit::dpi::PhysicalSize<u32>);
|
||||
|
||||
fn surface_size(&self) -> winit::dpi::PhysicalSize<u32>;
|
||||
fn add_render_pipeline(&mut self, shader_id: u64, pipeline: Arc<RenderPipeline>);
|
||||
}
|
||||
|
||||
pub trait RenderPass {
|
||||
fn prepare(&mut self, main_world: &mut World);
|
||||
fn render(&mut self, encoder: &mut wgpu::CommandEncoder) -> Result<(), wgpu::SurfaceError>;
|
||||
fn on_resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>);
|
||||
}
|
||||
|
||||
pub struct BasicRenderer {
|
||||
pub device: Arc<wgpu::Device>, // device does not need to be mutable, no need for refcell
|
||||
pub queue: Arc<wgpu::Queue>,
|
||||
pub size: winit::dpi::PhysicalSize<u32>,
|
||||
pub window: Arc<Window>,
|
||||
|
||||
pub clear_color: wgpu::Color,
|
||||
|
||||
pub render_pipelines: rustc_hash::FxHashMap<u64, Arc<RenderPipeline>>,
|
||||
pub render_jobs: VecDeque<RenderJob>,
|
||||
|
||||
graph: RenderGraph,
|
||||
}
|
||||
|
||||
impl BasicRenderer {
|
||||
#[instrument(skip(world, window))]
|
||||
pub async fn create_with_window(world: &mut World, window: Arc<Window>) -> BasicRenderer {
|
||||
let size = window.inner_size();
|
||||
world.add_resource(ScreenSize(glam::UVec2::new(size.width, size.height)));
|
||||
|
||||
// Get a GPU handle
|
||||
let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
|
||||
backends: wgpu::Backends::all(),
|
||||
dx12_shader_compiler: Default::default(),
|
||||
flags: wgpu::InstanceFlags::default(),
|
||||
gles_minor_version: wgpu::Gles3MinorVersion::Automatic,
|
||||
});
|
||||
|
||||
let surface: wgpu::Surface<'static> = instance.create_surface(window.clone()).unwrap();
|
||||
|
||||
let adapter = instance.request_adapter(
|
||||
&wgpu::RequestAdapterOptions {
|
||||
power_preference: wgpu::PowerPreference::HighPerformance,
|
||||
compatible_surface: Some(&surface),
|
||||
force_fallback_adapter: false,
|
||||
},
|
||||
).await.unwrap();
|
||||
|
||||
let (device, queue) = adapter.request_device(
|
||||
&wgpu::DeviceDescriptor {
|
||||
label: None,
|
||||
required_features: wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES | wgpu::Features::ADDRESS_MODE_CLAMP_TO_BORDER,
|
||||
// WebGL does not support all wgpu features.
|
||||
// Not sure if the engine will ever completely support WASM,
|
||||
// but it's here just in case
|
||||
required_limits: if cfg!(target_arch = "wasm32") {
|
||||
wgpu::Limits::downlevel_webgl2_defaults()
|
||||
} else {
|
||||
wgpu::Limits {
|
||||
max_bind_groups: 8,
|
||||
..Default::default()
|
||||
}
|
||||
},
|
||||
memory_hints: wgpu::MemoryHints::MemoryUsage,
|
||||
},
|
||||
None,
|
||||
).await.unwrap();
|
||||
|
||||
let surface_caps = surface.get_capabilities(&adapter);
|
||||
let present_mode = surface_caps.present_modes[0];
|
||||
debug!("present mode: {:?}", present_mode);
|
||||
|
||||
let surface_format = surface_caps.formats.iter()
|
||||
.copied()
|
||||
.find(|f| f.is_srgb())
|
||||
.unwrap_or(surface_caps.formats[0]);
|
||||
let config = wgpu::SurfaceConfiguration {
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_DST,
|
||||
format: surface_format,
|
||||
width: size.width,
|
||||
height: size.height,
|
||||
present_mode: wgpu::PresentMode::default(), //wgpu::PresentMode::Mailbox, // "Fast Vsync"
|
||||
alpha_mode: surface_caps.alpha_modes[0],
|
||||
desired_maximum_frame_latency: 2,
|
||||
view_formats: vec![],
|
||||
};
|
||||
surface.configure(&device, &config);
|
||||
|
||||
let device = Arc::new(device);
|
||||
let queue = Arc::new(queue);
|
||||
|
||||
let surface_target = RenderTarget::from_surface(surface, config);
|
||||
let view_target = Rc::new(RefCell::new(ViewTarget::new(device.clone(), surface_target)));
|
||||
|
||||
let mut main_graph = RenderGraph::new(device.clone(), queue.clone(), view_target.clone());
|
||||
|
||||
debug!("Adding base pass");
|
||||
main_graph.add_node(BasePassLabel, BasePass::new());
|
||||
|
||||
{
|
||||
let mut forward_plus_graph = RenderGraph::new(device.clone(), queue.clone(), view_target.clone());
|
||||
|
||||
debug!("Adding light base pass");
|
||||
forward_plus_graph.add_node(LightBasePassLabel, LightBasePass::new());
|
||||
|
||||
debug!("Adding light cull compute pass");
|
||||
forward_plus_graph.add_node(LightCullComputePassLabel, LightCullComputePass::new(size));
|
||||
|
||||
debug!("Adding Transforms node");
|
||||
forward_plus_graph.add_node(TransformsNodeLabel, TransformsNode::new());
|
||||
|
||||
debug!("Adding shadow maps pass");
|
||||
forward_plus_graph.add_node(ShadowMapsPassLabel, ShadowMapsPass::new(&device));
|
||||
|
||||
debug!("Adding mesh prep node");
|
||||
let mesh_prep = MeshPrepNode::new(&device);
|
||||
let material_bgl = mesh_prep.material_bgl.clone();
|
||||
forward_plus_graph.add_node(MeshPrepNodeLabel, mesh_prep);
|
||||
debug!("Adding mesh pass");
|
||||
forward_plus_graph.add_node(MeshesPassLabel, MeshPass::new(material_bgl));
|
||||
forward_plus_graph.add_edge(TransformsNodeLabel, MeshPrepNodeLabel);
|
||||
|
||||
forward_plus_graph.add_edge(LightBasePassLabel, LightCullComputePassLabel);
|
||||
forward_plus_graph.add_edge(LightCullComputePassLabel, MeshesPassLabel);
|
||||
forward_plus_graph.add_edge(MeshPrepNodeLabel, MeshesPassLabel);
|
||||
|
||||
// run ShadowMapsPass after MeshPrep and before MeshesPass
|
||||
forward_plus_graph.add_edge(MeshPrepNodeLabel, ShadowMapsPassLabel);
|
||||
forward_plus_graph.add_edge(ShadowMapsPassLabel, MeshesPassLabel);
|
||||
|
||||
main_graph.add_sub_graph(TestSubGraphLabel, forward_plus_graph);
|
||||
main_graph.add_node(TestSubGraphLabel, SubGraphNode::new(TestSubGraphLabel,
|
||||
vec![
|
||||
/* RenderGraphLabelValue::from(BasePassSlots::WindowTextureView),
|
||||
RenderGraphLabelValue::from(BasePassSlots::MainRenderTarget), */
|
||||
RenderGraphLabelValue::from(BasePassSlots::DepthTexture),
|
||||
RenderGraphLabelValue::from(BasePassSlots::DepthTextureView),
|
||||
RenderGraphLabelValue::from(BasePassSlots::Camera),
|
||||
RenderGraphLabelValue::from(BasePassSlots::ScreenSize),
|
||||
]
|
||||
));
|
||||
}
|
||||
|
||||
main_graph.add_node(FxaaPassLabel, FxaaPass::default());
|
||||
main_graph.add_edge(TestSubGraphLabel, FxaaPassLabel);
|
||||
|
||||
//let present_pass_label = PresentPassLabel::new(BasePassSlots::Frame);//TintPassSlots::Frame);
|
||||
let p = PresentPass;
|
||||
main_graph.add_node(PresentPassLabel, p);
|
||||
|
||||
main_graph.add_edge(BasePassLabel, TestSubGraphLabel);
|
||||
main_graph.add_edge(TestSubGraphLabel, PresentPassLabel);
|
||||
|
||||
/* debug!("Adding base pass");
|
||||
g.add_node(BasePassLabel, BasePass::new(surface_target));
|
||||
|
||||
//debug!("Adding triangle pass");
|
||||
//g.add_node(TrianglePass::new());
|
||||
|
||||
|
||||
|
||||
debug!("Adding present pass");
|
||||
let present_pass_label = PresentPassLabel::new(BasePassSlots::Frame);//TintPassSlots::Frame);
|
||||
let p = PresentPass::from_node_label(present_pass_label.clone());
|
||||
g.add_node(p.label.clone(), p); */
|
||||
|
||||
/* debug!("adding tint pass");
|
||||
g.add_node(TintPassLabel, TintPass::new(surface_target));
|
||||
|
||||
g.add_edge(BasePassLabel, TintPassLabel);
|
||||
g.add_edge(LightCullComputePassLabel, TintPassLabel);
|
||||
g.add_edge(MeshesPassLabel, TintPassLabel);
|
||||
|
||||
g.add_edge(TintPassLabel, present_pass_label.clone());
|
||||
*/
|
||||
|
||||
/* g.add_edge(BasePassLabel, LightBasePassLabel);
|
||||
g.add_edge(LightBasePassLabel, LightCullComputePassLabel);
|
||||
g.add_edge(BasePassLabel, MeshesPassLabel);
|
||||
|
||||
g.add_edge(BasePassLabel, present_pass_label.clone());
|
||||
g.add_edge(LightCullComputePassLabel, present_pass_label.clone());
|
||||
g.add_edge(MeshesPassLabel, present_pass_label.clone()); */
|
||||
|
||||
main_graph.setup(&device);
|
||||
|
||||
Self {
|
||||
window,
|
||||
device,
|
||||
queue,
|
||||
size,
|
||||
clear_color: wgpu::Color {
|
||||
r: 0.1,
|
||||
g: 0.2,
|
||||
b: 0.3,
|
||||
a: 1.0,
|
||||
},
|
||||
render_pipelines: Default::default(),
|
||||
render_jobs: Default::default(),
|
||||
|
||||
graph: main_graph,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Renderer for BasicRenderer {
|
||||
#[instrument(skip(self, main_world))]
|
||||
fn prepare(&mut self, main_world: &mut World) {
|
||||
self.graph.prepare(main_world);
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
|
||||
self.graph.render();
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(world, self))]
|
||||
fn on_resize(&mut self, world: &mut World, new_size: winit::dpi::PhysicalSize<u32>) {
|
||||
if new_size.width > 0 && new_size.height > 0 {
|
||||
self.size = new_size;
|
||||
|
||||
// update surface config and the surface
|
||||
/* let mut rt = self.graph.slot_value_mut(BasePassSlots::MainRenderTarget)
|
||||
.unwrap().as_render_target_mut().unwrap();
|
||||
rt.resize(&self.device, math::UVec2::new(new_size.width, new_size.height)); */
|
||||
self.graph.view_target_mut().resize(&self.device, math::UVec2::new(new_size.width, new_size.height));
|
||||
/* rt.surface_config.width = new_size.width;
|
||||
rt.surface_config.height = new_size.height;
|
||||
rt.surface.configure(&self.device, &rt.surface_config); */
|
||||
|
||||
// update screen size resource in ecs
|
||||
let mut world_ss = world.get_resource_mut::<ScreenSize>()
|
||||
.expect("world missing ScreenSize resource");
|
||||
world_ss.0 = glam::UVec2::new(new_size.width, new_size.height);
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
fn surface_size(&self) -> winit::dpi::PhysicalSize<u32> {
|
||||
self.size
|
||||
}
|
||||
|
||||
fn add_render_pipeline(&mut self, shader_id: u64, pipeline: Arc<RenderPipeline>) {
|
||||
self.render_pipelines.insert(shader_id, pipeline);
|
||||
}
|
||||
}
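// Hedged sketch of how the engine is assumed to drive a `Renderer` each frame; the game
// loop, `world`, and the error-handling policy here are illustrative, not taken from this diff.
fn drive_renderer_example(renderer: &mut dyn Renderer, world: &mut World) {
    renderer.prepare(world);
    match renderer.render() {
        Ok(()) => {}
        // a lost surface is recovered by reconfiguring at the current size
        Err(wgpu::SurfaceError::Lost) => {
            let size = renderer.surface_size();
            renderer.on_resize(world, size);
        }
        Err(e) => warn!("render error: {:?}", e),
    }
}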
|
|
@ -1,115 +0,0 @@
|
|||
use std::{ops::Deref, rc::Rc, sync::Arc};
|
||||
|
||||
use wgpu::PipelineLayout;
|
||||
|
||||
use super::{PipelineCompilationOptions, Shader};
|
||||
|
||||
//#[derive(Debug, Clone)]
|
||||
pub struct ComputePipelineDescriptor {
|
||||
pub label: Option<String>,
|
||||
pub layouts: Vec<Arc<wgpu::BindGroupLayout>>,
|
||||
// TODO: make this a ResHandle<Shader>
|
||||
/// The compiled shader module for the stage.
|
||||
pub shader: Rc<Shader>,
|
||||
/// The entry point in the compiled shader.
|
||||
/// There must be a function in the shader with the same name.
|
||||
pub shader_entry_point: String,
|
||||
/// Advanced options for when this pipeline is compiled
|
||||
///
|
||||
/// This implements `Default`, and for most users can be set to `Default::default()`
|
||||
pub compilation_options: PipelineCompilationOptions,
|
||||
pub push_constant_ranges: Vec<wgpu::PushConstantRange>,
|
||||
/// The pipeline cache to use when creating this pipeline.
|
||||
pub cache: Option<Arc<wgpu::PipelineCache>>,
|
||||
}
|
||||
|
||||
impl ComputePipelineDescriptor {
|
||||
/// Create the [`wgpu::PipelineLayout`] for this pipeline
|
||||
pub(crate) fn create_layout(&self, device: &wgpu::Device) -> wgpu::PipelineLayout {
|
||||
let bgs = self
|
||||
.layouts
|
||||
.iter()
|
||||
.map(|bg| bg.as_ref())
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
|
||||
label: None, //self.label.as_ref().map(|s| format!("{}Layout", s)),
|
||||
bind_group_layouts: &bgs,
|
||||
push_constant_ranges: &self.push_constant_ranges,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ComputePipeline {
|
||||
layout: Option<PipelineLayout>,
|
||||
wgpu_pipeline: wgpu::ComputePipeline,
|
||||
}
|
||||
|
||||
impl Deref for ComputePipeline {
|
||||
type Target = wgpu::ComputePipeline;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.wgpu_pipeline
|
||||
}
|
||||
}
|
||||
|
||||
impl From<wgpu::ComputePipeline> for ComputePipeline {
|
||||
fn from(value: wgpu::ComputePipeline) -> Self {
|
||||
Self {
|
||||
layout: None,
|
||||
wgpu_pipeline: value,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ComputePipeline {
|
||||
/// Creates a new compute pipeline on the `device`.
|
||||
///
|
||||
/// Parameters:
|
||||
/// * `device` - The device to create the pipeline on.
|
||||
/// * `desc` - The descriptor of the compute pipeline
|
||||
pub fn create(device: &wgpu::Device, desc: &ComputePipelineDescriptor) -> ComputePipeline {
|
||||
// create the layout only if bind groups layouts were specified
|
||||
let layout = if !desc.layouts.is_empty() {
|
||||
Some(desc.create_layout(device))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// an Rc was used here so that this shader could be reused by the fragment stage if
|
||||
// they share the same shader. I tried to do it without an Rc but couldn't get past
|
||||
// the borrow checker
|
||||
let compiled_shader = Rc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
|
||||
label: desc.shader.label.as_deref(),
|
||||
source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
|
||||
&desc.shader.source,
|
||||
)),
|
||||
}));
|
||||
|
||||
let desc = wgpu::ComputePipelineDescriptor {
|
||||
label: desc.label.as_deref(),
|
||||
layout: layout.as_ref(),
|
||||
module: &compiled_shader,
|
||||
entry_point: &desc.shader_entry_point,
|
||||
cache: desc.cache.as_ref().map(|c| &**c),
|
||||
compilation_options: desc.compilation_options.as_wgpu(),
|
||||
};
|
||||
|
||||
let pipeline = device.create_compute_pipeline(&desc);
|
||||
|
||||
Self {
|
||||
layout,
|
||||
wgpu_pipeline: pipeline,
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn layout(&self) -> Option<&PipelineLayout> {
|
||||
self.layout.as_ref()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn wgpu_pipeline(&self) -> &wgpu::ComputePipeline {
|
||||
&self.wgpu_pipeline
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
mod shader;
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub use shader::*;
|
||||
|
||||
mod pipeline;
|
||||
pub use pipeline::*;
|
||||
|
||||
mod compute_pipeline;
|
||||
pub use compute_pipeline::*;
|
||||
|
||||
mod render_pipeline;
|
||||
pub use render_pipeline::*;
|
||||
|
||||
mod pass;
|
||||
pub use pass::*;
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct PipelineCompilationOptions {
|
||||
pub constants: HashMap<String, f64>,
|
||||
pub zero_initialize_workgroup_memory: bool,
|
||||
pub vertex_pulling_transform: bool,
|
||||
}
|
||||
|
||||
impl PipelineCompilationOptions {
|
||||
pub fn as_wgpu(&self) -> wgpu::PipelineCompilationOptions {
|
||||
wgpu::PipelineCompilationOptions {
|
||||
constants: &self.constants,
|
||||
zero_initialize_workgroup_memory: self.zero_initialize_workgroup_memory,
|
||||
vertex_pulling_transform: self.vertex_pulling_transform,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
/// A trait that represents a [`wgpu::ComputePass`] or [`wgpu::RenderPass`].
|
||||
pub trait Pass<'a> {
|
||||
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]);
|
||||
}
|
||||
|
||||
impl<'a> Pass<'a> for wgpu::ComputePass<'a> {
|
||||
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]) {
|
||||
self.set_bind_group(index, bind_group, offsets);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Pass<'a> for wgpu::RenderPass<'a> {
|
||||
fn set_bind_group(&mut self, index: u32, bind_group: &'a wgpu::BindGroup, offsets: &[wgpu::DynamicOffset]) {
|
||||
self.set_bind_group(index, bind_group, offsets);
|
||||
}
|
||||
}
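// Hypothetical helper (not from the diff) showing why `Pass` exists: shared binding code
// can be written once and reused for both compute and render passes.
fn bind_common_groups<'a, P: Pass<'a>>(
    pass: &mut P,
    camera_bg: &'a wgpu::BindGroup,
    lights_bg: &'a wgpu::BindGroup,
) {
    pass.set_bind_group(0, camera_bg, &[]);
    pass.set_bind_group(1, lights_bg, &[]);
}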
|
|
@ -1,87 +0,0 @@
use super::{compute_pipeline::ComputePipeline, render_pipeline::RenderPipeline, ComputePipelineDescriptor, RenderPipelineDescriptor};

#[allow(clippy::large_enum_variant)]
pub enum PipelineDescriptor {
    Render(RenderPipelineDescriptor),
    Compute(ComputePipelineDescriptor),
}

impl PipelineDescriptor {
    pub fn as_render_pipeline_descriptor(&self) -> Option<&RenderPipelineDescriptor> {
        match self {
            Self::Render(r) => Some(r),
            _ => None,
        }
    }

    pub fn as_compute_pipeline_descriptor(&self) -> Option<&ComputePipelineDescriptor> {
        match self {
            Self::Compute(c) => Some(c),
            _ => None,
        }
    }
}

pub enum Pipeline {
    Render(RenderPipeline),
    Compute(ComputePipeline),
}

impl From<Pipeline> for RenderPipeline {
    fn from(val: Pipeline) -> Self {
        match val {
            Pipeline::Render(r) => r,
            _ => panic!("Pipeline is not a RenderPipeline"),
        }
    }
}

impl From<Pipeline> for ComputePipeline {
    fn from(val: Pipeline) -> Self {
        match val {
            Pipeline::Compute(c) => c,
            _ => panic!("Pipeline is not a ComputePipeline"),
        }
    }
}

impl Pipeline {
    pub fn bind_group_layout(&self, index: u32) -> wgpu::BindGroupLayout {
        match self {
            Pipeline::Render(r) => r.get_bind_group_layout(index),
            Pipeline::Compute(c) => c.get_bind_group_layout(index),
        }
    }

    /// Gets self as a render pipeline, panics if self is not a render pipeline.
    pub fn as_render(&self) -> &RenderPipeline {
        match self {
            Self::Render(r) => r,
            _ => panic!("Pipeline is not a RenderPipeline")
        }
    }

    /// Gets self as a render pipeline, returns `None` if self is not a render pipeline.
    pub fn try_as_render(&self) -> Option<&RenderPipeline> {
        match self {
            Self::Render(r) => Some(r),
            _ => None,
        }
    }

    /// Gets self as a compute pipeline, panics if self is not a compute pipeline.
    pub fn as_compute(&self) -> &ComputePipeline {
        match self {
            Self::Compute(r) => r,
            _ => panic!("Pipeline is not a ComputePipeline")
        }
    }

    /// Gets self as a compute pipeline, returns `None` if self is not a compute pipeline.
    pub fn try_as_compute(&self) -> Option<&ComputePipeline> {
        match self {
            Self::Compute(c) => Some(c),
            _ => None,
        }
    }
}
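A hedged call-site sketch for the enum above (the function and variable names are illustrative, not from the repository):

```rust
// Prefer the fallible accessor when the pipeline kind is not statically known.
fn record<'a>(pass: &mut wgpu::RenderPass<'a>, pipeline: &'a Pipeline) {
    if let Some(render) = pipeline.try_as_render() {
        // `RenderPipeline` derefs to `wgpu::RenderPipeline`
        pass.set_pipeline(render);
    }
}
```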
@ -1,151 +0,0 @@
use std::{num::NonZeroU32, ops::Deref, sync::Arc};

use wgpu::PipelineLayout;

use super::{FragmentState, VertexState};

//#[derive(Debug, Clone)]
pub struct RenderPipelineDescriptor {
    pub label: Option<String>,
    pub layouts: Vec<Arc<wgpu::BindGroupLayout>>,
    pub push_constant_ranges: Vec<wgpu::PushConstantRange>,
    pub vertex: VertexState,
    pub fragment: Option<FragmentState>,
    pub primitive: wgpu::PrimitiveState,
    pub depth_stencil: Option<wgpu::DepthStencilState>,
    pub multisample: wgpu::MultisampleState,
    pub multiview: Option<NonZeroU32>,
}

impl RenderPipelineDescriptor {
    /// Create the [`wgpu::PipelineLayout`] for this pipeline.
    pub(crate) fn create_layout(&self, device: &wgpu::Device) -> wgpu::PipelineLayout {
        let bgs = self
            .layouts
            .iter()
            .map(|bg| bg.as_ref())
            .collect::<Vec<_>>();

        device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None, //self.label.as_ref().map(|s| format!("{}Layout", s)),
            bind_group_layouts: &bgs,
            push_constant_ranges: &self.push_constant_ranges,
        })
    }
}

pub struct RenderPipeline {
    layout: Option<PipelineLayout>,
    wgpu_pipeline: wgpu::RenderPipeline,
}

impl Deref for RenderPipeline {
    type Target = wgpu::RenderPipeline;

    fn deref(&self) -> &Self::Target {
        &self.wgpu_pipeline
    }
}

impl From<wgpu::RenderPipeline> for RenderPipeline {
    fn from(value: wgpu::RenderPipeline) -> Self {
        Self {
            layout: None,
            wgpu_pipeline: value,
        }
    }
}

impl RenderPipeline {
    /// Creates a render pipeline from a [`RenderPipelineDescriptor`].
    ///
    /// Parameters:
    /// * `device` - The device to create the pipeline on.
    /// * `desc` - The descriptor holding the layouts, shader stages, and pipeline state.
    pub fn create(device: &wgpu::Device, desc: &RenderPipelineDescriptor) -> RenderPipeline {
        // create the layout only if bind group layouts were specified
        let layout = if !desc.layouts.is_empty() {
            Some(desc.create_layout(device))
        } else {
            None
        };

        let vrtx_buffs = desc
            .vertex
            .buffers
            .iter()
            .map(|vbl| wgpu::VertexBufferLayout {
                array_stride: vbl.array_stride,
                step_mode: vbl.step_mode,
                attributes: &vbl.attributes,
            })
            .collect::<Vec<_>>();

        // an Arc is used here so that this shader module can be reused by the fragment stage
        // if both stages share the same shader. I tried to do it without one but couldn't get
        // past the borrow checker
        let vrtx_shad = Arc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
            label: desc.vertex.module.label.as_deref(),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(
                &desc.vertex.module.source,
            )),
        }));
        let vrtx_state = wgpu::VertexState {
            module: &vrtx_shad,
            entry_point: &desc.vertex.entry_point,
            buffers: &vrtx_buffs,
            compilation_options: Default::default(),
        };

        let frag_module = desc.fragment.as_ref().map(|f| {
            if f.module == desc.vertex.module {
                vrtx_shad.clone()
            } else {
                Arc::new(device.create_shader_module(wgpu::ShaderModuleDescriptor {
                    label: f.module.label.as_deref(),
                    source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Borrowed(&f.module.source)),
                }))
            }
        });

        let fm = frag_module.as_ref();
        let fstate = desc.fragment.as_ref().map(move |f| wgpu::FragmentState {
            module: fm.unwrap(),
            entry_point: &f.entry_point,
            targets: &f.targets,
            compilation_options: Default::default(),
        });

        let render_desc = wgpu::RenderPipelineDescriptor {
            label: desc.label.as_deref(),
            layout: layout.as_ref(),
            vertex: vrtx_state,
            primitive: desc.primitive,
            depth_stencil: desc.depth_stencil.clone(),
            multisample: desc.multisample,
            fragment: fstate,
            multiview: desc.multiview,
            cache: None,
        };

        let render_pipeline = device.create_render_pipeline(&render_desc);

        Self {
            layout,
            wgpu_pipeline: render_pipeline,
        }
    }

    #[inline(always)]
    pub fn layout(&self) -> Option<&PipelineLayout> {
        self.layout.as_ref()
    }

    #[inline(always)]
    pub fn wgpu_pipeline(&self) -> &wgpu::RenderPipeline {
        &self.wgpu_pipeline
    }
}
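A minimal construction sketch for the descriptor above, assuming a `device` and a prepared `vertex_state` are in scope (the field values are illustrative only):

```rust
let desc = RenderPipelineDescriptor {
    label: Some("example pipeline".into()),
    layouts: vec![],               // no bind groups, so no layout is created
    push_constant_ranges: vec![],
    vertex: vertex_state,          // a `VertexState` built elsewhere
    fragment: None,                // e.g. a depth-only pass
    primitive: wgpu::PrimitiveState::default(),
    depth_stencil: None,
    multisample: wgpu::MultisampleState::default(),
    multiview: None,
};
let pipeline = RenderPipeline::create(&device, &desc);
```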
@ -1,50 +0,0 @@
use std::rc::Rc;

#[derive(Debug, Default, Clone)]
pub struct VertexBufferLayout {
    pub array_stride: wgpu::BufferAddress,
    pub step_mode: wgpu::VertexStepMode,
    pub attributes: Vec<wgpu::VertexAttribute>,
}

impl<'a> From<wgpu::VertexBufferLayout<'a>> for VertexBufferLayout {
    fn from(value: wgpu::VertexBufferLayout) -> Self {
        Self {
            array_stride: value.array_stride,
            step_mode: value.step_mode,
            attributes: value.attributes.to_vec(),
        }
    }
}

/// Describes the vertex stage in a render pipeline.
#[derive(Debug, Clone)]
pub struct VertexState {
    // TODO: make this a ResHandle<Shader>
    /// The compiled shader module for the stage.
    pub module: Rc<Shader>,
    /// The entry point in the compiled shader.
    /// There must be a function in the shader with the same name.
    pub entry_point: String,
    /// The format of the vertex buffers used with this pipeline.
    pub buffers: Vec<VertexBufferLayout>,
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Shader {
    pub label: Option<String>,
    pub source: String,
}

/// Describes the fragment stage in the render pipeline.
#[derive(Debug, Clone)]
pub struct FragmentState {
    // TODO: make this a ResHandle<Shader>
    /// The compiled shader module for the stage.
    pub module: Rc<Shader>,
    /// The entry point in the compiled shader.
    /// There must be a function in the shader with the same name.
    pub entry_point: String,
    /// The color state of the render targets.
    pub targets: Vec<Option<wgpu::ColorTargetState>>,
}
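A hedged sketch of how the two stage descriptions above can share one `Shader`, which is what the module-equality check in `RenderPipeline::create` relies on (the shader path is an assumption):

```rust
use std::rc::Rc;

let shader = Rc::new(Shader {
    label: Some("main 3d shader".into()),
    source: include_str!("shaders/base.wgsl").into(), // hypothetical path
});
let vertex = VertexState {
    module: shader.clone(),
    entry_point: "vs_main".into(),
    buffers: vec![],
};
let fragment = FragmentState {
    module: shader, // same module, so only one wgpu shader module gets created
    entry_point: "fs_main".into(),
    targets: vec![],
};
```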
@ -1,285 +0,0 @@
#define_module lyra::main_3d
#import lyra::shadows::bindings::{u_light_shadow}
#import lyra::shadows::calc::{calc_shadow_dir_light, calc_shadow_point_light, calc_shadow_spot_light}

// Vertex shader

const LIGHT_TY_DIRECTIONAL = 0u;
const LIGHT_TY_POINT = 1u;
const LIGHT_TY_SPOT = 2u;

const ALPHA_CUTOFF = 0.1;

struct VertexInput {
    @location(0) position: vec3<f32>,
    @location(1) tex_coords: vec2<f32>,
    @location(2) normal: vec3<f32>,
}

struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
    @location(0) tex_coords: vec2<f32>,
    @location(1) world_position: vec3<f32>,
    @location(2) world_normal: vec3<f32>,
    @location(3) frag_pos_light_space: vec4<f32>,
}

struct TransformData {
    transform: mat4x4<f32>,
    normal_matrix: mat4x4<f32>,
}

struct CameraUniform {
    view: mat4x4<f32>,
    inverse_projection: mat4x4<f32>,
    view_projection: mat4x4<f32>,
    projection: mat4x4<f32>,
    position: vec3<f32>,
    tile_debug: u32,
}

struct Light {
    position: vec3<f32>,
    light_ty: u32,
    direction: vec3<f32>,
    enabled: u32,
    color: vec3<f32>,

    range: f32,
    intensity: f32,
    smoothness: f32,

    spot_cutoff: f32,
    spot_outer_cutoff: f32,
    light_shadow_uniform_index: array<i32, 6>,
}

struct Lights {
    light_count: u32,
    data: array<Light>,
}

@group(1) @binding(0)
var<uniform> u_model_transform_data: TransformData;

@group(2) @binding(0)
var<uniform> u_camera: CameraUniform;

@group(3) @binding(0)
var<storage> u_lights: Lights;

@vertex
fn vs_main(
    model: VertexInput,
) -> VertexOutput {
    var out: VertexOutput;

    var world_position: vec4<f32> = u_model_transform_data.transform * vec4<f32>(model.position, 1.0);
    out.world_position = world_position.xyz;

    out.tex_coords = model.tex_coords;
    out.clip_position = u_camera.view_projection * world_position;

    // the normal mat is actually only a mat3x3, but there's a bug in wgpu: https://github.com/gfx-rs/wgpu-rs/issues/36
    let normal_mat4 = u_model_transform_data.normal_matrix;
    let normal_mat = mat3x3(normal_mat4[0].xyz, normal_mat4[1].xyz, normal_mat4[2].xyz);
    out.world_normal = normalize(normal_mat * model.normal);

    return out;
}

// Fragment shader

struct Material {
    ambient: vec3<f32>,
    diffuse: vec3<f32>,
    shininess: f32,
    specular_factor: f32,
    specular_color: vec3<f32>,
}

@group(0) @binding(0)
var<uniform> u_material: Material;
@group(0) @binding(1)
var t_diffuse: texture_2d<f32>;
@group(0) @binding(2)
var s_diffuse: sampler;

@group(4) @binding(0)
var<storage, read_write> u_light_indices: array<u32>;
@group(4) @binding(1)
var t_light_grid: texture_storage_2d<rg32uint, read_write>; // rg32uint = vec2<u32>

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    if (u_camera.tile_debug == 1u) {
        return debug_grid(in);
    }

    let object_color: vec4<f32> = textureSample(t_diffuse, s_diffuse, in.tex_coords);
    let specular_color: vec3<f32> = vec3<f32>(0.0); //textureSample(t_specular, s_specular, in.tex_coords).xyz;
    var light_res = vec3<f32>(0.0);

    if (object_color.a < ALPHA_CUTOFF) {
        discard;
    }

    let tile_index = vec2<u32>(floor(in.clip_position.xy / 16.0));
    let tile: vec2<u32> = textureLoad(t_light_grid, tile_index).xy;

    let light_offset = tile.x;
    let light_count = tile.y;

    for (var i = 0u; i < light_count; i++) {
        let light_index = u_light_indices[light_offset + i];
        let light: Light = u_lights.data[light_index];
        let light_dir = normalize(-light.direction);

        if (light.light_ty == LIGHT_TY_DIRECTIONAL) {
            let shadow_u: LightShadowMapUniform = u_light_shadow[light.light_shadow_uniform_index[0]];
            let frag_pos_light_space = shadow_u.light_space_matrix * vec4<f32>(in.world_position, 1.0);

            let shadow = calc_shadow_dir_light(in.world_position, in.world_normal, light_dir, light);
            light_res += blinn_phong_dir_light(in.world_position, in.world_normal, light, u_material, specular_color, shadow);
        } else if (light.light_ty == LIGHT_TY_POINT) {
            let shadow = calc_shadow_point_light(in.world_position, in.world_normal, light_dir, light);
            light_res += blinn_phong_point_light(in.world_position, in.world_normal, light, u_material, specular_color, shadow);
        } else if (light.light_ty == LIGHT_TY_SPOT) {
            let shadow = calc_shadow_spot_light(in.world_position, in.world_normal, light_dir, light);
            light_res += blinn_phong_spot_light(in.world_position, in.world_normal, light, u_material, specular_color, shadow);
        }
    }

    let light_object_res = light_res * (object_color.xyz);
    return vec4<f32>(light_object_res, object_color.a);
}

fn debug_grid(in: VertexOutput) -> vec4<f32> {
    let tile_index_float: vec2<f32> = in.clip_position.xy / 16.0;
    let tile_index = vec2<u32>(floor(tile_index_float));
    let tile: vec2<u32> = textureLoad(t_light_grid, tile_index).xy;

    // detect where the line grids would be at
    let x = tile_index_float.x - trunc(tile_index_float.x);
    let y = tile_index_float.y - trunc(tile_index_float.y);
    let ta: bool = x < 0.05 || y < 0.05;
    let tb: bool = x > 0.95 || y > 0.95;

    let ratio = f32(tile.y) / f32(u_lights.light_count);
    return vec4<f32>(ratio, ratio, ratio, 1.0);
}

fn blinn_phong_dir_light(world_pos: vec3<f32>, world_norm: vec3<f32>, dir_light: Light, material: Material, specular_factor: vec3<f32>, shadow: f32) -> vec3<f32> {
    let light_color = dir_light.color.xyz;
    let camera_view_pos = u_camera.position;

    //// Ambient light ////
    var ambient_color = light_color * material.ambient.xyz * material.diffuse.xyz;

    //// diffuse ////
    let light_dir = normalize(-dir_light.direction);

    let diffuse_strength = max(dot(world_norm, light_dir), 0.0);
    var diffuse_color = light_color * (diffuse_strength * material.diffuse.xyz);
    //// end of diffuse ////

    //// specular ////
    let view_dir = normalize(camera_view_pos - world_pos);
    let half_dir = normalize(view_dir + light_dir);

    let specular_strength = pow(max(dot(world_norm, half_dir), 0.0), material.shininess);
    var specular_color = specular_strength * (light_color * specular_factor);
    //// end of specular ////

    /*ambient_color *= dir_light.ambient;
    diffuse_color *= dir_light.diffuse;
    specular_color *= dir_light.specular;*/

    return (ambient_color + (shadow) * (diffuse_color + specular_color)) * dir_light.intensity;
}

fn blinn_phong_point_light(world_pos: vec3<f32>, world_norm: vec3<f32>, point_light: Light, material: Material, specular_factor: vec3<f32>, shadow: f32) -> vec3<f32> {
    let light_color = point_light.color.xyz;
    let light_pos = point_light.position.xyz;
    let camera_view_pos = u_camera.position;

    //// Ambient light ////
    var ambient_color = light_color * material.ambient.xyz * material.diffuse.xyz;

    //// diffuse ////
    let light_dir = normalize(light_pos - world_pos);

    let diffuse_strength = max(dot(world_norm, light_dir), 0.0);
    var diffuse_color = light_color * (diffuse_strength * material.diffuse.xyz);
    //// end of diffuse ////

    //// specular ////
    let view_dir = normalize(camera_view_pos - world_pos);
    let half_dir = normalize(view_dir + light_dir);

    let specular_strength = pow(max(dot(world_norm, half_dir), 0.0), material.shininess);
    var specular_color = specular_strength * (light_color * specular_factor);
    //// end of specular ////

    let distance = length(light_pos - world_pos);
    let attenuation = 1.0 - smoothstep(point_light.range * point_light.smoothness, point_light.range, distance);

    ambient_color *= attenuation;
    diffuse_color *= attenuation;
    specular_color *= attenuation;

    //return (ambient_color + shadow * (diffuse_color + specular_color)) * point_light.intensity;
    return (shadow * (ambient_color + diffuse_color + specular_color)) * point_light.intensity;
}

fn blinn_phong_spot_light(world_pos: vec3<f32>, world_norm: vec3<f32>, spot_light: Light, material: Material, specular_factor: vec3<f32>, shadow: f32) -> vec3<f32> {
    let light_color = spot_light.color;
    let light_pos = spot_light.position;
    let camera_view_pos = u_camera.position;

    let light_dir = normalize(spot_light.position - world_pos);

    var ambient_color = light_color * material.ambient.xyz * material.diffuse.xyz;

    //// diffuse ////
    //let light_dir = normalize(light_pos - world_pos);

    let diffuse_strength = max(dot(world_norm, light_dir), 0.0);
    var diffuse_color = light_color * (diffuse_strength * material.diffuse.xyz);
    //// end of diffuse ////

    //// specular ////
    let view_dir = normalize(camera_view_pos - world_pos);
    let half_dir = normalize(view_dir + light_dir);

    let specular_strength = pow(max(dot(world_norm, half_dir), 0.0), material.shininess);
    var specular_color = specular_strength * (light_color * specular_factor);
    //// end of specular ////

    //// spot light soft edges ////
    let min_cos = cos(spot_light.spot_cutoff);
    let max_cos = lerp(min_cos, 1.0, 0.5);
    let cos_angle = dot(spot_light.direction, -light_dir);
    let cone = smoothstep(min_cos, max_cos, cos_angle);
    //// end of spot light soft edges ////

    //// spot light attenuation ////
    let distance = length(light_pos - world_pos);
    let attenuation = calc_attenuation(spot_light, distance);

    ambient_color *= attenuation * cone;
    diffuse_color *= attenuation * cone;
    specular_color *= attenuation * cone;
    //// end of spot light attenuation ////

    //return /*ambient_color +*/ diffuse_color + specular_color;
    return (shadow * (diffuse_color + specular_color)) * spot_light.intensity;
}

fn calc_attenuation(light: Light, distance: f32) -> f32 {
    return 1.0 - smoothstep(light.range * light.smoothness, light.range, distance);
}

fn lerp(start: f32, end: f32, alpha: f32) -> f32 {
    return (start + (end - start) * alpha);
}
@ -1,263 +0,0 @@
// Largely based off of https://blog.simonrodriguez.fr/articles/2016/07/implementing_fxaa.html

const EDGE_THRESHOLD_MIN: f32 = 0.0312;
const EDGE_THRESHOLD_MAX: f32 = 0.125;
const ITERATIONS: i32 = 12;
const SUBPIXEL_QUALITY: f32 = 0.75;

@group(0) @binding(0)
var t_screen: texture_2d<f32>;
@group(0) @binding(1)
var s_screen: sampler;

struct VertexOutput {
    @builtin(position)
    clip_position: vec4<f32>,
    @location(0)
    tex_coords: vec2<f32>,
}

fn QUALITY(q: i32) -> f32 {
    switch (q) {
        default: { return 1.0; }
        case 5: { return 1.5; }
        case 6, 7, 8, 9: { return 2.0; }
        case 10: { return 4.0; }
        case 11: { return 8.0; }
    }
}

fn rgb2luma(rgb: vec3<f32>) -> f32 {
    return sqrt(dot(rgb, vec3<f32>(0.299, 0.587, 0.114)));
}

@vertex
fn vs_main(
    @builtin(vertex_index) vertex_index: u32,
) -> VertexOutput {
    let tex_coords = vec2<f32>(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0;
    let clip_position = vec4<f32>(tex_coords * vec2<f32>(2.0, -2.0) + vec2<f32>(-1.0, 1.0), 0.0, 1.0);

    return VertexOutput(clip_position, tex_coords);
}

fn texture_offset(tex: texture_2d<f32>, samp: sampler, point: vec2<f32>, offset: vec2<i32>) -> vec3<f32> {
    var tex_coords = point + vec2<f32>(offset);
    return textureSample(tex, samp, tex_coords).xyz;
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let resolution = vec2<f32>(textureDimensions(t_screen));
    let inverse_screen_size = 1.0 / resolution.xy;
    let tex_coords = in.clip_position.xy * inverse_screen_size;

    var color_center: vec3<f32> = textureSampleLevel(t_screen, s_screen, tex_coords, 0.0).xyz;

    // Luma at the current fragment
    let luma_center = rgb2luma(color_center);

    // Luma at the four direct neighbours of the current fragment.
    let luma_down = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(0, -1)).xyz);
    let luma_up = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(0, 1)).xyz);
    let luma_left = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(-1, 0)).xyz);
    let luma_right = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(1, 0)).xyz);

    // Find the maximum and minimum luma around the current fragment.
    let luma_min = min(luma_center, min(min(luma_down, luma_up), min(luma_left, luma_right)));
    let luma_max = max(luma_center, max(max(luma_down, luma_up), max(luma_left, luma_right)));

    // Compute the delta
    let luma_range = luma_max - luma_min;

    // If the luma variation is lower than a threshold (or if we are in a really dark area),
    // we are not on an edge, don't perform any AA.
    if (luma_range < max(EDGE_THRESHOLD_MIN, luma_max * EDGE_THRESHOLD_MAX)) {
        return vec4<f32>(color_center, 1.0);
    }

    // Query the 4 remaining corners lumas
    let luma_down_left = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(-1, -1)).xyz);
    let luma_up_right = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(1, 1)).xyz);
    let luma_up_left = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(-1, 1)).xyz);
    let luma_down_right = rgb2luma(textureSampleLevel(t_screen, s_screen, tex_coords, 0.0, vec2<i32>(1, -1)).xyz);

    // Combine the four edges lumas (using intermediary variables for future computations with the same values).
    let luma_down_up = luma_down + luma_up;
    let luma_left_right = luma_left + luma_right;

    // Same for corners
    let luma_left_corners = luma_down_left + luma_up_left;
    let luma_down_corners = luma_down_left + luma_down_right;
    let luma_right_corners = luma_down_right + luma_up_right;
    let luma_up_corners = luma_up_right + luma_up_left;

    // Compute an estimation of the gradient along the horizontal and vertical axis.
    let edge_horizontal = abs(-2.0 * luma_left + luma_left_corners)
        + abs(-2.0 * luma_center + luma_down_up) * 2.0
        + abs(-2.0 * luma_right + luma_right_corners);
    let edge_vertical = abs(-2.0 * luma_up + luma_up_corners)
        + abs(-2.0 * luma_center + luma_left_right) * 2.0
        + abs(-2.0 * luma_down + luma_down_corners);

    // Is the local edge horizontal or vertical?
    let is_horizontal = edge_horizontal >= edge_vertical;

    // Select the two neighboring texels lumas in the opposite direction to the local edge.
    let luma1 = select(luma_left, luma_down, is_horizontal);
    let luma2 = select(luma_right, luma_up, is_horizontal);

    // Compute gradients in this direction
    let gradient1 = luma1 - luma_center;
    let gradient2 = luma2 - luma_center;

    // Which direction is the steepest?
    let is_1_steepest = abs(gradient1) >= abs(gradient2);

    // Gradient in the corresponding direction, normalized
    let gradient_scaled = 0.25 * max(abs(gradient1), abs(gradient2));

    // Choose the step size (one pixel) according to the edge direction.
    var step_length: f32;
    if (is_horizontal) {
        step_length = inverse_screen_size.y;
    } else {
        step_length = inverse_screen_size.x;
    }

    // Average luma in the correct direction.
    var luma_local_average = 0.0;
    if (is_1_steepest) {
        // Switch the direction
        step_length = -step_length;
        luma_local_average = 0.5 * (luma1 + luma_center);
    } else {
        luma_local_average = 0.5 * (luma2 + luma_center);
    }

    // Shift UV in the correct direction by half a pixel.
    var current_uv = tex_coords;
    if (is_horizontal) {
        current_uv.y += step_length * 0.5;
    } else {
        current_uv.x += step_length * 0.5;
    }

    // Compute offset (for each iteration step) in the right direction.
    var offset: vec2<f32>;
    if (is_horizontal) {
        offset = vec2<f32>(inverse_screen_size.x, 0.0);
    } else {
        offset = vec2<f32>(0.0, inverse_screen_size.y);
    }
    // Compute UVs to explore on each side of the edge, orthogonally. The QUALITY allows us to
    // step faster.
    var uv1 = current_uv - offset;
    var uv2 = current_uv + offset;

    // Read the lumas at both current extremities of the exploration segment, and compute the
    // delta w.r.t. the local average luma.
    var luma_end1 = rgb2luma(textureSampleLevel(t_screen, s_screen, uv1, 0.0).xyz);
    var luma_end2 = rgb2luma(textureSampleLevel(t_screen, s_screen, uv2, 0.0).xyz);
    luma_end1 -= luma_local_average;
    luma_end2 -= luma_local_average;

    // If the luma deltas at the current extremities are larger than the local gradient, we have
    // reached the side of the edge.
    var reached1 = abs(luma_end1) >= gradient_scaled;
    var reached2 = abs(luma_end2) >= gradient_scaled;
    var reached_both = reached1 && reached2;

    // If the side is not reached, we continue to explore in this direction.
    if (!reached1) {
        uv1 -= offset;
    }
    if (!reached2) {
        uv2 += offset;
    }

    if (!reached_both) {
        for (var i = 2; i < ITERATIONS; i++) {
            // If needed, read luma in 1st direction, compute delta.
            if (!reached1) {
                luma_end1 = rgb2luma(textureSampleLevel(t_screen, s_screen, uv1, 0.0).xyz);
                luma_end1 = luma_end1 - luma_local_average;
            }
            // If needed, read luma in opposite direction, compute delta.
            if (!reached2) {
                luma_end2 = rgb2luma(textureSampleLevel(t_screen, s_screen, uv2, 0.0).xyz);
                luma_end2 = luma_end2 - luma_local_average;
            }
            // If the luma deltas at the current extremities are larger than the local gradient, we have reached the side of the edge.
            reached1 = abs(luma_end1) >= gradient_scaled;
            reached2 = abs(luma_end2) >= gradient_scaled;
            reached_both = reached1 && reached2;

            // If the side is not reached, we continue to explore in this direction, with a variable quality.
            if (!reached1) {
                uv1 -= offset * QUALITY(i);
            }
            if (!reached2) {
                uv2 += offset * QUALITY(i);
            }

            // If both sides have been reached, stop the exploration
            if (reached_both) {
                break;
            }
        }
    }

    // Compute the distances to each extremity of the edge.
    var distance1 = select(tex_coords.y - uv1.y, tex_coords.x - uv1.x, is_horizontal);
    var distance2 = select(uv2.y - tex_coords.y, uv2.x - tex_coords.x, is_horizontal);

    // In which direction is the extremity of the edge closer?
    let is_direction1 = distance1 < distance2;
    let distance_final = min(distance1, distance2);

    // Length of the edge.
    let edge_thickness = (distance1 + distance2);

    // UV offset: read in the direction of the closest side of the edge.
    let pixel_offset = -distance_final / edge_thickness + 0.5;

    // Is the luma at center smaller than the local average?
    let is_luma_center_smaller = luma_center < luma_local_average;

    // If the luma at center is smaller than at its neighbour, the delta luma at each end should
    // be positive (same variation). (in the direction of the closer side of the edge.)
    var direction_luma_end: f32;
    if (is_direction1) {
        direction_luma_end = luma_end1;
    } else {
        direction_luma_end = luma_end2;
    }
    let correct_variation = (direction_luma_end < 0.0) != is_luma_center_smaller;

    // If the luma variation is incorrect, do not offset.
    var final_offset = select(0.0, pixel_offset, correct_variation);

    // Sub-pixel shifting
    // Full weighted average of the luma over the 3x3 neighborhood.
    let luma_average = (1.0 / 12.0) * (2.0 * (luma_down_up + luma_left_right) + luma_left_corners + luma_right_corners);
    // Ratio of the delta between the global average and the center luma, over the luma range
    // in the 3x3 neighborhood.
    let sub_pixel_offset1 = clamp(abs(luma_average - luma_center) / luma_range, 0.0, 1.0);
    let sub_pixel_offset2 = (-2.0 * sub_pixel_offset1 + 3.0) * sub_pixel_offset1 * sub_pixel_offset1;
    // Compute a sub-pixel offset based on this delta.
    let sub_pixel_offset_final = sub_pixel_offset2 * sub_pixel_offset2 * SUBPIXEL_QUALITY;

    // Pick the biggest of the two offsets.
    final_offset = max(final_offset, sub_pixel_offset_final);

    var final_uv = tex_coords;
    if (is_horizontal) {
        final_uv.y += final_offset * step_length;
    } else {
        final_uv.x += final_offset * step_length;
    }

    let color = textureSampleLevel(t_screen, s_screen, final_uv, 0.0).xyz;
    return vec4<f32>(color, 1.0);
}
@ -1,330 +0,0 @@
const BLOCK_SIZE: u32 = 16u;
const MAX_TILE_VISIBLE_LIGHTS: u32 = 1024u;

const LIGHT_TY_DIRECTIONAL = 0u;
const LIGHT_TY_POINT = 1u;
const LIGHT_TY_SPOT = 2u;

alias vec2f = vec2<f32>;
alias vec3f = vec3<f32>;
alias vec4f = vec4<f32>;

struct CameraUniform {
    view: mat4x4<f32>,
    inverse_projection: mat4x4<f32>,
    view_projection: mat4x4<f32>,
    projection: mat4x4<f32>,
    position: vec3f,
    tile_debug: u32,
};

struct Light {
    position: vec3f,
    light_ty: u32,
    direction: vec3f,
    enabled: u32,
    color: vec3f,

    range: f32,
    intensity: f32,
    smoothness: f32,

    spot_cutoff: f32,
    spot_outer_cutoff: f32,
    light_shadow_uniform_index: array<i32, 6>,
};

struct Lights {
    light_count: u32,
    data: array<Light>,
};

struct Cone {
    tip: vec3f,
    height: f32,
    direction: vec3f,
    radius: f32,
}

struct Plane {
    normal: vec3f,
    origin_distance: f32,
}

var<workgroup> wg_min_depth: atomic<u32>;
var<workgroup> wg_max_depth: atomic<u32>;
var<workgroup> wg_light_index_start: atomic<u32>;
var<workgroup> wg_frustum_planes: array<Plane, 6>;

// index list of visible light sources for this tile
var<workgroup> wg_visible_light_indices: array<u32, MAX_TILE_VISIBLE_LIGHTS>;
var<workgroup> wg_visible_light_count: atomic<u32>;

@group(0) @binding(0)
var t_depthmap: texture_depth_2d;
@group(0) @binding(1)
var s_depthmap: sampler;

@group(1) @binding(0)
var<uniform> u_camera: CameraUniform;

@group(2) @binding(0)
var<storage, read> u_lights: Lights;

@group(3) @binding(0)
var<storage, read_write> u_light_indices: array<u32>;
@group(3) @binding(1)
var t_light_grid: texture_storage_2d<rg32uint, read_write>;
@group(3) @binding(2)
var<storage, read_write> u_light_index_counter: atomic<u32>;

@group(4) @binding(0)
var<uniform> u_screen_size: vec2<u32>;

@compute
@workgroup_size(16, 16, 1)
fn cs_main(
    @builtin(local_invocation_id) local_invocation_id: vec3<u32>,
    @builtin(workgroup_id) workgroup_id: vec3<u32>,
    @builtin(global_invocation_id) global_invocation_id: vec3<u32>,
    @builtin(num_workgroups) num_workgroups: vec3<u32>,
    @builtin(local_invocation_index) local_invocation_index: u32,
) {
    // Initialize some shared global values for depth and light count
    if (local_invocation_index == 0u) {
        wg_min_depth = 0xFFFFFFFu;
        wg_max_depth = 0u;
        wg_visible_light_count = 0u;
    }

    workgroupBarrier();

    // step 1: calculate the minimum and maximum depth values for this tile (using the depth map)
    var tex_coord = vec2<u32>(global_invocation_id.xy);
    var depth_float: f32 = textureLoad(t_depthmap, tex_coord, 0);
    // bitcast the floating depth to u32 for atomic comparisons between threads
    var depth_uint: u32 = bitcast<u32>(depth_float);

    // step 2: find the minimum and max depth for this tile.
    // atomically update the workgroup depth
    atomicMin(&wg_min_depth, depth_uint);
    atomicMax(&wg_max_depth, depth_uint);

    workgroupBarrier();

    // convert them back into floats
    var min_depth: f32 = bitcast<f32>(wg_min_depth);
    var max_depth: f32 = bitcast<f32>(wg_max_depth);

    // Create the frustum planes that will be used for this tile
    if (local_invocation_index == 0u) {
        // this algorithm is adapted from Google's filament:
        // https://github.com/google/filament/blob/3644e7f80827f1cd2caef4a21e410a2243eb6e84/filament/src/Froxelizer.cpp#L402C57-L402C73
        let tile_width_clip_space = f32(2u * BLOCK_SIZE) / f32(u_screen_size.x);
        let tile_height_clip_space = f32(2u * BLOCK_SIZE) / f32(u_screen_size.y);

        let tr_projection = transpose(u_camera.projection);

        var planes: array<vec4f, 4>;

        // left plane
        {
            let x = (f32(workgroup_id.x) * tile_width_clip_space) - 1.0;
            let p = tr_projection * vec4f(-1.0, 0.0, 0.0, x);
            planes[0] = -vec4f(normalize(p.xyz), 0.0);
        }

        // right plane
        {
            let x = (f32(workgroup_id.x + 1u) * tile_width_clip_space) - 1.0;
            let p = tr_projection * vec4f(-1.0, 0.0, 0.0, x);
            planes[1] = vec4f(normalize(p.xyz), 0.0);
        }

        // top plane
        {
            let y = (f32(workgroup_id.y) * tile_height_clip_space) - 1.0;
            let p = tr_projection * vec4f(0.0, 1.0, 0.0, y);
            planes[2] = -vec4f(normalize(p.xyz), 0.0);
        }

        // bottom plane
        {
            let y = (f32(workgroup_id.y + 1u) * tile_height_clip_space) - 1.0;
            let p = tr_projection * vec4f(0.0, 1.0, 0.0, y);
            planes[3] = vec4f(normalize(p.xyz), 0.0);
        }

        wg_frustum_planes[0] = Plane(planes[0].xyz, planes[0].w);
        wg_frustum_planes[1] = Plane(planes[1].xyz, planes[1].w);
        wg_frustum_planes[2] = Plane(planes[2].xyz, planes[2].w);
        wg_frustum_planes[3] = Plane(planes[3].xyz, planes[3].w);

        wg_frustum_planes[4] = Plane(vec3f(0.0, 0.0, -1.0), -min_depth);
        wg_frustum_planes[5] = Plane(vec3f(0.0, 0.0, 1.0), -max_depth);
    }

    workgroupBarrier();

    // Step 3: cull lights

    // Process the lights detecting which ones to cull for this tile.
    // Processes 256 lights simultaneously, each on a thread in the workgroup. Requires multiple
    // iterations for more lights.
    for (var i = local_invocation_index; i < u_lights.light_count; i += BLOCK_SIZE * BLOCK_SIZE) {
        let light_index = i;

        let light = u_lights.data[light_index];

        if (light.enabled == 1u) {
            let position_vs = (u_camera.view * vec4f(light.position, 1.0)).xyz;

            if (light.light_ty == LIGHT_TY_DIRECTIONAL) {
                add_light(light_index);
            } else if (light.light_ty == LIGHT_TY_POINT
                && sphere_inside_frustrum(wg_frustum_planes, position_vs, light.range)) {
                // TODO: add the light to the transparent geometry list

                if (!sphere_inside_plane(position_vs, light.range, wg_frustum_planes[4])) {
                    add_light(light_index);
                }
            } else if (light.light_ty == LIGHT_TY_SPOT) {
                let dir_vs = (u_camera.view * vec4f(light.direction, 1.0)).xyz;
                let cone_radius = tan(light.spot_cutoff) * light.range;
                let cone = Cone(position_vs, light.range, dir_vs, cone_radius);

                if (cone_inside_frustum(cone, wg_frustum_planes)) {
                    // TODO: add the light to the transparent geometry list

                    add_light(light_index);
                    if (!cone_inside_plane(cone, wg_frustum_planes[4])) {
                    }
                }
            }
        }
    }

    workgroupBarrier();

    // Update the global memory with the visible light buffer.

    // first update the light grid on the first thread
    if (local_invocation_index == 0u) {
        wg_light_index_start = atomicAdd(&u_light_index_counter, wg_visible_light_count);
        textureStore(t_light_grid, workgroup_id.xy, vec4<u32>(wg_light_index_start, wg_visible_light_count, 0u, 1u));

        // TODO: store light grid for transparent geometry
    }

    workgroupBarrier();

    // now update the light index list on all threads.
    for (var i = local_invocation_index; i < wg_visible_light_count; i += BLOCK_SIZE * BLOCK_SIZE) {
        u_light_indices[wg_light_index_start + i] = wg_visible_light_indices[i];
    }
}

/// Add a light to the visible light indices list.
/// Returns a boolean indicating if the light was added.
fn add_light(light_index: u32) -> bool {
    //var offset: u32 = wg_visible_light_count;

    if (wg_visible_light_count < MAX_TILE_VISIBLE_LIGHTS) {
        let offset = atomicAdd(&wg_visible_light_count, 1u);
        wg_visible_light_indices[offset] = light_index;
        return true;
    }

    return false;
}

fn sphere_inside_frustrum(frustum: array<Plane, 6>, sphere_origin: vec3f, radius: f32) -> bool {
    // to be able to index this array with a non-const value,
    // it must be defined as a var
    var frustum_v = frustum;

    // only check the sides of the frustum
    for (var i = 0u; i < 4u; i++) {
        if (sphere_inside_plane(sphere_origin, radius, frustum_v[i])) {
            return false;
        }
    }

    return true;
}

/// Check if the sphere is fully behind (i.e., inside the negative half-space of) a plane.
///
/// Source: Real-time collision detection, Christer Ericson (2005)
/// (https://www.3dgep.com/forward-plus/#light-culling-compute-shader)
fn sphere_inside_plane(sphere_origin: vec3f, radius: f32, plane: Plane) -> bool {
    return dot(plane.normal, sphere_origin) - plane.origin_distance < -radius;
}

fn clip_to_view(clip: vec4f) -> vec4f {
    // view space position
    var view = u_camera.inverse_projection * clip;

    // perspective projection
    return view / view.w;
}

fn screen_to_view(screen: vec4f) -> vec4f {
    // convert to normalized texture coordinates
    let tex_coord = screen.xy / vec2<f32>(u_screen_size);

    // convert to clip space
    let clip = vec4f(vec2<f32>(tex_coord.x, 1.0 - tex_coord.y) * 2.0 - 1.0, screen.z, screen.w);

    return clip_to_view(clip);
}

/// Compute a plane from 3 noncollinear points that form a triangle.
/// This equation assumes a right-handed (counter-clockwise winding order)
/// coordinate system to determine the direction of the plane normal.
fn compute_plane(p0: vec3f, p1: vec3f, p2: vec3f) -> Plane {
    let v0 = p1 - p0;
    let v2 = p2 - p0;

    let normal = vec4f(normalize(cross(v0, v2)), 0.0);

    // find the distance to the origin
    let distance = dot(normal.xyz, p0);

    return Plane(normal.xyz, distance);
}

fn point_inside_plane(point: vec3f, plane: Plane) -> bool {
    return dot(plane.normal, point) + plane.origin_distance < 0.0;
}

fn point_intersect_plane(point: vec3f, plane: Plane) -> f32 {
    return dot(plane.normal, point) + plane.origin_distance;
}

/// Check to see if a cone is fully behind (inside the negative halfspace of) a plane.
///
/// Source: Real-time collision detection, Christer Ericson (2005)
/// (https://www.3dgep.com/forward-plus/#light-culling-compute-shader)
fn cone_inside_plane(cone: Cone, plane: Plane) -> bool {
    let dir = cone.direction;
    let furthest_direction = cross(cross(plane.normal, dir), dir);
    let furthest = cone.tip + dir * cone.height - furthest_direction * cone.radius;

    // The cone is in the negative halfspace of the plane if the tip of the cone,
    // and the farthest point on the end of the cone are inside the negative halfspace
    // of the plane.
    return point_inside_plane(cone.tip, plane) && point_inside_plane(furthest, plane);
}

fn cone_inside_frustum(cone: Cone, frustum_in: array<Plane, 6>) -> bool {
    var frustum = frustum_in;
    for (var i = 0u; i < 4u; i++) {
        // TODO: better cone checking
        if (sphere_inside_plane(cone.tip, cone.radius, frustum[i])) {
            return false;
        }
    }

    return true;
}
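For reference, the culling pass above runs one 16×16 workgroup per screen tile; a small sketch of the corresponding dispatch-size arithmetic (the helper name is hypothetical, not from the repository):

```rust
// Number of workgroups to dispatch for a given surface size, matching BLOCK_SIZE = 16.
fn tile_dispatch_size(width: u32, height: u32) -> (u32, u32) {
    const BLOCK_SIZE: u32 = 16;
    (width.div_ceil(BLOCK_SIZE), height.div_ceil(BLOCK_SIZE))
}
```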
@ -1,19 +0,0 @@
#define_module lyra::shadows::bindings
#import lyra::shadows::structs::{ShadowSettingsUniform, LightShadowMapUniform}

@group(5) @binding(0)
var t_shadow_maps_atlas: texture_depth_2d;
@group(5) @binding(1)
var s_shadow_maps_atlas: sampler;
@group(5) @binding(2)
var s_shadow_maps_atlas_compare: sampler_comparison;
@group(5) @binding(3)
var<uniform> u_shadow_settings: ShadowSettingsUniform;
@group(5) @binding(4)
var<storage, read> u_light_shadow: array<LightShadowMapUniform>;
@group(5) @binding(5)
var<storage, read> u_pcf_poisson_disc: array<vec2<f32>>;
@group(5) @binding(6)
var<storage, read> u_pcf_poisson_disc_3d: array<vec3<f32>>;
@group(5) @binding(7)
var<storage, read> u_pcss_poisson_disc: array<vec2<f32>>;
@ -1,352 +0,0 @@
#define_module lyra::shadows::calc
#import lyra::shadows::structs::{ShadowSettingsUniform, LightShadowMapUniform}
#import lyra::shadows::bindings::{t_shadow_maps_atlas, s_shadow_maps_atlas, s_shadow_maps_atlas_compare, u_shadow_settings, u_light_shadow, u_pcf_poisson_disc, u_pcss_poisson_disc}

/// Convert 3d coords for an unwrapped cubemap to 2d coords and a side index of the cube map.
///
/// The `xy` components are the 2d coordinates in the side of the cube, and `z` is the cube
/// map side index.
///
/// Cube map index results:
/// 0 -> UNKNOWN
/// 1 -> right
/// 2 -> left
/// 3 -> top
/// 4 -> bottom
/// 5 -> near
/// 6 -> far
fn coords_to_cube_atlas(tex_coord: vec3<f32>) -> vec3<f32> {
    let abs_x = abs(tex_coord.x);
    let abs_y = abs(tex_coord.y);
    let abs_z = abs(tex_coord.z);

    var major_axis: f32 = 0.0;
    var cube_idx: i32 = 0;
    var res = vec2<f32>(0.0);

    // Determine the dominant axis
    if (abs_x >= abs_y && abs_x >= abs_z) {
        major_axis = tex_coord.x;
        if (tex_coord.x > 0.0) {
            cube_idx = 1;
            res = vec2<f32>(-tex_coord.z, -tex_coord.y);
        } else {
            cube_idx = 2;
            res = vec2<f32>(tex_coord.z, -tex_coord.y);
        }
    } else if (abs_y >= abs_x && abs_y >= abs_z) {
        major_axis = tex_coord.y;
        if (tex_coord.y > 0.0) {
            cube_idx = 3;
            res = vec2<f32>(tex_coord.x, tex_coord.z);
        } else {
            cube_idx = 4;
            res = vec2<f32>(tex_coord.x, -tex_coord.z);
        }
    } else {
        major_axis = tex_coord.z;
        if (tex_coord.z > 0.0) {
            cube_idx = 5;
            res = vec2<f32>(tex_coord.x, -tex_coord.y);
        } else {
            cube_idx = 6;
            res = vec2<f32>(-tex_coord.x, -tex_coord.y);
        }
    }

    res = (res / abs(major_axis) + 1.0) * 0.5;
    res.y = 1.0 - res.y;

    return vec3<f32>(res, f32(cube_idx));
}

/// Get shadow settings for a light.
/// Returns x as `pcf_samples_num` and y as `pcss_blocker_search_samples`.
fn get_shadow_settings(shadow_u: LightShadowMapUniform) -> vec2<u32> {
    if shadow_u.has_shadow_settings == 1u {
        return vec2<u32>(shadow_u.pcf_samples_num, shadow_u.pcss_blocker_search_samples);
    } else {
        return vec2<u32>(u_shadow_settings.pcf_samples_num, u_shadow_settings.pcss_blocker_search_samples);
    }
}

fn calc_shadow_dir_light(world_pos: vec3<f32>, world_normal: vec3<f32>, light_dir: vec3<f32>, light: Light) -> f32 {
    let map_data: LightShadowMapUniform = u_light_shadow[light.light_shadow_uniform_index[0]];
    let frag_pos_light_space = map_data.light_space_matrix * vec4<f32>(world_pos, 1.0);

    var proj_coords = frag_pos_light_space.xyz / frag_pos_light_space.w;
    // for some reason the y component is flipped after transforming
    proj_coords.y = -proj_coords.y;

    // Remap xy to [0.0, 1.0]
    let xy_remapped = proj_coords.xy * 0.5 + 0.5;

    // use a bias to avoid shadow acne
    let current_depth = proj_coords.z - map_data.constant_depth_bias;

    // get settings
    let settings = get_shadow_settings(map_data);
    let pcf_samples_num = settings.x;
    let pcss_blocker_search_samples = settings.y;

    var shadow = 0.0;
    // hardware 2x2 PCF via comparison sampler
    if pcf_samples_num == 2u {
        let region_coords = to_atlas_frame_coords(map_data, xy_remapped, false);
        shadow = textureSampleCompareLevel(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, region_coords, current_depth);
    }
    // PCSS
    else if pcf_samples_num > 0u && pcss_blocker_search_samples > 0u {
        shadow = pcss_dir_light(xy_remapped, current_depth, map_data);
    }
    // only PCF
    else if pcf_samples_num > 0u {
        let texel_size = 1.0 / f32(map_data.atlas_frame.width);
        shadow = pcf_dir_light(xy_remapped, current_depth, map_data, texel_size);
    }
    // no filtering
    else {
        let region_coords = to_atlas_frame_coords(map_data, xy_remapped, false);
        let closest_depth = textureSampleLevel(t_shadow_maps_atlas, s_shadow_maps_atlas, region_coords, 0.0);
        shadow = select(1.0, 0.0, current_depth > closest_depth);
    }

    // don't cast shadows outside the light's far plane
    if (proj_coords.z > 1.0) {
        shadow = 1.0;
    }

    // don't cast shadows if the texture coords would go past the shadow maps
    if (xy_remapped.x > 1.0 || xy_remapped.x < 0.0 || xy_remapped.y > 1.0 || xy_remapped.y < 0.0) {
        shadow = 1.0;
    }

    return shadow;
}

// Comes from https://developer.download.nvidia.com/whitepapers/2008/PCSS_Integration.pdf
fn search_width(light_near: f32, uv_light_size: f32, receiver_depth: f32) -> f32 {
    return uv_light_size * (receiver_depth - light_near) / receiver_depth;
}

/// Convert texture coords to be texture coords of an atlas frame.
///
/// If `safety_offset` is true, the frame will be shrunk by a tiny amount to avoid bleeding
/// into adjacent frames from filtering.
fn to_atlas_frame_coords(shadow_u: LightShadowMapUniform, coords: vec2<f32>, safety_offset: bool) -> vec2<f32> {
    let atlas_dimensions = textureDimensions(t_shadow_maps_atlas);

    // get the rect of the frame as a vec4
    var region_rect = vec4<f32>(f32(shadow_u.atlas_frame.x), f32(shadow_u.atlas_frame.y),
        f32(shadow_u.atlas_frame.width), f32(shadow_u.atlas_frame.height));
    // put the frame rect in atlas UV space
    region_rect /= f32(atlas_dimensions.x);

    // if safety_offset is true, calculate a relatively tiny offset to avoid getting the end of
    // the frame and causing linear or nearest filtering to bleed to the adjacent frame.
    let texel_size = select(0.0, (1.0 / f32(shadow_u.atlas_frame.x)) * 4.0, safety_offset);

    // lerp input coords
    let region_coords = vec2<f32>(
        mix(region_rect.x + texel_size, region_rect.x + region_rect.z - texel_size, coords.x),
        mix(region_rect.y + texel_size, region_rect.y + region_rect.w - texel_size, coords.y)
    );

    return region_coords;
}

/// Find the average blocker distance for a directional light
fn find_blocker_distance_dir_light(tex_coords: vec2<f32>, receiver_depth: f32, bias: f32, shadow_u: LightShadowMapUniform) -> vec2<f32> {
    let search_width = search_width(shadow_u.near_plane, shadow_u.light_size_uv, receiver_depth);

    var blockers = 0;
    var avg_dist = 0.0;
    let samples = i32(u_shadow_settings.pcss_blocker_search_samples);
    for (var i = 0; i < samples; i++) {
        let offset_coords = tex_coords + u_pcss_poisson_disc[i] * search_width;
        let new_coords = to_atlas_frame_coords(shadow_u, offset_coords, false);
        let z = textureSampleLevel(t_shadow_maps_atlas, s_shadow_maps_atlas, new_coords, 0.0);

        if z < (receiver_depth - bias) {
            blockers += 1;
            avg_dist += z;
        }
    }

    let b = f32(blockers);
    return vec2<f32>(avg_dist / b, b);
}

fn pcss_dir_light(tex_coords: vec2<f32>, receiver_depth: f32, shadow_u: LightShadowMapUniform) -> f32 {
    let blocker_search = find_blocker_distance_dir_light(tex_coords, receiver_depth, 0.0, shadow_u);

    // If no blockers were found, exit now to save in filtering
    if blocker_search.y == 0.0 {
        return 1.0;
    }
    let blocker_depth = blocker_search.x;

    // penumbra estimation
    let penumbra_width = (receiver_depth - blocker_depth) / blocker_depth;

    // PCF
    let uv_radius = penumbra_width * shadow_u.light_size_uv * shadow_u.near_plane / receiver_depth;
    return pcf_dir_light(tex_coords, receiver_depth, shadow_u, uv_radius);
}

/// Calculate the shadow coefficient using PCF of a directional light
fn pcf_dir_light(tex_coords: vec2<f32>, test_depth: f32, shadow_u: LightShadowMapUniform, uv_radius: f32) -> f32 {
    var shadow = 0.0;
    let samples_num = i32(u_shadow_settings.pcf_samples_num);
    for (var i = 0; i < samples_num; i++) {
        let offset = tex_coords + u_pcf_poisson_disc[i] * uv_radius;
        let new_coords = to_atlas_frame_coords(shadow_u, offset, false);

        shadow += textureSampleCompare(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, new_coords, test_depth);
    }
    shadow /= f32(samples_num);

    // clamp shadow to [0; 1]
    return saturate(shadow);
}

fn calc_shadow_point_light(world_pos: vec3<f32>, world_normal: vec3<f32>, light_dir: vec3<f32>, light: Light) -> f32 {
    var frag_to_light = world_pos - light.position;
    let temp = coords_to_cube_atlas(normalize(frag_to_light));
    var coords_2d = temp.xy;
    let cube_idx = i32(temp.z);

    var indices = light.light_shadow_uniform_index;
    let i = indices[cube_idx - 1];
    let u: LightShadowMapUniform = u_light_shadow[i];

    let uniforms = array<LightShadowMapUniform, 6>(
        u_light_shadow[indices[0]],
        u_light_shadow[indices[1]],
        u_light_shadow[indices[2]],
        u_light_shadow[indices[3]],
        u_light_shadow[indices[4]],
        u_light_shadow[indices[5]]
    );

    var current_depth = length(frag_to_light);
    current_depth /= u.far_plane;
    current_depth -= u.constant_depth_bias;

    // get settings
    let settings = get_shadow_settings(u);
    let pcf_samples_num = settings.x;
    let pcss_blocker_search_samples = settings.y;

    var shadow = 0.0;
    // hardware 2x2 PCF via comparison sampler
    if pcf_samples_num == 2u {
        let region_coords = to_atlas_frame_coords(u, coords_2d, true);
        shadow = textureSampleCompareLevel(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, region_coords, current_depth);
    }
    // PCSS
    else if pcf_samples_num > 0u && pcss_blocker_search_samples > 0u {
        shadow = pcss_dir_light(coords_2d, current_depth, u);
    }
    // only PCF
    else if pcf_samples_num > 0u {
        let texel_size = 1.0 / f32(u.atlas_frame.width);
        shadow = pcf_point_light(frag_to_light, current_depth, uniforms, pcf_samples_num, 0.007);
        //shadow = pcf_point_light(coords_2d, current_depth, u, pcf_samples_num, texel_size);
    }
    // no filtering
    else {
        let region_coords = to_atlas_frame_coords(u, coords_2d, true);
        let closest_depth = textureSampleLevel(t_shadow_maps_atlas, s_shadow_maps_atlas, region_coords, 0.0);
        shadow = select(1.0, 0.0, current_depth > closest_depth);
    }

    return shadow;
}

/// Calculate the shadow coefficient using PCF of a point light
fn pcf_point_light(tex_coords: vec3<f32>, test_depth: f32, shadow_us: array<LightShadowMapUniform, 6>, samples_num: u32, uv_radius: f32) -> f32 {
    var shadow_unis = shadow_us;

    var shadow = 0.0;
    for (var i = 0; i < i32(samples_num); i++) {
        var temp = coords_to_cube_atlas(tex_coords);
        var coords_2d = temp.xy;
        var cube_idx = i32(temp.z);
        var shadow_u = shadow_unis[cube_idx - 1];

        coords_2d += u_pcf_poisson_disc[i] * uv_radius;

        let new_coords = to_atlas_frame_coords(shadow_u, coords_2d, true);
        shadow += textureSampleCompare(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, new_coords, test_depth);
    }
    shadow /= f32(samples_num);

    // clamp shadow to [0; 1]
    return saturate(shadow);
}

fn calc_shadow_spot_light(world_pos: vec3<f32>, world_normal: vec3<f32>, light_dir: vec3<f32>, light: Light) -> f32 {
    let map_data: LightShadowMapUniform = u_light_shadow[light.light_shadow_uniform_index[0]];
    let frag_pos_light_space = map_data.light_space_matrix * vec4<f32>(world_pos, 1.0);

    var proj_coords = frag_pos_light_space.xyz / frag_pos_light_space.w;
    // for some reason the y component is flipped after transforming
    proj_coords.y = -proj_coords.y;

    // Remap xy to [0.0, 1.0]
    let xy_remapped = proj_coords.xy * 0.5 + 0.5;

    // use a bias to avoid shadow acne
    let current_depth = proj_coords.z - map_data.constant_depth_bias;

    // get settings
    let settings = get_shadow_settings(map_data);
    let pcf_samples_num = settings.x;
    let pcss_blocker_search_samples = settings.y;

    var shadow = 0.0;
    // hardware 2x2 PCF via comparison sampler
    if pcf_samples_num == 2u {
        let region_coords = to_atlas_frame_coords(map_data, xy_remapped, false);
        shadow = textureSampleCompareLevel(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, region_coords, current_depth);
    }
    // only PCF is supported for spot lights
    else if pcf_samples_num > 0u {
        let texel_size = 1.0 / f32(map_data.atlas_frame.width);
        shadow = pcf_spot_light(xy_remapped, current_depth, map_data, i32(pcf_samples_num), texel_size);
    }
    // no filtering
    else {
        let region_coords = to_atlas_frame_coords(map_data, xy_remapped, false);
        let closest_depth = textureSampleLevel(t_shadow_maps_atlas, s_shadow_maps_atlas, region_coords, 0.0);
        shadow = select(1.0, 0.0, current_depth > closest_depth);
    }

    // don't cast shadows outside the light's far plane
    if (proj_coords.z > 1.0) {
        shadow = 1.0;
    }

    // don't cast shadows if the texture coords would go past the shadow maps
    if (xy_remapped.x > 1.0 || xy_remapped.x < 0.0 || xy_remapped.y > 1.0 || xy_remapped.y < 0.0) {
        shadow = 1.0;
    }

    return shadow;
}

/// Calculate the shadow coefficient using PCF of a spot light
fn pcf_spot_light(tex_coords: vec2<f32>, test_depth: f32, shadow_u: LightShadowMapUniform, samples_num: i32, uv_radius: f32) -> f32 {
    var shadow = 0.0;
    for (var i = 0; i < samples_num; i++) {
        let offset = tex_coords + u_pcf_poisson_disc[i] * uv_radius;
        let new_coords = to_atlas_frame_coords(shadow_u, offset, false);

        shadow += textureSampleCompare(t_shadow_maps_atlas, s_shadow_maps_atlas_compare, new_coords, test_depth);
    }
    shadow /= f32(samples_num);

    // clamp shadow to [0; 1]
    return saturate(shadow);
}
@ -1,48 +0,0 @@
#define_module lyra::shadows::depth_pass
#import lyra::shadows::structs::{LightShadowMapUniform}

struct TransformData {
    transform: mat4x4<f32>,
    normal_matrix: mat4x4<f32>,
}

@group(0) @binding(0)
var<storage, read> u_light_shadow: array<LightShadowMapUniform>;
@group(1) @binding(0)
var<uniform> u_model_transform_data: TransformData;

struct VertexOutput {
    @builtin(position)
    clip_position: vec4<f32>,
    @location(0) world_pos: vec3<f32>,
    @location(1) instance_index: u32,
}

@vertex
fn vs_main(
    @location(0) position: vec3<f32>,
    @builtin(instance_index) instance_index: u32,
) -> VertexOutput {
    let world_pos = u_model_transform_data.transform * vec4<f32>(position, 1.0);
    let pos = u_light_shadow[instance_index].light_space_matrix * world_pos;
    return VertexOutput(pos, world_pos.xyz, instance_index);
}

struct FragmentOutput {
    @builtin(frag_depth) depth: f32,
}

/// Fragment shader used for point lights (or other perspective lights) to create linear depth
@fragment
fn fs_point_light_main(
    in: VertexOutput
) -> FragmentOutput {
    let u = u_light_shadow[in.instance_index];

    var light_dis = length(in.world_pos - u.light_pos);

    // map to [0; 1] range by dividing by far plane
    light_dis = light_dis / u.far_plane;

    return FragmentOutput(light_dis);
}
@ -1,29 +0,0 @@
#define_module lyra::shadows::structs

struct TextureAtlasFrame {
    /*offset: vec2<u32>,
    size: vec2<u32>,*/
    x: u32,
    y: u32,
    width: u32,
    height: u32,
}

struct LightShadowMapUniform {
    light_space_matrix: mat4x4<f32>,
    atlas_frame: TextureAtlasFrame,
    near_plane: f32,
    far_plane: f32,
    light_size_uv: f32,
    light_pos: vec3<f32>,
    /// boolean cast as u32
    has_shadow_settings: u32,
    pcf_samples_num: u32,
    pcss_blocker_search_samples: u32,
    constant_depth_bias: f32,
}

struct ShadowSettingsUniform {
    pcf_samples_num: u32,
    pcss_blocker_search_samples: u32,
}
@ -1,32 +0,0 @@
@group(0) @binding(0)
var t_screen: texture_2d<f32>;
@group(0) @binding(1)
var s_screen: sampler;

struct VertexOutput {
    @builtin(position)
    clip_position: vec4<f32>,
    @location(0)
    tex_coords: vec2<f32>,
}

@vertex
fn vs_main(
    @builtin(vertex_index) vertex_index: u32,
) -> VertexOutput {
    let tex_coords = vec2<f32>(f32(vertex_index >> 1u), f32(vertex_index & 1u)) * 2.0;
    let clip_position = vec4<f32>(tex_coords * vec2<f32>(2.0, -2.0) + vec2<f32>(-1.0, 1.0), 0.0, 1.0);

    return VertexOutput(clip_position, tex_coords);
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    let resolution = vec2<f32>(textureDimensions(t_screen));
    let inverse_screen_size = 1.0 / resolution.xy;
    let tex_coords = in.clip_position.xy * inverse_screen_size;

    var rgb: vec3<f32> = textureSample(t_screen, s_screen, tex_coords).xyz;
    rgb *= vec3<f32>(1.0, 0.2, 0.2);
    return vec4<f32>(rgb, 1.0);
}
@ -1,22 +0,0 @@
struct VertexOutput {
    @builtin(position) clip_position: vec4<f32>,
};

@group(0) @binding(0)
var<uniform> u_triangle_color: vec4<f32>;

@vertex
fn vs_main(
    @builtin(vertex_index) in_vertex_index: u32,
) -> VertexOutput {
    var out: VertexOutput;
    let x = f32(1 - i32(in_vertex_index)) * 0.5;
    let y = f32(i32(in_vertex_index & 1u) * 2 - 1) * 0.5;
    out.clip_position = vec4<f32>(x, y, 0.0, 1.0);
    return out;
}

@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
    return vec4<f32>(u_triangle_color.xyz, 1.0);
}
@ -1,149 +0,0 @@
|
|||
use std::{collections::VecDeque, marker::PhantomData, mem, sync::Arc};
|
||||
|
||||
/// A buffer on the GPU that has persistent indices.
|
||||
///
|
||||
/// `GpuSlotBuffer` allocates a buffer on the GPU and keeps stable indices of elements and
|
||||
/// reuses ones that were removed. It supports aligned buffers with [`GpuSlotBuffer::new_aligned`],
|
||||
/// as well as unaligned buffers with [`GpuSlotBuffer::new`].
|
||||
pub struct GpuSlotBuffer<T: bytemuck::Pod + bytemuck::Zeroable> {
|
||||
/// The amount of elements that can fit in the buffer.
|
||||
capacity: u64,
|
||||
/// The ending point of the buffer elements.
|
||||
len: u64,
|
||||
/// The list of dead and reusable indices in the buffer.
|
||||
dead_indices: VecDeque<u64>,
|
||||
/// The optional alignment of elements in the buffer.
|
||||
alignment: Option<u64>,
|
||||
/// The actual gpu buffer
|
||||
buffer: Arc<wgpu::Buffer>,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: bytemuck::Pod + bytemuck::Zeroable> GpuSlotBuffer<T> {
|
||||
/// Create a new GpuSlotBuffer with unaligned elements.
|
||||
///
|
||||
/// See [`GpuSlotBuffer::new_aligned`].
|
||||
pub fn new(device: &wgpu::Device, label: Option<&str>, usage: wgpu::BufferUsages, capacity: u64) -> Self {
|
||||
Self::new_impl(device, label, usage, capacity, None)
|
||||
}
|
||||
|
||||
/// Create a new buffer with **aligned** elements.
|
||||
///
|
||||
/// See [`GpuSlotBuffer::new`].
|
||||
pub fn new_aligned(device: &wgpu::Device, label: Option<&str>, usage: wgpu::BufferUsages, capacity: u64, alignment: u64) -> Self {
|
||||
Self::new_impl(device, label, usage, capacity, Some(alignment))
|
||||
}
|
||||
|
||||
fn new_impl(device: &wgpu::Device, label: Option<&str>, usage: wgpu::BufferUsages, capacity: u64, alignment: Option<u64>) -> Self {
|
||||
let buffer = Arc::new(device.create_buffer(&wgpu::BufferDescriptor {
|
||||
label,
|
||||
size: capacity * mem::size_of::<T>() as u64,
|
||||
usage,
|
||||
mapped_at_creation: false,
|
||||
}));
|
||||
|
||||
Self {
|
||||
capacity,
|
||||
len: 0,
|
||||
dead_indices: VecDeque::default(),
|
||||
buffer,
|
||||
alignment,
|
||||
_marker: PhantomData
|
||||
}
|
||||
}
|
||||
|
||||
/// Calculates the byte offset in the buffer of the element at `i`.
|
||||
pub fn offset_of(&self, i: u64) -> u64 {
|
||||
if let Some(align) = self.alignment {
|
||||
let transform_index = i % self.capacity;
|
||||
transform_index * align
|
||||
} else {
|
||||
i * mem::size_of::<T>() as u64
|
||||
}
|
||||
}
|
||||
|
||||
/// Set an element at `i` in the buffer to `val`.
|
||||
pub fn set_at(&self, queue: &wgpu::Queue, i: u64, val: &T) {
|
||||
let offset = self.offset_of(i);
|
||||
queue.write_buffer(&self.buffer, offset, bytemuck::bytes_of(val));
|
||||
}
|
||||
|
||||
/// Attempt to insert an element to the GPU buffer, returning the index it was inserted at.
|
||||
///
|
||||
/// Returns `None` when the buffer has no space to fit the element.
|
||||
pub fn try_insert(&mut self, queue: &wgpu::Queue, val: &T) -> Option<u64> {
|
||||
// reuse a dead index or get the next one
|
||||
let i = match self.dead_indices.pop_front() {
|
||||
Some(i) => i,
|
||||
None => {
|
||||
if self.len == self.capacity {
|
||||
return None;
|
||||
}
|
||||
|
||||
let i = self.len;
|
||||
self.len += 1;
|
||||
i
|
||||
}
|
||||
};
|
||||
|
||||
self.set_at(queue, i, val);
|
||||
|
||||
Some(i)
|
||||
}
|
||||
|
||||
/// Insert an element to the GPU buffer, returning the index it was inserted at.
|
||||
///
|
||||
/// The index is not guaranteed to be the end of the buffer since this structure reuses
|
||||
/// indices after they're removed.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if the buffer does not have space to fit `val`, see [`GpuSlotBuffer::try_insert`].
|
||||
pub fn insert(&mut self, queue: &wgpu::Queue, val: &T) -> u64 {
|
||||
self.try_insert(queue, val)
|
||||
.expect("GPU slot buffer ran out of slots to push elements into")
|
||||
}
|
||||
|
||||
/// Remove the element at `i`, clearing the elements slot in the buffer.
|
||||
///
|
||||
/// If you do not care that the slot in the buffer is emptied, use
|
||||
/// [`GpuSlotBuffer::remove_quick`].
|
||||
pub fn remove(&mut self, queue: &wgpu::Queue, i: u64) {
|
||||
let mut zeros = Vec::new();
|
||||
zeros.resize(mem::size_of::<T>(), 0);
|
||||
|
||||
let offset = self.offset_of(i);
|
||||
queue.write_buffer(&self.buffer, offset, bytemuck::cast_slice(zeros.as_slice()));
|
||||
self.dead_indices.push_back(i);
|
||||
}
|
||||
|
||||
/// Remove the element at `i` without clearing its space in the buffer.
|
||||
///
|
||||
/// If you want to ensure that the slot in the buffer is emptied, use
|
||||
/// [`GpuSlotBuffer::remove`].
|
||||
pub fn remove_quick(&mut self, i: u64) {
|
||||
self.dead_indices.push_back(i);
|
||||
}
|
||||
|
||||
/// Returns the backing [`wgpu::Buffer`].
|
||||
pub fn buffer(&self) -> &Arc<wgpu::Buffer> {
|
||||
&self.buffer
|
||||
}
|
||||
|
||||
/// Return the length of the buffer.
|
||||
///
|
||||
/// This value may not reflect the amount of elements that are actually alive in the buffer if
|
||||
/// elements were removed and not re-added.
|
||||
pub fn len(&self) -> u64 {
|
||||
self.len
|
||||
}
|
||||
|
||||
/// Return the amount of inuse indices in the buffer.
|
||||
pub fn inuse_len(&self) -> u64 {
|
||||
self.len - self.dead_indices.len() as u64
|
||||
}
|
||||
|
||||
/// Returns the amount of elements the buffer can fit.
|
||||
pub fn capacity(&self) -> u64 {
|
||||
self.capacity
|
||||
}
|
||||
}
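// Illustrative usage sketch (not part of the original file): how a caller that already
// has a `wgpu::Device` and `wgpu::Queue` might drive `GpuSlotBuffer`. The `LightUniform`
// struct and the buffer label below are hypothetical; any `Pod + Zeroable` type works.
#[repr(C)]
#[derive(Clone, Copy, bytemuck::Pod, bytemuck::Zeroable)]
struct LightUniform {
    color: [f32; 4],
}

fn slot_buffer_example(device: &wgpu::Device, queue: &wgpu::Queue) {
    // 64 unaligned slots, usable as a storage buffer that the CPU can write into.
    let mut lights = GpuSlotBuffer::<LightUniform>::new(
        device,
        Some("B_Lights"),
        wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
        64,
    );

    // The returned index stays valid until the element is removed.
    let idx = lights.insert(queue, &LightUniform { color: [1.0, 1.0, 1.0, 1.0] });
    lights.set_at(queue, idx, &LightUniform { color: [0.5, 0.5, 0.5, 1.0] });

    // Removing frees the slot; a later insert may hand the same index back out.
    lights.remove(queue, idx);
    let _reused = lights.try_insert(queue, &LightUniform { color: [0.0, 0.0, 0.0, 1.0] });
}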
|
|
@ -1,389 +0,0 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use image::GenericImageView;
|
||||
use lyra_resource::{FilterMode, ResHandle, Texture, WrappingMode};
|
||||
|
||||
use super::render_buffer::BindGroupPair;
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub struct RenderTexture {
|
||||
pub inner_texture: wgpu::Texture,
|
||||
pub view: wgpu::TextureView,
|
||||
pub sampler: wgpu::Sampler,
|
||||
|
||||
/// Most RenderTextures will have this, but things like depth buffers wont
|
||||
pub bindgroup_pair: Option<BindGroupPair>,
|
||||
}
|
||||
|
||||
impl RenderTexture {
|
||||
pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;
|
||||
|
||||
pub fn create_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
|
||||
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
multisampled: false,
|
||||
view_dimension: wgpu::TextureViewDimension::D2,
|
||||
sample_type: wgpu::TextureSampleType::Float { filterable: true },
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::FRAGMENT,
|
||||
// This should match the filterable field of the
|
||||
// corresponding Texture entry above.
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
label: Some("BGL_Texture"),
|
||||
})
|
||||
}
|
||||
|
||||
fn create_bind_group_pair(device: &wgpu::Device, layout: Arc<wgpu::BindGroupLayout>, view: &wgpu::TextureView, sampler: &wgpu::Sampler) -> BindGroupPair {
|
||||
let bg = device.create_bind_group(
|
||||
&wgpu::BindGroupDescriptor {
|
||||
layout: &layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(view),
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(sampler),
|
||||
}
|
||||
],
|
||||
label: Some("default_texture"),
|
||||
}
|
||||
);
|
||||
|
||||
BindGroupPair {
|
||||
layout,
|
||||
bindgroup: bg
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_bytes(device: &wgpu::Device, queue: &wgpu::Queue, bg_layout: Arc<wgpu::BindGroupLayout>, bytes: &[u8], label: &str) -> anyhow::Result<Self> {
|
||||
let img = image::load_from_memory(bytes)?;
|
||||
Self::from_image(device, queue, bg_layout, &img, Some(label))
|
||||
}
|
||||
|
||||
pub fn from_image(device: &wgpu::Device, queue: &wgpu::Queue, bg_layout: Arc<wgpu::BindGroupLayout>, img: &image::DynamicImage, label: Option<&str>) -> anyhow::Result<Self> {
|
||||
let rgba = img.to_rgba8();
|
||||
let dimensions = img.dimensions();
|
||||
|
||||
let size = wgpu::Extent3d {
|
||||
width: dimensions.0,
|
||||
height: dimensions.1,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
let texture = device.create_texture(
|
||||
&wgpu::TextureDescriptor {
|
||||
label,
|
||||
size,
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: wgpu::TextureFormat::Rgba8UnormSrgb,
|
||||
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
|
||||
view_formats: &[],
|
||||
}
|
||||
);
|
||||
|
||||
queue.write_texture(
|
||||
wgpu::ImageCopyTexture {
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
texture: &texture,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
},
|
||||
&rgba,
|
||||
wgpu::ImageDataLayout {
|
||||
offset: 0,
|
||||
bytes_per_row: Some(4 * dimensions.0),
|
||||
rows_per_image: Some(dimensions.1),
|
||||
},
|
||||
size,
|
||||
);
|
||||
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
let sampler = device.create_sampler(
|
||||
&wgpu::SamplerDescriptor {
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Nearest,
|
||||
mipmap_filter: wgpu::FilterMode::Nearest,
|
||||
..Default::default()
|
||||
}
|
||||
);
|
||||
|
||||
let bgp = Self::create_bind_group_pair(device, bg_layout, &view, &sampler);
|
||||
|
||||
Ok(Self {
|
||||
inner_texture: texture,
|
||||
view,
|
||||
sampler,
|
||||
bindgroup_pair: Some(bgp),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn from_resource(device: &wgpu::Device, queue: &wgpu::Queue, bg_layout: Arc<wgpu::BindGroupLayout>, texture_res: &ResHandle<Texture>, label: Option<&str>) -> anyhow::Result<Self> {
|
||||
let texture_ref = texture_res.data_ref().unwrap();
|
||||
let img = texture_ref.image.data_ref().unwrap();
|
||||
|
||||
let rgba = img.to_rgba8();
|
||||
let dimensions = img.dimensions();
|
||||
|
||||
let size = wgpu::Extent3d {
|
||||
width: dimensions.0,
|
||||
height: dimensions.1,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
let texture = device.create_texture(
|
||||
&wgpu::TextureDescriptor {
|
||||
label,
|
||||
size,
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: wgpu::TextureFormat::Rgba8UnormSrgb,
|
||||
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
|
||||
view_formats: &[],
|
||||
}
|
||||
);
|
||||
|
||||
queue.write_texture(
|
||||
wgpu::ImageCopyTexture {
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
texture: &texture,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
},
|
||||
&rgba,
|
||||
wgpu::ImageDataLayout {
|
||||
offset: 0,
|
||||
bytes_per_row: Some(4 * dimensions.0),
|
||||
rows_per_image: Some(dimensions.1),
|
||||
},
|
||||
size,
|
||||
);
|
||||
|
||||
// convert resource sampler into wgpu sampler
|
||||
let sampler_desc = match &texture_ref.sampler {
|
||||
Some(sampler) => {
|
||||
let magf = res_filter_to_wgpu(sampler.mag_filter.unwrap_or(FilterMode::Linear));
|
||||
let minf = res_filter_to_wgpu(sampler.min_filter.unwrap_or(FilterMode::Nearest));
|
||||
let mipf = res_filter_to_wgpu(sampler.mipmap_filter.unwrap_or(FilterMode::Nearest));
|
||||
|
||||
let wrap_u = res_wrap_to_wgpu(sampler.wrap_u);
|
||||
let wrap_v = res_wrap_to_wgpu(sampler.wrap_v);
|
||||
let wrap_w = res_wrap_to_wgpu(sampler.wrap_w);
|
||||
|
||||
wgpu::SamplerDescriptor {
|
||||
address_mode_u: wrap_u,
|
||||
address_mode_v: wrap_v,
|
||||
address_mode_w: wrap_w,
|
||||
mag_filter: magf,
|
||||
min_filter: minf,
|
||||
mipmap_filter: mipf,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
None => wgpu::SamplerDescriptor {
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Nearest,
|
||||
mipmap_filter: wgpu::FilterMode::Nearest,
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
let sampler = device.create_sampler(
|
||||
&sampler_desc
|
||||
);
|
||||
|
||||
let bgp = Self::create_bind_group_pair(device, bg_layout, &view, &sampler);
|
||||
|
||||
Ok(Self {
|
||||
inner_texture: texture,
|
||||
view,
|
||||
sampler,
|
||||
bindgroup_pair: Some(bgp),
|
||||
})
|
||||
}
|
||||
|
||||
/// Updates the texture on the gpu with the provided texture.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics if `texture` is not loaded
|
||||
pub fn update_texture(&mut self, _device: &wgpu::Device, queue: &wgpu::Queue, texture: &ResHandle<Texture>) {
|
||||
let texture = &texture.data_ref().unwrap().image;
|
||||
let texture = &texture.data_ref().unwrap();
|
||||
let rgba = texture.to_rgba8();
|
||||
let dimensions = texture.dimensions();
|
||||
let size = wgpu::Extent3d {
|
||||
width: dimensions.0,
|
||||
height: dimensions.1,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
|
||||
queue.write_texture(
|
||||
wgpu::ImageCopyTexture {
|
||||
aspect: wgpu::TextureAspect::All,
|
||||
texture: &self.inner_texture,
|
||||
mip_level: 0,
|
||||
origin: wgpu::Origin3d::ZERO,
|
||||
},
|
||||
&rgba,
|
||||
wgpu::ImageDataLayout {
|
||||
offset: 0,
|
||||
bytes_per_row: Some(4 * dimensions.0),
|
||||
rows_per_image: Some(dimensions.1),
|
||||
},
|
||||
size,
|
||||
);
|
||||
}
|
||||
|
||||
pub fn create_depth_texture(device: &wgpu::Device, size: crate::math::UVec2, label: &str) -> Self {
|
||||
let size = wgpu::Extent3d {
|
||||
width: size.x,
|
||||
height: size.y,
|
||||
depth_or_array_layers: 1,
|
||||
};
|
||||
let desc = wgpu::TextureDescriptor {
|
||||
label: Some(label),
|
||||
size,
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format: Self::DEPTH_FORMAT,
|
||||
usage: wgpu::TextureUsages::RENDER_ATTACHMENT // we'll be rendering to it
|
||||
| wgpu::TextureUsages::TEXTURE_BINDING,
|
||||
view_formats: &[],
|
||||
};
|
||||
let texture = device.create_texture(&desc);
|
||||
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor {
|
||||
format: Some(wgpu::TextureFormat::Depth32Float),
|
||||
..Default::default()
|
||||
});
|
||||
let sampler = device.create_sampler(
|
||||
&wgpu::SamplerDescriptor { // 4.
|
||||
address_mode_u: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_v: wgpu::AddressMode::ClampToEdge,
|
||||
address_mode_w: wgpu::AddressMode::ClampToEdge,
|
||||
mag_filter: wgpu::FilterMode::Linear,
|
||||
min_filter: wgpu::FilterMode::Linear,
|
||||
mipmap_filter: wgpu::FilterMode::Nearest,
|
||||
compare: Some(wgpu::CompareFunction::LessEqual),
|
||||
lod_min_clamp: 0.0,
|
||||
lod_max_clamp: 100.0,
|
||||
..Default::default()
|
||||
}
|
||||
);
|
||||
|
||||
Self {
|
||||
inner_texture: texture,
|
||||
view,
|
||||
sampler,
|
||||
bindgroup_pair: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a bind group for this texture and returns a borrow to the [`BindGroupPair`]
|
||||
///
|
||||
/// This does not create a new bind group if the texture already has one.
|
||||
/// The view dimension will be the same as the texture dimension.
|
||||
pub fn create_bind_group(&mut self, device: &wgpu::Device) -> &BindGroupPair {
|
||||
if self.bindgroup_pair.is_some() {
|
||||
// could not use an if-let here due to the borrow checker thinking
|
||||
// that there was multiple borrows to self.bindgroup_pair
|
||||
return self.bindgroup_pair.as_ref().unwrap();
|
||||
}
|
||||
|
||||
let view_dim = match self.inner_texture.dimension() {
|
||||
wgpu::TextureDimension::D1 => wgpu::TextureViewDimension::D1,
|
||||
wgpu::TextureDimension::D2 => wgpu::TextureViewDimension::D2,
|
||||
wgpu::TextureDimension::D3 => wgpu::TextureViewDimension::D3,
|
||||
};
|
||||
|
||||
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Texture {
|
||||
sample_type: wgpu::TextureSampleType::Depth,
|
||||
view_dimension: view_dim,
|
||||
multisampled: false
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 1,
|
||||
visibility: wgpu::ShaderStages::COMPUTE | wgpu::ShaderStages::FRAGMENT,
|
||||
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Comparison),
|
||||
count: None,
|
||||
}
|
||||
],
|
||||
label: Some("BGL_Texture"),
|
||||
});
|
||||
|
||||
let bg = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::TextureView(&self.view)
|
||||
},
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 1,
|
||||
resource: wgpu::BindingResource::Sampler(&self.sampler)
|
||||
}
|
||||
],
|
||||
label: Some("BG_Texture"),
|
||||
});
|
||||
|
||||
let pair = BindGroupPair::new(bg, layout);
|
||||
|
||||
self.bindgroup_pair = Some(pair);
|
||||
self.bindgroup_pair.as_ref().unwrap()
|
||||
}
|
||||
|
||||
/// Returns the bind group stored inside the bind group pair.
|
||||
///
|
||||
/// Panics:
|
||||
/// * This will panic if the texture isn't storing its bind group.
|
||||
pub fn bind_group(&self) -> &wgpu::BindGroup {
|
||||
&self.bindgroup_pair.as_ref().unwrap().bindgroup
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert [`lyra_resource::WrappingMode`] to [`wgpu::AddressMode`]
|
||||
#[inline(always)]
|
||||
pub(crate) fn res_wrap_to_wgpu(wmode: WrappingMode) -> wgpu::AddressMode {
|
||||
match wmode {
|
||||
WrappingMode::ClampToEdge => wgpu::AddressMode::ClampToEdge,
|
||||
WrappingMode::MirroredRepeat => wgpu::AddressMode::MirrorRepeat,
|
||||
WrappingMode::Repeat => wgpu::AddressMode::Repeat,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert [`lyra_resource::FilterMode`] to [`wgpu::FilterMode`]
|
||||
#[inline(always)]
|
||||
pub(crate) fn res_filter_to_wgpu(fmode: FilterMode) -> wgpu::FilterMode {
|
||||
match fmode {
|
||||
FilterMode::Nearest => wgpu::FilterMode::Nearest,
|
||||
FilterMode::Linear => wgpu::FilterMode::Linear,
|
||||
}
|
||||
}
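// Illustrative usage sketch (not part of the original file): loading encoded image bytes
// into a `RenderTexture` and grabbing its bind group. `png_bytes` is a placeholder
// parameter; any format the `image` crate can decode works.
fn render_texture_example(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    png_bytes: &[u8],
) -> anyhow::Result<RenderTexture> {
    // One shared layout can back many textures.
    let layout = std::sync::Arc::new(RenderTexture::create_layout(device));

    // Decode and upload; `from_bytes` always populates the bind group pair,
    // so calling `bind_group()` afterwards will not panic.
    let tex = RenderTexture::from_bytes(device, queue, layout, png_bytes, "T_Example")?;
    let _bg: &wgpu::BindGroup = tex.bind_group();
    Ok(tex)
}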
|
|
@ -1,297 +0,0 @@
|
|||
use std::{
|
||||
cmp::max, collections::HashMap, sync::Arc
|
||||
};
|
||||
|
||||
use glam::UVec2;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AtlasPackError {
|
||||
/// The rectangles can't be placed into the atlas. The atlas must increase in size
|
||||
#[error("There is not enough space in the atlas for the textures")]
|
||||
NotEnoughSpace,
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Debug, Default, Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
pub struct AtlasFrame {
|
||||
pub x: u32,
|
||||
pub y: u32,
|
||||
pub width: u32,
|
||||
pub height: u32,
|
||||
}
|
||||
|
||||
impl AtlasFrame {
|
||||
pub fn new(x: u32, y: u32, width: u32, height: u32) -> Self {
|
||||
Self {
|
||||
x, y, width, height
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TextureAtlas<P: AtlasPacker = SkylinePacker> {
|
||||
atlas_size: UVec2,
|
||||
|
||||
texture_format: wgpu::TextureFormat,
|
||||
texture: Arc<wgpu::Texture>,
|
||||
view: Arc<wgpu::TextureView>,
|
||||
|
||||
packer: P,
|
||||
}
|
||||
|
||||
impl<P: AtlasPacker> TextureAtlas<P> {
|
||||
pub fn new(
|
||||
device: &wgpu::Device,
|
||||
format: wgpu::TextureFormat,
|
||||
usages: wgpu::TextureUsages,
|
||||
atlas_size: UVec2,
|
||||
) -> Self {
|
||||
let texture = device.create_texture(&wgpu::TextureDescriptor {
|
||||
label: Some("texture_atlas"),
|
||||
size: wgpu::Extent3d {
|
||||
width: atlas_size.x,
|
||||
height: atlas_size.y,
|
||||
depth_or_array_layers: 1,
|
||||
},
|
||||
mip_level_count: 1,
|
||||
sample_count: 1,
|
||||
dimension: wgpu::TextureDimension::D2,
|
||||
format,
|
||||
usage: usages,
|
||||
view_formats: &[],
|
||||
});
|
||||
let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
|
||||
|
||||
Self {
|
||||
atlas_size,
|
||||
texture_format: format,
|
||||
texture: Arc::new(texture),
|
||||
view: Arc::new(view),
|
||||
packer: P::new(atlas_size),
|
||||
}
|
||||
}
|
||||
|
||||
/// Add a texture of `size` and pack it into the atlas, returning the id of the texture in
|
||||
/// the atlas.
|
||||
///
|
||||
/// If you are adding multiple textures at a time and want to wait to pack the atlas, use
|
||||
/// [`TextureAtlas::add_texture_unpacked`] and then after you're done adding them, pack them
|
||||
/// with [`TextureAtlas::pack_atlas`].
|
||||
pub fn pack(&mut self, width: u32, height: u32) -> Result<u64, AtlasPackError> {
|
||||
let id = self.packer.pack(width, height)?;
|
||||
|
||||
Ok(id as u64)
|
||||
}
|
||||
|
||||
/// Get the viewport of a texture index in the atlas.
|
||||
pub fn texture_frame(&self, atlas_index: u64) -> Option<AtlasFrame> {
|
||||
self.packer.frame(atlas_index as _)
|
||||
}
|
||||
|
||||
pub fn view(&self) -> &Arc<wgpu::TextureView> {
|
||||
&self.view
|
||||
}
|
||||
|
||||
pub fn texture(&self) -> &Arc<wgpu::Texture> {
|
||||
&self.texture
|
||||
}
|
||||
|
||||
pub fn texture_format(&self) -> &wgpu::TextureFormat {
|
||||
&self.texture_format
|
||||
}
|
||||
|
||||
/// Returns the size of the entire texture atlas.
|
||||
pub fn atlas_size(&self) -> UVec2 {
|
||||
self.atlas_size
|
||||
}
|
||||
}
|
||||
|
||||
pub trait AtlasPacker {
|
||||
fn new(size: UVec2) -> Self;
|
||||
|
||||
/// Get an [`AtlasFrame`] of a texture with `id`.
|
||||
fn frame(&self, id: usize) -> Option<AtlasFrame>;
|
||||
|
||||
/// Get all [`AtlasFrame`]s in the atlas.
|
||||
fn frames(&self) -> &HashMap<usize, AtlasFrame>;
|
||||
|
||||
/// Pack a new rect into the atlas.
|
||||
fn pack(&mut self, width: u32, height: u32) -> Result<usize, AtlasPackError>;
|
||||
}
|
||||
|
||||
struct Skyline {
|
||||
/// Starting x of the skyline
|
||||
x: usize,
|
||||
/// Starting y of the skyline
|
||||
y: usize,
|
||||
/// Width of the skyline
|
||||
width: usize,
|
||||
}
|
||||
|
||||
impl Skyline {
|
||||
fn right(&self) -> usize {
|
||||
self.x + self.width
|
||||
}
|
||||
}
|
||||
|
||||
pub struct SkylinePacker {
|
||||
size: UVec2,
|
||||
skylines: Vec<Skyline>,
|
||||
frame_idx: usize,
|
||||
frames: HashMap<usize, AtlasFrame>,
|
||||
}
|
||||
|
||||
impl SkylinePacker {
|
||||
pub fn new(size: UVec2) -> Self {
|
||||
let skylines = vec![Skyline {
|
||||
x: 0,
|
||||
y: 0,
|
||||
width: size.x as _,
|
||||
}];
|
||||
|
||||
Self {
|
||||
size,
|
||||
skylines,
|
||||
frame_idx: 0,
|
||||
frames: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn can_add(&self, mut i: usize, w: u32, h: u32) -> Option<usize> {
|
||||
let x = self.skylines[i].x as u32;
|
||||
if x + w > self.size.x {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut width_left = w;
|
||||
let mut y = self.skylines[i].y as u32;
|
||||
|
||||
loop {
|
||||
y = max(y, self.skylines[i].y as u32);
|
||||
|
||||
if y + h > self.size.y {
|
||||
return None;
|
||||
}
|
||||
|
||||
if self.skylines[i].width as u32 >= width_left {
|
||||
return Some(y as usize);
|
||||
}
|
||||
|
||||
width_left -= self.skylines[i].width as u32;
|
||||
i += 1;
|
||||
|
||||
if i >= self.skylines.len() {
|
||||
return None;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn find_skyline(&self, width: u32, height: u32) -> Option<(usize, AtlasFrame)> {
|
||||
let mut min_height = std::u32::MAX;
|
||||
let mut min_width = std::u32::MAX;
|
||||
let mut index = None;
|
||||
let mut frame = AtlasFrame::default();
|
||||
|
||||
// keep the min height as small as possible
|
||||
for i in 0..self.skylines.len() {
|
||||
if let Some(y) = self.can_add(i, width, height) {
|
||||
let y = y as u32;
|
||||
/* if r.bottom() < min_height
|
||||
|| (r.bottom() == min_height && self.skylines[i].width < min_width as usize) */
|
||||
if y + height < min_height ||
|
||||
(y + height == min_height && self.skylines[i].width < min_width as usize)
|
||||
{
|
||||
min_height = y + height;
|
||||
min_width = self.skylines[i].width as _;
|
||||
index = Some(i);
|
||||
frame = AtlasFrame::new(self.skylines[i].x as _, y, width, height);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: rotation
|
||||
}
|
||||
|
||||
if let Some(index) = index {
|
||||
Some((index, frame))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
fn split(&mut self, i: usize, frame: &AtlasFrame) {
|
||||
let skyline = Skyline {
|
||||
x: frame.x as _,
|
||||
y: (frame.y + frame.height) as _,
|
||||
width: frame.width as _
|
||||
};
|
||||
|
||||
assert!(skyline.right() <= self.size.x as usize);
|
||||
assert!(skyline.y <= self.size.y as usize);
|
||||
|
||||
self.skylines.insert(i, skyline);
|
||||
|
||||
let i = i + 1;
|
||||
|
||||
while i < self.skylines.len() {
|
||||
assert!(self.skylines[i - 1].x <= self.skylines[i].x);
|
||||
|
||||
if self.skylines[i].x < self.skylines[i - 1].x + self.skylines[i - 1].width {
|
||||
let shrink = self.skylines[i-1].x + self.skylines[i-1].width - self.skylines[i].x;
|
||||
|
||||
if self.skylines[i].width <= shrink {
|
||||
self.skylines.remove(i);
|
||||
} else {
|
||||
self.skylines[i].x += shrink;
|
||||
self.skylines[i].width -= shrink;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Merge skylines with the same y value
|
||||
fn merge(&mut self) {
|
||||
let mut i = 1;
|
||||
while i < self.skylines.len() {
|
||||
if self.skylines[i - 1].y == self.skylines[i].y {
|
||||
self.skylines[i - 1].width += self.skylines[i].width;
|
||||
self.skylines.remove(i);
|
||||
} else {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//pub fn pack(&mut self, )
|
||||
}
|
||||
|
||||
impl AtlasPacker for SkylinePacker {
|
||||
fn new(size: UVec2) -> Self {
|
||||
SkylinePacker::new(size)
|
||||
}
|
||||
|
||||
fn frame(&self, id: usize) -> Option<AtlasFrame> {
|
||||
self.frames.get(&id).cloned()
|
||||
}
|
||||
|
||||
fn frames(&self) -> &HashMap<usize, AtlasFrame> {
|
||||
&self.frames
|
||||
}
|
||||
|
||||
fn pack(&mut self, width: u32, height: u32) -> Result<usize, AtlasPackError> {
|
||||
if let Some((i, frame)) = self.find_skyline(width, height) {
|
||||
self.split(i, &frame);
|
||||
self.merge();
|
||||
|
||||
let frame_idx = self.frame_idx;
|
||||
self.frame_idx += 1;
|
||||
|
||||
self.frames.insert(frame_idx, frame);
|
||||
|
||||
Ok(frame_idx)
|
||||
} else {
|
||||
Err(AtlasPackError::NotEnoughSpace)
|
||||
}
|
||||
}
|
||||
}
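// Illustrative usage sketch (not part of the original file): packing two shadow map
// viewports into a depth atlas using the default `SkylinePacker`. The sizes and texture
// usages below are arbitrary placeholders.
fn atlas_example(device: &wgpu::Device) -> Result<(), AtlasPackError> {
    let mut atlas: TextureAtlas<SkylinePacker> = TextureAtlas::new(
        device,
        wgpu::TextureFormat::Depth32Float,
        wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
        UVec2::new(4096, 4096),
    );

    // Each `pack` reserves a rectangle and returns a stable id for later lookup.
    let a = atlas.pack(1024, 1024)?;
    let b = atlas.pack(512, 512)?;

    // The frame holds the offset and size, in texels, of the reserved region.
    let frame = atlas.texture_frame(a).expect("frame was just packed");
    assert_eq!((frame.width, frame.height), (1024, 1024));
    let _ = (b, atlas.view(), atlas.texture());
    Ok(())
}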
|
|
@ -1,322 +0,0 @@
|
|||
use std::{collections::{HashMap, VecDeque}, hash::{BuildHasher, DefaultHasher, Hash, Hasher, RandomState}, num::NonZeroU64, sync::Arc};
|
||||
|
||||
use lyra_ecs::{Component, Entity};
|
||||
use tracing::instrument;
|
||||
use uuid::Uuid;
|
||||
|
||||
use std::mem;
|
||||
|
||||
/// A group id created from a [`TransformGroup`].
|
||||
///
|
||||
/// This is mainly created so that [`TransformGroup::OwnedGroup`] can use another group inside of it.
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct TransformGroupId(u64);
|
||||
|
||||
impl From<TransformGroup> for TransformGroupId {
|
||||
fn from(value: TransformGroup) -> Self {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
value.hash(&mut hasher);
|
||||
let hash = hasher.finish();
|
||||
|
||||
TransformGroupId(hash)
|
||||
}
|
||||
}
|
||||
|
||||
/// Used as a key into the [`TransformBuffers`].
|
||||
///
|
||||
/// This enum is used as a key to identify a transform for a RenderJob. The renderer uses this
|
||||
/// to differentiate a transform between two entities that share a resource handle to the same
|
||||
/// scene:
|
||||
/// ```nobuild
|
||||
/// // The group of the mesh in the scene.
|
||||
/// let scene_mesh_group = TransformGroup::Res(scene_handle.uuid(), mesh_handle.uuid());
|
||||
/// // The group of the owned entity that has mesh in a scene.
|
||||
/// let finished_group = TransformGroup::OwnedGroup(entity, scene_mesh_group.into());
|
||||
/// ```
|
||||
///
|
||||
/// A simpler example of the use of a transform group is when processing lone mesh handles
|
||||
/// owned by entities:
|
||||
/// ```nobuild
|
||||
/// let group = TransformGroup::EntityRes(entity, mesh_handle.uuid());
|
||||
/// ```
|
||||
///
|
||||
/// These were made to fix [#6](https://git.seanomik.net/SeanOMik/lyra-engine/issues/6).
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
|
||||
pub enum TransformGroup {
|
||||
/// Just an entity.
|
||||
Entity(Entity),
|
||||
/// An entity that owns another group.
|
||||
OwnedGroup(Entity, TransformGroupId),
|
||||
/// A resource uuid grouped with an owning Entity.
|
||||
EntityRes(Entity, Uuid),
|
||||
/// A resource uuid grouped with another resource uuid.
|
||||
Res(Uuid, Uuid),
|
||||
}
|
||||
|
||||
/// The index of a specific Transform inside of the buffers.
|
||||
#[derive(Default, Copy, Clone, PartialEq, Eq, Debug, Component)]
|
||||
pub struct TransformIndex {
|
||||
/// The index of the entry in the buffer chain.
|
||||
entry_index: usize,
|
||||
/// The index of the transform in the entry.
|
||||
transform_index: usize,
|
||||
}
|
||||
|
||||
/// A struct representing a single transform buffer. There can be multiple of these
|
||||
struct BufferEntry {
|
||||
pub len: usize,
|
||||
pub bindgroup: wgpu::BindGroup,
|
||||
pub buffer: wgpu::Buffer,
|
||||
}
|
||||
|
||||
/// A HashMap that caches values for reuse.
|
||||
///
|
||||
/// The map detects dead values by tracking which entries were not updated since the last time
|
||||
/// [`CachedValMap::update`] was ran. When dead values are collected, they can be reused on an
|
||||
/// [`insert`](CachedValMap::insert) into the map.
|
||||
struct CachedValMap<K, V, S = RandomState> {
|
||||
latest: HashMap<K, V, S>,
|
||||
old: HashMap<K, V, S>,
|
||||
dead: VecDeque<V>,
|
||||
}
|
||||
|
||||
impl<K, V, S: Default> Default for CachedValMap<K, V, S> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
latest: Default::default(),
|
||||
old: Default::default(),
|
||||
dead: Default::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl<K: Hash + Eq + PartialEq + Clone, V: Clone, S: BuildHasher> CachedValMap<K, V, S> {
|
||||
/// Insert a key, possibly reusing a value in the map.
|
||||
///
|
||||
/// Returns the reused value, if one was reused. If its `None`, then the value was retrieved
|
||||
/// from running `val_fn`.
|
||||
pub fn insert<F>(&mut self, key: K, mut val_fn: F) -> Option<V>
|
||||
where
|
||||
F: FnMut() -> V
|
||||
{
|
||||
match self.latest.entry(key) {
|
||||
std::collections::hash_map::Entry::Occupied(mut e) => {
|
||||
e.insert(val_fn());
|
||||
None
|
||||
}
|
||||
std::collections::hash_map::Entry::Vacant(e) => {
|
||||
let val = self.dead.pop_front()
|
||||
.unwrap_or_else(val_fn);
|
||||
e.insert(val.clone());
|
||||
Some(val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to the value corresponding to the key.
|
||||
pub fn get(&mut self, key: K) -> Option<&V> {
|
||||
if let Some(v) = self.old.remove(&key) {
|
||||
self.latest.insert(key.clone(), v);
|
||||
}
|
||||
|
||||
self.latest.get(&key)
|
||||
}
|
||||
|
||||
/// Keep a key alive without updating its value.
|
||||
pub fn keep_alive(&mut self, key: K) {
|
||||
if let Some(v) = self.old.remove(&key) {
|
||||
self.latest.insert(key, v);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the map contains a value for the specified key.
|
||||
pub fn contains(&self, key: K) -> bool {
|
||||
self.old.contains_key(&key) || self.latest.contains_key(&key)
|
||||
}
|
||||
|
||||
/// Collects the now dead values for reuse.
|
||||
///
|
||||
/// This detects dead values by tracking which entries were not updated since the last time
|
||||
/// update was ran.
|
||||
pub fn update(&mut self) {
|
||||
// drain the dead values into the dead queue
|
||||
self.dead.extend(self.old.drain().map(|(_, v)| v));
|
||||
|
||||
// now drain the latest entries into the old entries
|
||||
self.old.extend(self.latest.drain());
|
||||
}
|
||||
}
|
||||
|
||||
/// A helper struct for managing the Transform buffers for meshes.
|
||||
///
|
||||
/// This struct manages a "chain" of uniform buffers that store Transform for [`TransformGroup`]s.
|
||||
/// When first created it only has a single "chain-link" with a buffer that is the maximum length
|
||||
/// the GPU supports. When the first buffer fills up, a new one should be created which will also
|
||||
/// be the maximum length the GPU supports. When the new buffer fills up, a new one will be
|
||||
/// created once again, and so on.
|
||||
///
|
||||
/// [`TransformGroup`]s are used to represent entries in the buffer. They are used to insert,
|
||||
/// update, and retrieve the transforms.
|
||||
pub struct TransformBuffers {
|
||||
pub bindgroup_layout: Arc<wgpu::BindGroupLayout>,
|
||||
entries: Vec<BufferEntry>,
|
||||
limits: wgpu::Limits,
|
||||
max_transform_count: usize,
|
||||
next_index: usize,
|
||||
}
|
||||
|
||||
impl TransformBuffers {
|
||||
pub fn new(device: &wgpu::Device) -> Self {
|
||||
let limits = device.limits();
|
||||
|
||||
let bindgroup_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
|
||||
entries: &[
|
||||
wgpu::BindGroupLayoutEntry {
|
||||
binding: 0,
|
||||
visibility: wgpu::ShaderStages::VERTEX,
|
||||
ty: wgpu::BindingType::Buffer {
|
||||
ty: wgpu::BufferBindingType::Uniform,
|
||||
has_dynamic_offset: true,
|
||||
min_binding_size: None,
|
||||
},
|
||||
count: None,
|
||||
},
|
||||
],
|
||||
label: Some("transform_bind_group_layout"),
|
||||
});
|
||||
|
||||
let mut s = Self {
|
||||
bindgroup_layout: Arc::new(bindgroup_layout),
|
||||
entries: Default::default(),
|
||||
max_transform_count: (limits.max_uniform_buffer_binding_size) as usize / (limits.min_uniform_buffer_offset_alignment as usize), //(mem::size_of::<glam::Mat4>()),
|
||||
limits,
|
||||
next_index: 0,
|
||||
};
|
||||
|
||||
// create the first uniform buffer
|
||||
s.expand_buffers(device);
|
||||
|
||||
s
|
||||
}
|
||||
|
||||
/// Reserve a transform index.
|
||||
///
|
||||
/// The buffer chain may expand if its required
|
||||
pub fn reserve_transform(&mut self, device: &wgpu::Device) -> TransformIndex {
|
||||
let index = self.next_index;
|
||||
self.next_index += 1;
|
||||
|
||||
// the index of the transform buffer
|
||||
let entry_index = index / self.max_transform_count;
|
||||
// the index of the transform in the buffer
|
||||
let transform_index = index % self.max_transform_count;
|
||||
|
||||
if entry_index >= self.entries.len() {
|
||||
self.expand_buffers(device);
|
||||
}
|
||||
|
||||
let entry = self.entries.get_mut(entry_index).unwrap();
|
||||
entry.len += 1;
|
||||
|
||||
TransformIndex {
|
||||
entry_index,
|
||||
transform_index,
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates a Transform at `index`.
|
||||
#[instrument(skip(self, queue, index, transform, normal_matrix))]
|
||||
#[inline(always)]
|
||||
pub fn update(&mut self, queue: &wgpu::Queue, index: TransformIndex, transform: glam::Mat4, normal_matrix: glam::Mat3) {
|
||||
let pair = TransformNormalMatPair {
|
||||
transform,
|
||||
normal_mat: glam::Mat4::from_mat3(normal_matrix),
|
||||
};
|
||||
|
||||
let entry = self.entries.get(index.entry_index).expect("invalid entry index, no entry!");
|
||||
let offset = self.buffer_offset(index);
|
||||
queue.write_buffer(&entry.buffer, offset as _, bytemuck::bytes_of(&pair));
|
||||
}
|
||||
|
||||
/// Expand the Transform buffers by adding another uniform buffer binding.
|
||||
///
|
||||
/// This object has a chain of uniform buffers, when the buffers are expanded, a new
|
||||
/// "chain-link" is created.
|
||||
#[instrument(skip(self, device))]
|
||||
pub fn expand_buffers(&mut self, device: &wgpu::Device) {
|
||||
let limits = device.limits();
|
||||
let max_buffer_sizes = self.max_transform_count as u64 * limits.min_uniform_buffer_offset_alignment as u64;
|
||||
|
||||
let transform_buffer = device.create_buffer(
|
||||
&wgpu::BufferDescriptor {
|
||||
label: Some(&format!("B_Transform_{}", self.entries.len())),
|
||||
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
|
||||
size: max_buffer_sizes,
|
||||
mapped_at_creation: false,
|
||||
}
|
||||
);
|
||||
|
||||
let tran_stride = mem::size_of::<TransformNormalMatPair>();
|
||||
|
||||
let bindgroup = device.create_bind_group(&wgpu::BindGroupDescriptor {
|
||||
layout: &self.bindgroup_layout,
|
||||
entries: &[
|
||||
wgpu::BindGroupEntry {
|
||||
binding: 0,
|
||||
resource: wgpu::BindingResource::Buffer(
|
||||
wgpu::BufferBinding {
|
||||
buffer: &transform_buffer,
|
||||
offset: 0,
|
||||
size: Some(NonZeroU64::new(tran_stride as u64).unwrap())
|
||||
}
|
||||
)
|
||||
},
|
||||
],
|
||||
label: Some("BG_Transforms"),
|
||||
});
|
||||
|
||||
let entry = BufferEntry {
|
||||
bindgroup,
|
||||
buffer: transform_buffer,
|
||||
len: 0,
|
||||
};
|
||||
self.entries.push(entry);
|
||||
}
|
||||
|
||||
/// Returns the bind group for the transform index.
|
||||
#[inline(always)]
|
||||
pub fn bind_group(&self, transform_id: TransformIndex) -> &wgpu::BindGroup {
|
||||
let entry_index = transform_id.transform_index / self.max_transform_count;
|
||||
let entry = self.entries.get(entry_index).unwrap();
|
||||
&entry.bindgroup
|
||||
}
|
||||
|
||||
/// Returns the offset of the transform inside the bind group buffer.
|
||||
///
|
||||
/// ```nobuild
|
||||
/// let bindgroup = transform_buffers.bind_group(job.transform_id);
|
||||
/// let offset = transform_buffers.buffer_offset(job.transform_id);
|
||||
/// render_pass.set_bind_group(1, bindgroup, &[ offset, offset, ]);
|
||||
/// ```
|
||||
#[inline(always)]
|
||||
pub fn buffer_offset(&self, transform_index: TransformIndex) -> u32 {
|
||||
//Self::get_buffer_offset(&self.limits, transform_index)
|
||||
let transform_index = transform_index.transform_index % self.max_transform_count;
|
||||
|
||||
//debug!("offset: {t}");
|
||||
transform_index as u32 * self.limits.min_uniform_buffer_offset_alignment
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if the buffers need to be expanded
|
||||
pub fn needs_expand(&self) -> bool {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
struct TransformNormalMatPair {
|
||||
transform: glam::Mat4,
|
||||
normal_mat: glam::Mat4,
|
||||
}
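// Illustrative usage sketch (not part of the original file): reserving a slot in the
// transform buffer chain and writing a transform into it each frame. The identity
// matrices are placeholders; at draw time the returned `TransformIndex` is passed to
// `bind_group` and `buffer_offset`, as in the `nobuild` example in the docs above.
fn transform_example(
    device: &wgpu::Device,
    queue: &wgpu::Queue,
    buffers: &mut TransformBuffers,
) -> TransformIndex {
    // May expand the buffer chain if the current buffers are full.
    let index = buffers.reserve_transform(device);

    let transform = glam::Mat4::IDENTITY;
    let normal_matrix = glam::Mat3::IDENTITY;
    buffers.update(queue, index, transform, normal_matrix);

    index
}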
|
|
@ -1,94 +0,0 @@
|
|||
use super::desc_buf_lay::DescVertexBufferLayout;
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
pub struct Vertex {
|
||||
pub position: glam::Vec3,
|
||||
pub tex_coords: glam::Vec2,
|
||||
pub normals: glam::Vec3,
|
||||
}
|
||||
|
||||
impl Vertex {
|
||||
pub fn new(position: glam::Vec3, tex_coords: glam::Vec2, normals: glam::Vec3) -> Self {
|
||||
Self {
|
||||
position, tex_coords, normals
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a [`wgpu::VertexBufferLayout`] with only the position as a vertex attribute.
|
||||
///
|
||||
/// The stride is still `std::mem::size_of::<Vertex>()`, but only position is included.
|
||||
pub fn position_desc<'a>() -> wgpu::VertexBufferLayout<'a> {
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
shader_location: 0,
|
||||
format: wgpu::VertexFormat::Float32x3, // Vec3
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DescVertexBufferLayout for Vertex {
|
||||
fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
shader_location: 0,
|
||||
format: wgpu::VertexFormat::Float32x3, // Vec3
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: std::mem::size_of::<[f32; 3]>() as wgpu::BufferAddress,
|
||||
shader_location: 1,
|
||||
format: wgpu::VertexFormat::Float32x2, // Vec2
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: std::mem::size_of::<[f32; 5]>() as wgpu::BufferAddress,
|
||||
shader_location: 2,
|
||||
format: wgpu::VertexFormat::Float32x3, // Vec3
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[repr(C)]
|
||||
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
|
||||
pub struct Vertex2D {
|
||||
pub position: glam::Vec3,
|
||||
pub tex_coords: glam::Vec2,
|
||||
}
|
||||
|
||||
impl Vertex2D {
|
||||
pub fn new(position: glam::Vec3, tex_coords: glam::Vec2) -> Self {
|
||||
Self {
|
||||
position, tex_coords
|
||||
}
|
||||
}
|
||||
|
||||
pub fn desc<'a>() -> wgpu::VertexBufferLayout<'a> {
|
||||
wgpu::VertexBufferLayout {
|
||||
array_stride: std::mem::size_of::<Vertex>() as wgpu::BufferAddress,
|
||||
step_mode: wgpu::VertexStepMode::Vertex,
|
||||
attributes: &[
|
||||
wgpu::VertexAttribute {
|
||||
offset: 0,
|
||||
shader_location: 0,
|
||||
format: wgpu::VertexFormat::Float32x3, // Vec3
|
||||
},
|
||||
wgpu::VertexAttribute {
|
||||
offset: std::mem::size_of::<glam::Vec3>() as wgpu::BufferAddress,
|
||||
shader_location: 1,
|
||||
format: wgpu::VertexFormat::Float32x2, // Vec2
|
||||
},
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,7 +0,0 @@
pub mod camera;
pub use camera::*;

pub mod free_fly_camera;
pub use free_fly_camera::*;

pub use lyra_scene::*;
@ -1,51 +0,0 @@
use lyra_ecs::Component;
use lyra_reflect::Reflect;
use lyra_resource::ResHandle;
use lyra_math::{Vec3, Vec2};

/// How the sprite is positioned and rotated relative to its [`Transform`].
///
/// Default pivot is `Pivot::Center`, which makes it easier to rotate sprites.
#[derive(Debug, Copy, Clone, PartialEq, Default, Component, Reflect)]
pub enum Pivot {
    #[default]
    Center,
    CenterLeft,
    CenterRight,
    TopLeft,
    TopRight,
    TopCenter,
    BottomLeft,
    BottomRight,
    BottomCenter,
    /// A custom anchor point relative to the top left.
    /// Top left is `(0.0, 0.0)`.
    Custom(Vec2)
}

impl Pivot {
    /// Get the pivot point as a Vec2.
    ///
    /// The point is offset from the top left `(0.0, 0.0)`.
    pub fn as_vec(&self) -> Vec2 {
        match self {
            Pivot::Center => Vec2::new(0.5, 0.5),
            Pivot::CenterLeft => Vec2::new(0.0, 0.5),
            Pivot::CenterRight => Vec2::new(1.0, 0.5),
            Pivot::TopLeft => Vec2::ZERO,
            Pivot::TopRight => Vec2::new(1.0, 0.0),
            Pivot::TopCenter => Vec2::new(0.5, 0.0),
            Pivot::BottomLeft => Vec2::new(0.0, 1.0),
            Pivot::BottomRight => Vec2::new(1.0, 1.0),
            Pivot::BottomCenter => Vec2::new(0.5, 1.0),
            Pivot::Custom(v) => *v,
        }
    }
}

#[derive(Clone, Component, Reflect)]
pub struct Sprite {
    pub texture: ResHandle<lyra_resource::Texture>,
    pub color: Vec3,
    pub pivot: Pivot,
}
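// Illustrative usage sketch (not part of the original file): turning a pivot into a
// pixel offset for a sprite of a given size. `sprite_size` is a hypothetical parameter.
fn pivot_offset(pivot: Pivot, sprite_size: Vec2) -> Vec2 {
    // `as_vec` is normalized to [0, 1] from the top left, so scaling by the sprite
    // size gives the anchor point in pixels.
    pivot.as_vec() * sprite_size
}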
@ -1,8 +0,0 @@

mod plugin;
pub use plugin::*;

mod window;
pub use window::*;

pub use winit::dpi as dpi;
@ -1,336 +0,0 @@
|
|||
use std::{collections::VecDeque, sync::Arc};
|
||||
|
||||
use async_std::task::block_on;
|
||||
use glam::{DVec2, IVec2, UVec2};
|
||||
use lyra_ecs::Entity;
|
||||
use lyra_reflect::Reflect;
|
||||
use rustc_hash::FxHashMap;
|
||||
use tracing::{debug, error, warn};
|
||||
use winit::{
|
||||
application::ApplicationHandler,
|
||||
event::WindowEvent,
|
||||
event_loop::{ActiveEventLoop, EventLoop},
|
||||
window::{Window, WindowAttributes, WindowId},
|
||||
};
|
||||
|
||||
pub use winit::event::{DeviceId, DeviceEvent, MouseScrollDelta, ElementState, RawKeyEvent};
|
||||
pub use winit::keyboard::PhysicalKey;
|
||||
|
||||
use crate::{
|
||||
game::{App, WindowState},
|
||||
plugin::Plugin,
|
||||
render::renderer::BasicRenderer, winit::{FullscreenMode, LastWindow, PrimaryWindow},
|
||||
};
|
||||
|
||||
use super::WindowOptions;
|
||||
|
||||
/// A struct that contains a [`DeviceEvent`](winit::event::DeviceEvent) with its source
|
||||
/// [`DeviceId`](winit::event::DeviceId).
|
||||
#[derive(Debug, Clone, Reflect)]
|
||||
pub struct DeviceEventPair {
|
||||
#[reflect(skip)]
|
||||
pub device_src: DeviceId,
|
||||
#[reflect(skip)]
|
||||
pub event: DeviceEvent,
|
||||
}
|
||||
|
||||
pub struct WinitPlugin {
|
||||
/// The primary window that will be created.
|
||||
///
|
||||
/// This will become `None` after the window is created. If you want to get the
|
||||
/// primary world later, query for an entity with the [`PrimaryWindow`] and
|
||||
/// [`WindowOptions`] components.
|
||||
pub primary_window: Option<WindowOptions>,
|
||||
}
|
||||
|
||||
impl Default for WinitPlugin {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
primary_window: Some(WindowOptions::default()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Plugin for WinitPlugin {
|
||||
fn setup(&mut self, app: &mut crate::game::App) {
|
||||
app.set_run_fn(winit_app_runner);
|
||||
app.register_event::<WindowEvent>();
|
||||
app.register_event::<DeviceEventPair>();
|
||||
|
||||
if let Some(prim) = self.primary_window.take() {
|
||||
app.add_resource(WinitWindows::with_window(prim));
|
||||
} else {
|
||||
app.add_resource(WinitWindows::default());
|
||||
}
|
||||
}
|
||||
|
||||
fn is_ready(&self, _app: &mut crate::game::App) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
fn complete(&self, _app: &mut crate::game::App) {}
|
||||
|
||||
fn cleanup(&self, _app: &mut crate::game::App) {}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct WinitWindows {
|
||||
pub windows: FxHashMap<WindowId, Arc<Window>>,
|
||||
pub entity_to_window: FxHashMap<Entity, WindowId>,
|
||||
pub window_to_entity: FxHashMap<WindowId, Entity>,
|
||||
/// windows that will be created when the Winit runner first starts.
|
||||
window_queue: VecDeque<WindowOptions>,
|
||||
}
|
||||
|
||||
impl WinitWindows {
|
||||
pub fn with_window(window: WindowOptions) -> Self {
|
||||
Self {
|
||||
window_queue: vec![window].into(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_window(
|
||||
&mut self,
|
||||
event_loop: &ActiveEventLoop,
|
||||
entity: Entity,
|
||||
attr: WindowAttributes,
|
||||
) -> Result<WindowId, winit::error::OsError> {
|
||||
let win = event_loop.create_window(attr)?;
|
||||
let id = win.id();
|
||||
|
||||
self.windows.insert(id, Arc::new(win));
|
||||
self.entity_to_window.insert(entity, id);
|
||||
self.window_to_entity.insert(id, entity);
|
||||
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
pub fn get_entity_window(&self, entity: Entity) -> Option<&Arc<Window>> {
|
||||
self.entity_to_window
|
||||
.get(&entity)
|
||||
.and_then(|id| self.windows.get(id))
|
||||
}
|
||||
}
|
||||
|
||||
pub fn winit_app_runner(app: App) {
|
||||
let evloop = EventLoop::new().expect("failed to create winit EventLoop");
|
||||
|
||||
let mut winit_runner = WinitRunner { app };
|
||||
evloop.run_app(&mut winit_runner).expect("loop error");
|
||||
}
|
||||
|
||||
struct WinitRunner {
|
||||
app: App,
|
||||
}
|
||||
|
||||
impl ApplicationHandler for WinitRunner {
|
||||
fn about_to_wait(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
|
||||
self.app.update();
|
||||
|
||||
let renderer = self
|
||||
.app
|
||||
.renderer
|
||||
.get_mut()
|
||||
.expect("renderer was not initialized");
|
||||
renderer.prepare(&mut self.app.world);
|
||||
|
||||
match renderer.render() {
|
||||
Ok(_) => {}
|
||||
// Reconfigure the surface if lost
|
||||
//Err(wgpu::SurfaceError::Lost) => self.on_resize(.surface_size()),
|
||||
// The system is out of memory, we should probably quit
|
||||
Err(wgpu::SurfaceError::OutOfMemory) => {
|
||||
error!("OOM");
|
||||
event_loop.exit();
|
||||
}
|
||||
// All other errors (Outdated, Timeout) should be resolved by the next frame
|
||||
Err(e) => eprintln!("{:?}", e),
|
||||
}
|
||||
|
||||
let windows = self.app.world.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource");
|
||||
for window in windows.windows.values() {
|
||||
window.request_redraw();
|
||||
}
|
||||
}
|
||||
|
||||
fn resumed(&mut self, event_loop: &winit::event_loop::ActiveEventLoop) {
|
||||
let world = &mut self.app.world;
|
||||
let en = world.reserve_entity();
|
||||
|
||||
let mut windows = world.get_resource_mut::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource");
|
||||
let mut to_create_window = windows.window_queue.pop_front().unwrap_or_default();
|
||||
let window_attr = to_create_window.as_attributes();
|
||||
//drop(windows);
|
||||
|
||||
|
||||
//let en = world.spawn((to_create_window, last, PrimaryWindow));
|
||||
|
||||
//let mut windows = world.get_resource_mut::<WinitWindows>()
|
||||
//.expect("world missing WinitWindows resource");
|
||||
let wid = windows.create_window(event_loop, en, window_attr).unwrap();
|
||||
let window = windows.windows.get(&wid).unwrap().clone();
|
||||
drop(windows);
|
||||
|
||||
// update fields that default to `None`
|
||||
to_create_window.position = window.outer_position()
|
||||
.or_else(|_| window.inner_position())
|
||||
.ok()
|
||||
.map(|p| IVec2::new(p.x, p.y));
|
||||
|
||||
// See [`WindowOptions::as_attributes`], it defaults to Windowed fullscreen mode, so we
|
||||
// must trigger an update in the sync system;
|
||||
let mut last = LastWindow { last: to_create_window.clone() };
|
||||
last.last.fullscreen_mode = FullscreenMode::Windowed;
|
||||
|
||||
world.insert(en, (to_create_window, last, PrimaryWindow));
|
||||
|
||||
debug!("Created window after resume");
|
||||
|
||||
let renderer = block_on(BasicRenderer::create_with_window(world, window));
|
||||
if self.app.renderer.set(Box::new(renderer)).is_err() {
|
||||
warn!("renderer was re-initialized");
|
||||
}
|
||||
}
|
||||
|
||||
fn device_event(
|
||||
&mut self,
|
||||
_: &ActiveEventLoop,
|
||||
device_src: winit::event::DeviceId,
|
||||
event: winit::event::DeviceEvent,
|
||||
) {
|
||||
self.app.push_event(DeviceEventPair { device_src, event });
|
||||
}
|
||||
|
||||
fn window_event(
|
||||
&mut self,
|
||||
event_loop: &winit::event_loop::ActiveEventLoop,
|
||||
window_id: winit::window::WindowId,
|
||||
event: WindowEvent,
|
||||
) {
|
||||
/* let windows = self.app.world.get_resource::<WinitWindows>();
|
||||
let window = match windows.windows.get(&window_id) {
|
||||
Some(w) => w.clone(),
|
||||
None => return,
|
||||
};
|
||||
drop(windows); */
|
||||
|
||||
self.app.push_event(event.clone());
|
||||
match event {
|
||||
WindowEvent::CursorMoved { position, .. } => {
|
||||
let windows = self.app.world.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource");
|
||||
let en = windows.window_to_entity.get(&window_id)
|
||||
.expect("missing window entity");
|
||||
|
||||
// update the window and its cache so the sync system doesn't try to update the window
|
||||
let (mut en_window, mut en_last_win) = self.app.world.view_one::<(&mut WindowOptions, &mut LastWindow)>(*en).get().unwrap();
|
||||
let pos = Some(DVec2::new(position.x, position.y));
|
||||
en_window.set_physical_cursor_position(pos);
|
||||
en_last_win.set_physical_cursor_position(pos);
|
||||
},
|
||||
WindowEvent::ActivationTokenDone { .. } => todo!(),
|
||||
WindowEvent::Resized(physical_size) => {
|
||||
self.app.on_resize(physical_size);
|
||||
|
||||
let (mut window, mut last_window) = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<(&mut WindowOptions, &mut LastWindow)>(*e).get())
|
||||
.unwrap();
|
||||
|
||||
// update the window and its cache so the sync system doesn't try to update the window
|
||||
let size = UVec2::new(physical_size.width, physical_size.height);
|
||||
window.set_physical_size(size);
|
||||
last_window.set_physical_size(size);
|
||||
},
|
||||
// Mark the cursor as outside the window when it leaves
|
||||
WindowEvent::CursorLeft { .. } => {
|
||||
let (mut window, mut last_window) = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<(&mut WindowOptions, &mut LastWindow)>(*e).get())
|
||||
.unwrap();
|
||||
window.set_physical_cursor_position(None);
|
||||
last_window.set_physical_cursor_position(None);
|
||||
},
|
||||
WindowEvent::Moved(physical_position) => {
|
||||
let mut state = self.app.world.get_resource_or_else(WindowState::new);
|
||||
state.position = IVec2::new(physical_position.x, physical_position.y);
|
||||
},
|
||||
WindowEvent::CloseRequested => {
|
||||
self.app.on_exit();
|
||||
event_loop.exit();
|
||||
},
|
||||
WindowEvent::Destroyed => todo!(),
|
||||
WindowEvent::DroppedFile(_path_buf) => todo!(),
|
||||
WindowEvent::HoveredFile(_path_buf) => todo!(),
|
||||
WindowEvent::HoveredFileCancelled => todo!(),
|
||||
WindowEvent::Focused(focused) => {
|
||||
let mut window_opts = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<&mut WindowOptions>(*e).get())
|
||||
.unwrap();
|
||||
window_opts.focused = focused;
|
||||
},
|
||||
WindowEvent::ModifiersChanged(modifiers) => {
|
||||
debug!("modifiers changed: {:?}", modifiers)
|
||||
},
|
||||
WindowEvent::ScaleFactorChanged { scale_factor, .. } => {
|
||||
let mut window_opts = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<&mut WindowOptions>(*e).get())
|
||||
.unwrap();
|
||||
window_opts.scale_factor = scale_factor;
|
||||
},
|
||||
WindowEvent::ThemeChanged(theme) => {
|
||||
let mut window_opts = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<&mut WindowOptions>(*e).get())
|
||||
.unwrap();
|
||||
window_opts.theme = Some(theme);
|
||||
},
|
||||
WindowEvent::Occluded(occ) => {
|
||||
let mut window_opts = self
|
||||
.app
|
||||
.world
|
||||
.get_resource::<WinitWindows>()
|
||||
.expect("world missing WinitWindows resource")
|
||||
.window_to_entity
|
||||
.get(&window_id)
|
||||
.and_then(|e| self.app.world.view_one::<&mut WindowOptions>(*e).get())
|
||||
.unwrap();
|
||||
window_opts.occluded = occ;
|
||||
},
|
||||
WindowEvent::RedrawRequested => {
|
||||
//debug!("should redraw");
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,723 +0,0 @@
|
|||
use std::ops::{Deref, DerefMut};
|
||||
|
||||
use glam::{DVec2, IVec2, UVec2, Vec2};
|
||||
use lyra_ecs::{query::{filter::Changed, Entities, Res, View}, Component};
|
||||
use lyra_math::Area;
|
||||
use lyra_reflect::Reflect;
|
||||
use lyra_resource::Image;
|
||||
use tracing::{error, warn};
|
||||
use winit::{dpi::{LogicalSize, PhysicalPosition, PhysicalSize, Position, Size}, monitor::{MonitorHandle, VideoModeHandle}, window::{CustomCursor, Window}};
|
||||
|
||||
pub use winit::window::{CursorGrabMode, CursorIcon, Icon, Theme, WindowButtons, WindowLevel};
|
||||
|
||||
use crate::{plugin::Plugin, winit::WinitWindows, lyra_engine};
|
||||
|
||||
/// Flag component that marks an entity's window as the primary window.
#[derive(Clone, Component)]
pub struct PrimaryWindow;
|
||||
|
||||
#[derive(Clone, PartialEq, Eq)]
|
||||
pub enum CursorAppearance {
|
||||
Icon(CursorIcon),
|
||||
Custom(CustomCursor)
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Reflect)]
|
||||
pub enum FullscreenMode {
|
||||
#[default]
|
||||
Windowed,
|
||||
BorderlessFullscreen,
|
||||
SizedFullscreen,
|
||||
Fullscreen,
|
||||
}
|
||||
|
||||
impl FullscreenMode {
|
||||
pub fn as_winit_fullscreen(&self, monitor: MonitorHandle, physical_size: UVec2) -> Option<winit::window::Fullscreen> {
|
||||
match &self {
|
||||
FullscreenMode::Windowed => None,
|
||||
FullscreenMode::BorderlessFullscreen => Some(winit::window::Fullscreen::Borderless(None)),
|
||||
// find closest video mode for full screen sizes
|
||||
_ => {
|
||||
let closest = find_closest_video_mode(monitor, physical_size);
|
||||
|
||||
if let Some(closest) = closest {
|
||||
Some(winit::window::Fullscreen::Exclusive(closest))
|
||||
} else {
|
||||
warn!("Could not find closest video mode, falling back to windowed.");
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Cursor {
|
||||
/// Modifies the cursor icon of the window.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Orbital:** Unsupported.
|
||||
/// * **Web:** Custom cursors have to be loaded and decoded first, until then the previous cursor is shown.
|
||||
pub appearance: CursorAppearance,
|
||||
|
||||
/// Gets/sets the window's cursor grab mode
|
||||
///
|
||||
/// # Tip:
|
||||
/// First try confining the cursor, and if it fails, try locking it instead.
|
||||
pub grab: CursorGrabMode,
|
||||
|
||||
/// Gets/sets whether the window catches cursor events.
|
||||
///
|
||||
/// If `false`, events are passed through the window such that any other window behind it
|
||||
/// receives them. By default hittest is enabled.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
pub hittest: bool,
|
||||
|
||||
/// Gets/sets the cursor's visibility
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Windows / X11 / Wayland:** The cursor is only hidden within the confines of the window.
|
||||
/// * **macOS:** The cursor is hidden as long as the window has input focus, even if the
|
||||
/// cursor is outside of the window.
|
||||
/// * **iOS / Android:** Unsupported.
|
||||
pub visible: bool,
|
||||
//cursor_position: Option<PhysicalPosition<i32>>,
|
||||
}
|
||||
|
||||
/// Options that the window will be created with.
|
||||
#[derive(Clone, Component, Reflect)]
|
||||
pub struct WindowOptions {
|
||||
/// The enabled window buttons.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Wayland / X11 / Orbital:** Not implemented. Always set to [`WindowButtons::all`].
|
||||
/// * **Web / iOS / Android:** Unsupported. Always set to [`WindowButtons::all`].
|
||||
#[reflect(skip)]
|
||||
pub enabled_buttons: WindowButtons,
|
||||
|
||||
/// Gets or sets if the window is in focus.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Wayland / Orbital:** Unsupported.
|
||||
pub focused: bool,
|
||||
|
||||
/// Gets or sets the fullscreen setting.
|
||||
pub fullscreen_mode: FullscreenMode,
|
||||
|
||||
/// Gets/sets the position of the top-left hand corner of the window relative to
|
||||
/// the top-left hand corner of the desktop.
|
||||
///
|
||||
/// Note that the top-left hand corner of the desktop is not necessarily the same
|
||||
/// as the screen. If the user uses a desktop with multiple monitors, the top-left
|
||||
/// hand corner of the desktop is the top-left hand corner of the monitor at the
|
||||
/// top-left of the desktop.
|
||||
///
|
||||
/// If this is `None`, the position will be chosen by the windowing manager at creation, then
/// written back here once the window is created.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS:** Value is the top left coordinates of the window’s safe area in the screen
|
||||
/// space coordinate system.
|
||||
/// * **Web:** Value is the top-left coordinates relative to the viewport. Note: this will be
|
||||
/// the same value as [`WindowOptions::outer_position`].
|
||||
/// * **Android / Wayland:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
pub position: Option<IVec2>,
|
||||
|
||||
/// Gets/sets the size of the view in the window.
|
||||
///
|
||||
/// The size does not include the window title bars and borders.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Web:** The size of the canvas element. Doesn’t account for CSS `transform`.
|
||||
#[reflect(skip)]
|
||||
physical_size: UVec2,
|
||||
|
||||
/// Gets/sets if the window has decorations.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Web:** Always set to `true`.
|
||||
pub decorated: bool,
|
||||
|
||||
/// Gets/sets the window's current maximized state
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Web:** Unsupported.
|
||||
pub maximized: bool,
|
||||
|
||||
/// Gets/sets the window's current minimized state.
|
||||
///
|
||||
/// Is `None` if the minimized state could not be determined.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Wayland:** always `None`, un-minimize is unsupported.
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
pub minimized: Option<bool>,
|
||||
|
||||
/// Gets/sets the window's current resizable state
|
||||
///
|
||||
/// If this is false, the window can still be resized by changing [`WindowOptions::size`].
|
||||
///
|
||||
/// Platform-specific
|
||||
/// Setting this only has an effect on desktop platforms.
|
||||
///
|
||||
/// * **X11:** Due to a bug in XFCE, setting this has no effect.
|
||||
/// * **iOS / Android / Web:** Unsupported.
|
||||
pub resizable: bool,
|
||||
|
||||
/// Gets/sets the window's current visibility state.
|
||||
///
|
||||
/// `None` means it couldn't be determined.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **X11:** Not implemented.
|
||||
/// * **Wayland / Android / Web:** Unsupported.
|
||||
/// * **iOS:** Setting is not implemented, getting is unsupported.
|
||||
pub visible: Option<bool>,
|
||||
|
||||
/// Gets/sets the window resize increments.
|
||||
///
|
||||
/// This is a niche constraint hint usually employed by terminal emulators and other apps
|
||||
/// that need “blocky” resizes.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **macOS:** Increments are converted to logical size and then macOS rounds them to whole numbers.
|
||||
/// * **Wayland:** Not implemented, always `None`.
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
pub resize_increments: Option<Size>,
|
||||
|
||||
/// Gets the scale factor.
|
||||
///
|
||||
/// The scale factor is the ratio of physical pixels to logical pixels.
|
||||
/// See [winit docs](https://docs.rs/winit/latest/winit/window/struct.Window.html#method.scale_factor)
|
||||
/// for more information.
|
||||
pub scale_factor: f64,
|
||||
|
||||
/// Gets/sets the window's blur state.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Android / iOS / X11 / Web / Windows:** Unsupported.
|
||||
/// * **Wayland:** Only works with org_kde_kwin_blur_manager protocol.
|
||||
pub blur: bool,
|
||||
|
||||
#[reflect(skip)]
|
||||
pub cursor: Cursor,
|
||||
|
||||
/// Sets whether the window should get IME events
|
||||
///
|
||||
/// When IME is allowed, the window will receive [`Ime`](winit::event::WindowEvent::Ime)
|
||||
/// events, and during the preedit phase the window will NOT get KeyboardInput events.
|
||||
/// The window should allow IME while it is expecting text input.
|
||||
///
|
||||
/// When IME is not allowed, the window won’t receive [`Ime`](winit::event::WindowEvent::Ime)
|
||||
/// events, and will receive [`KeyboardInput`](winit::event::WindowEvent::KeyboardInput) events
|
||||
/// for every keypress instead. Not allowing IME is useful for games for example.
|
||||
/// IME is not allowed by default.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **macOS:** IME must be enabled to receive text-input where dead-key sequences are combined.
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
/// * **X11:** Enabling IME will disable dead keys reporting during compose.
|
||||
pub ime_allowed: bool,
|
||||
|
||||
/// Sets area of IME box in physical coordinates relative to the top left.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **X11:** Area is not supported, only position.
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
physical_ime_cursor_area: Option<Area<Vec2, Vec2>>,
|
||||
|
||||
/// Gets/sets the minimum size of the window.
|
||||
///
|
||||
/// Units are in logical pixels.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
pub min_size: Option<Vec2>,
|
||||
|
||||
/// Gets/sets the maximum size of the window.
|
||||
///
|
||||
/// Units are in logical pixels.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
pub max_size: Option<Vec2>,
|
||||
|
||||
/// Gets/sets the current window theme.
|
||||
///
|
||||
/// Specify `None` to reset the theme to the system default. May also be `None` on unsupported
|
||||
/// platforms.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Wayland:** Sets the theme for the client side decorations. Using `None` will use dbus
|
||||
/// to get the system preference.
|
||||
/// * **X11:** Sets `_GTK_THEME_VARIANT` hint to `dark` or `light` and if `None` is used,
|
||||
/// it will default to [`Theme::Dark`](winit::window::Theme::Dark).
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
pub theme: Option<Theme>,
|
||||
|
||||
/// Gets/sets the title of the window.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android:** Unsupported.
|
||||
/// * **X11 / Wayland / Web:** Cannot get, will always be an empty string.
|
||||
pub title: String,
|
||||
|
||||
/// Gets/sets the window's transparency state.
|
||||
///
|
||||
/// This is just a hint that may not change anything about the window transparency; however,
/// a mismatch between the window's content and this hint may result in visual artifacts.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **macOS:** This will reset the window’s background color.
|
||||
/// * **Web / iOS / Android:** Unsupported.
|
||||
/// * **X11:** Can only be set while building the window.
|
||||
pub transparent: bool,
|
||||
|
||||
/// Sets the window's icon.
|
||||
///
|
||||
/// On Windows and X11, this is typically the small icon in the top-left corner of
|
||||
/// the titlebar.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS / Android / Web / Wayland / macOS / Orbital:** Unsupported.
|
||||
/// * **Windows:** Sets `ICON_SMALL`. The base size for a window icon is 16x16, but it’s
|
||||
/// recommended to account for screen scaling and pick a multiple of that, i.e. 32x32.
|
||||
/// * **X11:** Has no universal guidelines for icon sizes, so you’re at the whims of
|
||||
/// the WM. That said, it’s usually in the same ballpark as on Windows.
|
||||
pub window_icon: Option<lyra_resource::ResHandle<Image>>,
|
||||
|
||||
/// Change the window level.
|
||||
///
|
||||
/// This is just a hint to the OS, and the system could ignore it.
|
||||
///
|
||||
/// See [`WindowLevel`] for details.
|
||||
#[reflect(skip)]
|
||||
pub window_level: WindowLevel,
|
||||
|
||||
/// Show [window menu](https://en.wikipedia.org/wiki/Common_menus_in_Microsoft_Windows#System_menu)
|
||||
/// at a specified position in physical coordinates.
|
||||
///
|
||||
/// This is the context menu that is normally shown when interacting with the title bar. It
/// is useful when implementing custom decorations.
///
/// Platform-specific
|
||||
/// * **Android / iOS / macOS / Orbital / Wayland / Web / X11:** Unsupported.
|
||||
//pub physical_window_menu_pos: Option<Vec2>,
|
||||
|
||||
/// Gets the window's occluded state (completely hidden from view).
|
||||
///
|
||||
/// This is different to window visibility as it depends on whether the window is
|
||||
/// closed, minimised, set invisible, or fully occluded by another window.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS:** this is set to `false` in response to an applicationWillEnterForeground
|
||||
/// callback which means the application should start preparing its data.
|
||||
/// It's `true` in response to an applicationDidEnterBackground callback, which means
|
||||
/// the application should free resources (according to the iOS application lifecycle).
|
||||
/// * **Web:** Doesn't take into account CSS border, padding, or transform.
|
||||
/// * **Android / Wayland / Windows / Orbital:** Unsupported.
|
||||
// TODO: update
|
||||
pub(crate) occluded: bool,
|
||||
|
||||
/// Gets/sets the position of the cursor in physical coordinates.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **Wayland:** Cursor must be in [`CursorGrabMode::Locked`].
|
||||
/// * **iOS / Android / Web / Orbital:** Unsupported.
|
||||
#[reflect(skip)]
|
||||
physical_cursor_position: Option<DVec2>,
|
||||
}
|
||||
|
||||
/* fn physical_to_vec2<P: winit::dpi::Pixel>(size: PhysicalSize<P>) -> Vec2 {
|
||||
let size = size.cast::<f32>();
|
||||
Vec2::new(size.width, size.height)
|
||||
} */
|
||||
|
||||
fn logical_to_vec2(size: LogicalSize<f32>) -> Vec2 {
|
||||
Vec2::new(size.width, size.height)
|
||||
}
|
||||
|
||||
impl From<winit::window::WindowAttributes> for WindowOptions {
|
||||
fn from(value: winit::window::WindowAttributes) -> Self {
|
||||
Self {
|
||||
enabled_buttons: value.enabled_buttons,
|
||||
focused: false,
|
||||
fullscreen_mode: value.fullscreen.map(|m| match m {
|
||||
winit::window::Fullscreen::Exclusive(video_mode_handle) => {
|
||||
if video_mode_handle.size() == video_mode_handle.monitor().size() {
|
||||
FullscreenMode::Fullscreen
|
||||
} else {
|
||||
FullscreenMode::SizedFullscreen
|
||||
}
|
||||
},
|
||||
winit::window::Fullscreen::Borderless(_) => FullscreenMode::BorderlessFullscreen,
|
||||
}).unwrap_or(FullscreenMode::Windowed),
|
||||
position: value.position.map(|p| {
|
||||
let s = p.to_physical::<i32>(1.0);
|
||||
IVec2::new(s.x, s.y)
|
||||
}),
|
||||
physical_size: value.inner_size.map(|s| {
|
||||
let s = s.to_physical::<u32>(1.0);
|
||||
UVec2::new(s.width, s.height)
|
||||
}).unwrap_or(UVec2::new(1280, 720)),
|
||||
decorated: value.decorations,
|
||||
maximized: value.maximized,
|
||||
minimized: None,
|
||||
resizable: value.resizable,
|
||||
visible: Some(value.visible),
|
||||
resize_increments: value.resize_increments.map(|r| r.into()),
|
||||
scale_factor: 1.0,
|
||||
blur: value.blur,
|
||||
cursor: Cursor {
|
||||
appearance: match value.cursor {
|
||||
winit::window::Cursor::Icon(icon) => CursorAppearance::Icon(icon),
|
||||
winit::window::Cursor::Custom(custom) => CursorAppearance::Custom(custom),
|
||||
},
|
||||
grab: CursorGrabMode::None,
|
||||
hittest: true,
|
||||
visible: true,
|
||||
},
|
||||
ime_allowed: false,
|
||||
physical_ime_cursor_area: None,
|
||||
min_size: value.min_inner_size.map(|m| logical_to_vec2(m.to_logical(1.0))),
|
||||
max_size: value.max_inner_size.map(|m| logical_to_vec2(m.to_logical(1.0))),
|
||||
theme: value.preferred_theme,
|
||||
title: value.title,
|
||||
transparent: value.transparent,
|
||||
window_icon: None,
|
||||
window_level: value.window_level,
|
||||
occluded: false,
|
||||
physical_cursor_position: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for WindowOptions {
|
||||
fn default() -> Self {
|
||||
Self::from(Window::default_attributes())
|
||||
}
|
||||
}
|
||||
|
||||
fn find_closest_video_mode(monitor: MonitorHandle, physical_size: UVec2) -> Option<VideoModeHandle> {
    let mut modes = monitor.video_modes();

    let mut closest = modes.next()?;
    let closest_size = closest.size();
    let mut closest_size = UVec2::new(closest_size.width, closest_size.height);

    // compare against a signed copy of the target so the difference below cannot underflow
    let target = physical_size.as_ivec2();

    for mode in modes {
        let s = mode.size();
        let s = UVec2::new(s.width, s.height);

        // keep the video mode whose size is closest to the requested physical size
        if (target - s.as_ivec2()).length_squared() < (target - closest_size.as_ivec2()).length_squared() {
            closest = mode;
            closest_size = s;
        }
    }

    Some(closest)
}
|
||||
|
||||
impl WindowOptions {
|
||||
/// Create winit [`WindowAttributes`] from self.
|
||||
///
|
||||
/// This will ignore [`WindowOptions::fullscreen`] mode on self, defaulting to
|
||||
/// [`FullscreenMode::Windowed`]. It will be updated on first run of the sync system.
|
||||
pub(crate) fn as_attributes(&self) -> winit::window::WindowAttributes {
|
||||
let mut att = winit::window::Window::default_attributes();
|
||||
|
||||
att.enabled_buttons = self.enabled_buttons.clone();
|
||||
att.fullscreen = None;
|
||||
att.inner_size = Some(Size::Physical(PhysicalSize::new(self.physical_size.x, self.physical_size.y)));
|
||||
att.decorations = self.decorated;
|
||||
att.maximized = self.maximized;
|
||||
att.resizable = self.resizable;
|
||||
att.visible = self.visible.unwrap_or(true);
|
||||
att.position = self.position.map(|p| Position::Physical(PhysicalPosition::new(p.x, p.y)));
|
||||
att.resize_increments = self.resize_increments.map(|i| i.into());
|
||||
att.blur = self.blur;
|
||||
att.cursor = match self.cursor.appearance.clone() {
|
||||
CursorAppearance::Icon(icon) => winit::window::Cursor::Icon(icon),
|
||||
CursorAppearance::Custom(custom) => winit::window::Cursor::Custom(custom),
|
||||
};
|
||||
att.min_inner_size = self.min_size.map(|s| Size::Logical(LogicalSize::new(s.x as _, s.y as _)));
|
||||
att.max_inner_size = self.max_size.map(|s| Size::Logical(LogicalSize::new(s.x as _, s.y as _)));
|
||||
att.preferred_theme = self.theme;
|
||||
att.title = self.title.clone();
|
||||
att.transparent = self.transparent;
|
||||
if self.window_icon.is_some() {
|
||||
todo!("cannot set window attribute icon yet");
|
||||
}
|
||||
att.window_level = self.window_level;
|
||||
|
||||
att
|
||||
}
|
||||
|
||||
/// The size of the window in physical coordinates.
|
||||
pub fn physical_size(&self) -> UVec2 {
|
||||
self.physical_size
|
||||
}
|
||||
|
||||
/// Set the size of the window in physical coordinates.
|
||||
pub fn set_physical_size(&mut self, size: UVec2) {
|
||||
self.physical_size = size;
|
||||
}
|
||||
|
||||
/// The size of the window in logical coordinates.
|
||||
pub fn size(&self) -> Vec2 {
|
||||
self.physical_size.as_vec2() / self.scale_factor as f32
|
||||
}
|
||||
|
||||
/// Set the size of the window in logical coordinates.
|
||||
pub fn set_size(&mut self, size: Vec2) {
|
||||
self.physical_size = (size * self.scale_factor as f32).as_uvec2();
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if the mouse is inside the window.
|
||||
pub fn is_mouse_inside(&self) -> bool {
|
||||
if let Some(pos) = self.physical_cursor_position {
|
||||
let s = self.physical_size;
|
||||
return pos.x >= 0.0 && pos.x <= s.x as f64
|
||||
&& pos.y >= 0.0 && pos.y <= s.y as f64;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
/// The cursor position in the window in logical coordinates.
|
||||
///
|
||||
/// Returns `None` if the cursor is not in the window.
|
||||
pub fn cursor_position(&self) -> Option<Vec2> {
|
||||
if !self.is_mouse_inside() {
|
||||
return None;
|
||||
}
|
||||
|
||||
self.physical_cursor_position.map(|p| (p / self.scale_factor).as_vec2())
|
||||
}
|
||||
|
||||
/// The cursor position in the window in physical coordinates.
|
||||
///
|
||||
/// Returns `None` if the cursor is not in the window.
|
||||
pub fn physical_cursor_position(&self) -> Option<Vec2> {
|
||||
if !self.is_mouse_inside() {
|
||||
return None;
|
||||
}
|
||||
|
||||
self.physical_cursor_position.map(|p| p.as_vec2())
|
||||
}
|
||||
|
||||
/// Set the cursor position in logical coordinates.
|
||||
///
|
||||
/// Can be used to mark the cursor outside of the window as well.
|
||||
pub fn set_cursor_position(&mut self, pos: Option<Vec2>) {
|
||||
self.physical_cursor_position = pos.map(|p| p.as_dvec2() * self.scale_factor);
|
||||
}
|
||||
|
||||
/// Set the cursor position in physical coordinates.
|
||||
///
|
||||
/// Can be used to mark the cursor outside of the window as well.
|
||||
pub fn set_physical_cursor_position(&mut self, pos: Option<DVec2>) {
|
||||
self.physical_cursor_position = pos;
|
||||
}
|
||||
|
||||
/// The window's occluded state (completely hidden from view).
|
||||
///
|
||||
/// This is different to window visibility as it depends on whether the window is
|
||||
/// closed, minimised, set invisible, or fully occluded by another window.
|
||||
///
|
||||
/// Platform-specific
|
||||
/// * **iOS:** this is set to `false` in response to an applicationWillEnterForeground
|
||||
/// callback which means the application should start preparing its data.
|
||||
/// It's `true` in response to an applicationDidEnterBackground callback, which means
|
||||
/// the application should free resources (according to the iOS application lifecycle).
|
||||
/// * **Web:** Doesn't take into account CSS border, padding, or transform.
|
||||
/// * **Android / Wayland / Windows / Orbital:** Unsupported.
|
||||
pub fn occluded(&self) -> bool {
|
||||
self.occluded
|
||||
}
|
||||
}
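// A test-style sketch of the logical/physical size conversion implemented above; the
// numbers are arbitrary and only illustrate that `size()`/`set_size()` divide and multiply
// the stored physical size by `scale_factor`.
#[cfg(test)]
mod size_conversion_tests {
    use super::*;

    #[test]
    fn logical_physical_round_trip() {
        let mut opts = WindowOptions::default();
        opts.scale_factor = 2.0;
        opts.set_physical_size(UVec2::new(1280, 720));

        // logical size is the stored physical size divided by the scale factor
        assert_eq!(opts.size(), Vec2::new(640.0, 360.0));

        // setting a logical size stores it back as physical pixels
        opts.set_size(Vec2::new(400.0, 300.0));
        assert_eq!(opts.physical_size(), UVec2::new(800, 600));
    }
}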
|
||||
|
||||
/// The state of the window last time it was changed.
|
||||
///
|
||||
/// This is used in [`window_sync_system`] to see what fields of [`WindowOptions`] changed
|
||||
/// when syncing the winit window with the component.
|
||||
#[derive(Clone, Component)]
|
||||
pub struct LastWindow {
|
||||
pub last: WindowOptions,
|
||||
}
|
||||
|
||||
impl Deref for LastWindow {
|
||||
type Target = WindowOptions;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.last
|
||||
}
|
||||
}
|
||||
|
||||
impl DerefMut for LastWindow {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.last
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct WindowPlugin {
|
||||
#[allow(dead_code)]
|
||||
create_options: WindowOptions,
|
||||
}
|
||||
|
||||
/// A system that syncs Winit Windows with [`WindowOptions`] components.
|
||||
pub fn window_sync_system(windows: Res<WinitWindows>, view: View<(Entities, &WindowOptions, &mut LastWindow), Changed<WindowOptions>>) -> anyhow::Result<()> {
|
||||
for (entity, opts, mut last) in view.iter() {
|
||||
let window = windows.get_entity_window(entity)
|
||||
.expect("entity's window is missing");
|
||||
|
||||
if opts.enabled_buttons != last.enabled_buttons {
|
||||
window.set_enabled_buttons(opts.enabled_buttons);
|
||||
}
|
||||
|
||||
if opts.focused != last.focused && opts.focused {
|
||||
window.focus_window();
|
||||
}
|
||||
|
||||
if opts.fullscreen_mode != last.fullscreen_mode {
|
||||
let monitor = window.primary_monitor().unwrap_or_else(|| {
|
||||
let mut m = window.available_monitors();
|
||||
m.next().expect("failed to find any available monitor")
|
||||
});
|
||||
|
||||
window.set_fullscreen(opts.fullscreen_mode.as_winit_fullscreen(monitor, opts.physical_size));
|
||||
}
|
||||
|
||||
if opts.physical_size != last.physical_size {
|
||||
let size = PhysicalSize::new(opts.physical_size.x, opts.physical_size.y);
|
||||
if window.request_inner_size(size).is_some() {
|
||||
error!("request to increase window size failed");
|
||||
}
|
||||
}
|
||||
|
||||
if opts.decorated != last.decorated {
|
||||
window.set_decorations(opts.decorated);
|
||||
}
|
||||
|
||||
if opts.maximized != last.maximized {
|
||||
window.set_maximized(opts.maximized);
|
||||
}
|
||||
|
||||
if opts.minimized != last.minimized && opts.minimized.is_some() {
|
||||
window.set_minimized(opts.minimized.unwrap());
|
||||
}
|
||||
|
||||
if opts.visible != last.visible && opts.visible.is_some() {
|
||||
window.set_visible(opts.visible.unwrap());
|
||||
}
|
||||
|
||||
if opts.position != last.position && opts.position.is_some() {
|
||||
let pos = opts.position.unwrap();
|
||||
let pos = PhysicalPosition::new(pos.x, pos.y);
|
||||
window.set_outer_position(pos);
|
||||
}
|
||||
|
||||
if opts.resize_increments != last.resize_increments {
|
||||
window.set_resize_increments(opts.resize_increments);
|
||||
}
|
||||
|
||||
if opts.blur != last.blur {
|
||||
window.set_blur(opts.blur);
|
||||
}
|
||||
|
||||
if opts.cursor.appearance != last.cursor.appearance {
|
||||
match opts.cursor.appearance.clone() {
|
||||
CursorAppearance::Icon(icon) => window.set_cursor(winit::window::Cursor::Icon(icon)),
|
||||
CursorAppearance::Custom(custom) => window.set_cursor(winit::window::Cursor::Custom(custom)),
|
||||
}
|
||||
}
|
||||
|
||||
if opts.cursor.grab != last.cursor.grab {
|
||||
if let Err(e) = window.set_cursor_grab(opts.cursor.grab) {
|
||||
error!("could not set cursor grab mode: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
if opts.cursor.hittest != last.cursor.hittest {
|
||||
if let Err(e) = window.set_cursor_hittest(opts.cursor.hittest) {
|
||||
error!("could not set cursor hittest: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
if opts.cursor.visible != last.cursor.visible {
|
||||
window.set_cursor_visible(opts.cursor.visible);
|
||||
}
|
||||
|
||||
if opts.ime_allowed != last.ime_allowed {
|
||||
window.set_ime_allowed(opts.ime_allowed);
|
||||
}
|
||||
|
||||
if opts.physical_ime_cursor_area != last.physical_ime_cursor_area && opts.physical_ime_cursor_area.is_some() {
|
||||
let area = opts.physical_ime_cursor_area.unwrap();
|
||||
let pos = PhysicalPosition::new(area.position.x, area.position.y);
|
||||
let size = PhysicalSize::new(area.size.x, area.size.y);
|
||||
window.set_ime_cursor_area(pos, size);
|
||||
}
|
||||
|
||||
if opts.min_size != last.min_size {
|
||||
let s = opts.min_size.map(|s| LogicalSize::new(s.x, s.y));
|
||||
window.set_min_inner_size(s);
|
||||
}
|
||||
|
||||
if opts.max_size != last.max_size {
|
||||
let s = opts.max_size.map(|s| LogicalSize::new(s.x, s.y));
|
||||
window.set_max_inner_size(s);
|
||||
}
|
||||
|
||||
if opts.theme != last.theme {
|
||||
window.set_theme(opts.theme);
|
||||
}
|
||||
|
||||
if opts.title != last.title {
|
||||
window.set_title(&opts.title);
|
||||
}
|
||||
|
||||
if opts.transparent != last.transparent {
|
||||
window.set_transparent(opts.transparent);
|
||||
}
|
||||
|
||||
// compare the resource version and uuid. These will get changed
|
||||
// when the image is reloaded
|
||||
let opts_icon = opts.window_icon.as_ref()
|
||||
.map(|i| (i.version(), i.uuid()));
|
||||
let last_icon = last.window_icon.as_ref()
|
||||
.map(|i| (i.version(), i.uuid()));
|
||||
if opts_icon != last_icon {
|
||||
todo!("cannot set window icon yet");
|
||||
}
|
||||
|
||||
if opts.window_level != last.window_level {
|
||||
window.set_window_level(opts.window_level);
|
||||
}
|
||||
|
||||
if opts.physical_cursor_position != last.physical_cursor_position && opts.physical_cursor_position.is_some() {
|
||||
let pos = opts.physical_cursor_position.unwrap();
|
||||
let pos = PhysicalPosition::new(pos.x, pos.y);
|
||||
if let Err(e) = window.set_cursor_position(pos) {
|
||||
error!("failed to set cursor position: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
last.last = opts.clone();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl Plugin for WindowPlugin {
|
||||
fn setup(&mut self, app: &mut crate::game::App) {
|
||||
app.with_system("window_sync", window_sync_system, &[]);
|
||||
}
|
||||
}
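// A minimal sketch of how user code is expected to change a window: mutate the
// `WindowOptions` component only and let `window_sync_system` diff it against the cached
// `LastWindow` and call into winit. The `lyra_ecs::World`/`Entity` paths, the entity
// argument, and the title string are illustrative assumptions.
fn rename_window(world: &mut lyra_ecs::World, en: lyra_ecs::Entity) {
    let mut opts = world
        .view_one::<&mut WindowOptions>(en)
        .get()
        .expect("entity is missing WindowOptions");

    // no direct winit call is needed; on the sync system's next run it sees
    // `opts.title != last.title` and applies the change to the real window.
    opts.title = "Lyra Engine".to_string();
}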
|
|
@ -1,34 +0,0 @@
|
|||
[package]
|
||||
name = "lyra-gltf"
|
||||
version = "0.0.1"
|
||||
edition = "2021"
|
||||
|
||||
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
|
||||
|
||||
[dependencies]
|
||||
lyra-ecs = { path = "../lyra-ecs", features = [ "math" ] }
|
||||
lyra-reflect = { path = "../lyra-reflect", features = [ "math" ] }
|
||||
lyra-math = { path = "../lyra-math" }
|
||||
lyra-scene = { path = "../lyra-scene" }
|
||||
lyra-resource = { path = "../lyra-resource" }
|
||||
anyhow = "1.0.75"
|
||||
base64 = "0.21.4"
|
||||
crossbeam = { version = "0.8.4", features = [ "crossbeam-channel" ] }
|
||||
glam = "0.29.0"
|
||||
gltf = { version = "1.3.0", features = ["KHR_materials_pbrSpecularGlossiness", "KHR_materials_specular"] }
|
||||
image = "0.25.2"
|
||||
# not using custom matcher, or file type from file path
|
||||
infer = { version = "0.15.0", default-features = false }
|
||||
mime = "0.3.17"
|
||||
notify = "6.1.1"
|
||||
notify-debouncer-full = "0.3.1"
|
||||
#notify = { version = "6.1.1", default-features = false, features = [ "fsevent-sys", "macos_fsevent" ]} # disables crossbeam-channel
|
||||
percent-encoding = "2.3.0"
|
||||
thiserror = "1.0.48"
|
||||
tracing = "0.1.37"
|
||||
uuid = { version = "1.4.1", features = ["v4"] }
|
||||
instant = "0.1"
|
||||
async-std = "1.12.0"
|
||||
|
||||
[dev-dependencies]
|
||||
rand = "0.8.5"
|
|
@ -1,333 +0,0 @@
|
|||
use std::{ffi::OsStr, path::{Path, PathBuf}, sync::Arc};
|
||||
|
||||
use glam::{Quat, Vec3};
|
||||
use instant::Instant;
|
||||
use lyra_ecs::query;
|
||||
use lyra_math::Transform;
|
||||
use lyra_scene::{SceneGraph, SceneNode, WorldTransform};
|
||||
use thiserror::Error;
|
||||
|
||||
use lyra_resource::{loader::{LoaderError, PinedBoxLoaderFuture, ResourceLoader}, ResHandle, ResourceData, ResourceManager, ResourceStorage};
|
||||
use crate::{gltf_read_buffer_uri, UriReadError};
|
||||
|
||||
use super::{Gltf, GltfNode, Material, Mesh, MeshIndices, MeshVertexAttribute, VertexAttributeData};
|
||||
|
||||
use tracing::debug;
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
enum ModelLoaderError {
|
||||
#[error("The model ({0}) is missing the BIN section in the gltf file")]
|
||||
MissingBin(String),
|
||||
#[error("There was an error with decoding a uri defined in the model: '{0}'")]
|
||||
UriDecodingError(UriReadError),
|
||||
}
|
||||
|
||||
impl From<ModelLoaderError> for LoaderError {
|
||||
fn from(value: ModelLoaderError) -> Self {
|
||||
LoaderError::DecodingError(value.into())
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) struct GltfLoadContext<'a> {
|
||||
pub resource_manager: ResourceManager,
|
||||
pub gltf: &'a gltf::Gltf,
|
||||
/// Path to the gltf
|
||||
pub gltf_path: &'a str,
|
||||
/// The path to the directory that the gltf is contained in.
|
||||
pub gltf_parent_path: &'a str,
|
||||
/// List of buffers in the gltf
|
||||
pub buffers: &'a Vec<Vec<u8>>,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct GltfLoader;
|
||||
|
||||
impl GltfLoader {
|
||||
/* fn parse_uri(containing_path: &str, uri: &str) -> Option<Vec<u8>> {
|
||||
let uri = uri.strip_prefix("data")?;
|
||||
let (mime, data) = uri.split_once(",")?;
|
||||
|
||||
let (_mime, is_base64) = match mime.strip_suffix(";base64") {
|
||||
Some(mime) => (mime, true),
|
||||
None => (mime, false),
|
||||
};
|
||||
|
||||
if is_base64 {
|
||||
Some(base64::engine::general_purpose::STANDARD.decode(data).unwrap())
|
||||
} else {
|
||||
let full_path = format!("{containing_path}/{data}");
|
||||
let buf = std::fs::read(&full_path).unwrap();
|
||||
Some(buf)
|
||||
}
|
||||
} */
|
||||
|
||||
fn process_node(ctx: &mut GltfLoadContext, materials: &Vec<ResHandle<Material>>, scene: &mut SceneGraph, scene_parent: &SceneNode, gnode: gltf::Node<'_>) -> GltfNode {
|
||||
let mut node = GltfNode::default();
|
||||
|
||||
node.transform = {
|
||||
let gt = gnode.transform();
|
||||
let (pos, rot, scale) = gt.decomposed();
|
||||
|
||||
Transform::new(Vec3::from(pos), Quat::from_array(rot), Vec3::from(scale))
|
||||
};
|
||||
node.name = gnode.name().map(str::to_string);
|
||||
|
||||
let scene_node = scene.add_node_under(scene_parent, (WorldTransform::from(node.transform), node.transform));
|
||||
|
||||
if let Some(mesh) = gnode.mesh() {
|
||||
let mut new_mesh = Mesh::default();
|
||||
|
||||
for prim in mesh.primitives() {
|
||||
let reader = prim.reader(|buf| Some(ctx.buffers[buf.index()].as_slice()));
|
||||
|
||||
// read the positions
|
||||
if let Some(pos) = reader.read_positions() {
|
||||
if prim.mode() != gltf::mesh::Mode::Triangles {
|
||||
todo!("Load position primitives that aren't triangles"); // TODO
|
||||
}
|
||||
|
||||
let pos: Vec<glam::Vec3> = pos.map(|t| t.into()).collect();
|
||||
new_mesh.add_attribute(MeshVertexAttribute::Position, VertexAttributeData::Vec3(pos));
|
||||
}
|
||||
|
||||
// read the normals
|
||||
if let Some(norms) = reader.read_normals() {
|
||||
let norms: Vec<glam::Vec3> = norms.map(|t| t.into()).collect();
|
||||
new_mesh.add_attribute(MeshVertexAttribute::Normals, VertexAttributeData::Vec3(norms));
|
||||
}
|
||||
|
||||
// read the tangents
|
||||
if let Some(tangents) = reader.read_tangents() {
|
||||
let tangents: Vec<glam::Vec4> = tangents.map(|t| t.into()).collect();
|
||||
new_mesh.add_attribute(MeshVertexAttribute::Tangents, VertexAttributeData::Vec4(tangents));
|
||||
}
|
||||
|
||||
// read tex coords
|
||||
if let Some(tex_coords) = reader.read_tex_coords(0) {
|
||||
let tex_coords: Vec<glam::Vec2> = tex_coords.into_f32().map(|t| t.into()).collect();
|
||||
new_mesh.add_attribute(MeshVertexAttribute::TexCoords, VertexAttributeData::Vec2(tex_coords));
|
||||
}
|
||||
|
||||
// read the indices
|
||||
if let Some(indices) = reader.read_indices() {
|
||||
let indices: MeshIndices = match indices {
|
||||
// wgpu doesn't support u8 indices, so those must be converted to u16
|
||||
gltf::mesh::util::ReadIndices::U8(i) => MeshIndices::U16(i.map(|i| i as u16).collect()),
|
||||
gltf::mesh::util::ReadIndices::U16(i) => MeshIndices::U16(i.collect()),
|
||||
gltf::mesh::util::ReadIndices::U32(i) => MeshIndices::U32(i.collect()),
|
||||
};
|
||||
|
||||
new_mesh.indices = Some(indices);
|
||||
}
|
||||
|
||||
let mat = materials.get(prim.material().index().unwrap()).unwrap();
|
||||
new_mesh.material = Some(mat.clone());
|
||||
}
|
||||
|
||||
let handle = ResHandle::new_ready(None, new_mesh);
|
||||
ctx.resource_manager.store_uuid(handle.clone());
|
||||
node.mesh = Some(handle.clone());
|
||||
scene.insert(&scene_node, (handle.clone(), handle.untyped_clone()));
|
||||
}
|
||||
|
||||
for child in gnode.children() {
|
||||
let cmesh = GltfLoader::process_node(ctx, materials, scene, &scene_node, child);
|
||||
node.children.push(cmesh);
|
||||
}
|
||||
|
||||
node
|
||||
}
|
||||
|
||||
fn extensions() -> &'static [&'static str] {
|
||||
&[
|
||||
"gltf", "glb"
|
||||
]
|
||||
}
|
||||
|
||||
fn does_support_file(path: &str) -> bool {
|
||||
match Path::new(path).extension().and_then(OsStr::to_str) {
|
||||
Some(ext) => {
|
||||
Self::extensions().contains(&ext)
|
||||
},
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceLoader for GltfLoader {
|
||||
fn extensions(&self) -> &[&str] {
|
||||
&[
|
||||
"gltf", "glb"
|
||||
]
|
||||
}
|
||||
|
||||
fn mime_types(&self) -> &[&str] {
|
||||
&[]
|
||||
}
|
||||
|
||||
fn load(&self, resource_manager: ResourceManager, path: &str) -> PinedBoxLoaderFuture {
|
||||
// can't use a &str across the async boundary, so take ownership of the path
|
||||
let path = path.to_string();
|
||||
|
||||
Box::pin(async move {
|
||||
// check if the file is supported by this loader
|
||||
if !Self::does_support_file(&path) {
|
||||
return Err(LoaderError::UnsupportedExtension(path.to_string()));
|
||||
}
|
||||
|
||||
let mut parent_path = PathBuf::from(&path);
|
||||
parent_path.pop();
|
||||
let parent_path = parent_path.display().to_string();
|
||||
|
||||
let gltf = gltf::Gltf::open(&path)
|
||||
.map_err(|ge| LoaderError::DecodingError(ge.into()))?;
|
||||
|
||||
let mut use_bin = false;
|
||||
let buffers: Vec<Vec<u8>> = gltf.buffers().flat_map(|b| match b.source() {
|
||||
gltf::buffer::Source::Bin => {
|
||||
use_bin = true;
|
||||
gltf.blob.as_deref().map(|v| v.to_vec())
|
||||
.ok_or(ModelLoaderError::MissingBin(path.to_string()))
|
||||
},
|
||||
gltf::buffer::Source::Uri(uri) => gltf_read_buffer_uri(&parent_path, uri)
|
||||
.map_err(ModelLoaderError::UriDecodingError),
|
||||
}).collect();
|
||||
|
||||
let mut gltf_out = super::Gltf::default();
|
||||
|
||||
let mut context = GltfLoadContext {
|
||||
resource_manager: resource_manager.clone(),
|
||||
gltf: &gltf,
|
||||
gltf_path: &path,
|
||||
gltf_parent_path: &parent_path,
|
||||
buffers: &buffers,
|
||||
};
|
||||
|
||||
let start_inst = Instant::now();
|
||||
let materials: Vec<ResHandle<Material>> = gltf.materials()
|
||||
.map(|mat| ResHandle::new_ready(None, Material::from_gltf(&mut context, mat)))
|
||||
.collect();
|
||||
let mat_time = Instant::now() - start_inst;
|
||||
debug!("Loaded {} materials in {}s", materials.len(), mat_time.as_secs_f32());
|
||||
|
||||
for (_idx, scene) in gltf.scenes().enumerate() {
|
||||
let mut graph = SceneGraph::new();
|
||||
let root_node = graph.root_node();
|
||||
|
||||
for node in scene.nodes() {
|
||||
let n = GltfLoader::process_node(&mut context, &materials, &mut graph, &root_node, node);
|
||||
|
||||
if let Some(mesh) = n.mesh {
|
||||
gltf_out.meshes.push(mesh.clone());
|
||||
}
|
||||
}
|
||||
|
||||
for en in graph.world().view_iter::<query::Entities>() {
|
||||
graph.world().view_one::<(&WorldTransform, &Transform)>(en).get().expect("Scene node is missing world and local transform bundle!");
|
||||
}
|
||||
|
||||
let graph = ResHandle::new_ready(Some(path.as_str()), graph);
|
||||
gltf_out.scenes.push(graph);
|
||||
|
||||
/* let start_inst = Instant::now();
|
||||
let nodes: Vec<GltfNode> = scene.nodes()
|
||||
.map(|node| ModelLoader::process_node(&mut context, &materials, node))
|
||||
.collect();
|
||||
let node_time = Instant::now() - start_inst;
|
||||
|
||||
debug!("Loaded {} nodes in the scene in {}s", nodes.len(), node_time.as_secs_f32());
|
||||
|
||||
for mesh in nodes.iter().map(|n| &n.mesh) {
|
||||
if let Some(mesh) = mesh {
|
||||
gltf_out.meshes.push(mesh.clone());
|
||||
}
|
||||
}
|
||||
|
||||
let scene = GltfScene {
|
||||
nodes,
|
||||
};
|
||||
let scene = ResHandle::new_ready(Some(path.as_str()), scene);
|
||||
gltf_out.scenes.push(scene); */
|
||||
}
|
||||
|
||||
gltf_out.materials = materials;
|
||||
|
||||
Ok(Box::new(gltf_out) as Box<dyn ResourceData>)
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(unused_variables)]
|
||||
fn load_bytes(&self, resource_manager: ResourceManager, bytes: Vec<u8>, offset: usize, length: usize) -> PinedBoxLoaderFuture {
|
||||
todo!()
|
||||
}
|
||||
|
||||
fn create_erased_handle(&self) -> Arc<dyn ResourceStorage> {
|
||||
Arc::from(ResHandle::<Gltf>::new_loading(None))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use lyra_ecs::{query::Entities, relation::ChildOf};
|
||||
use lyra_scene::WorldTransform;
|
||||
|
||||
//use lyra_resource::tests::busy_wait_resource;
|
||||
|
||||
use super::*;
|
||||
|
||||
fn test_file_path(path: &str) -> String {
|
||||
let manifest = std::env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||
|
||||
format!("{manifest}/test_files/gltf/{path}")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_loading() {
|
||||
let path = test_file_path("texture-embedded.gltf");
|
||||
|
||||
let manager = ResourceManager::new();
|
||||
let gltf = manager.request::<Gltf>(&path).unwrap();
|
||||
assert!(gltf.wait_for_load_timeout(Duration::from_secs(10)), "failed to load gltf, hit 10 second timeout");
|
||||
let gltf = gltf.data_ref().unwrap();
|
||||
|
||||
assert_eq!(gltf.scenes.len(), 1);
|
||||
let scene = &gltf.scenes[0]
|
||||
.data_ref().unwrap();
|
||||
|
||||
let mut node = None;
|
||||
//scene.world().view::<SceneNodeFlag()
|
||||
scene.traverse_down::<_, &WorldTransform>(|_, no, tran| {
|
||||
tran.get().expect("scene node is missing a WorldTransform");
|
||||
node = Some(no.clone());
|
||||
});
|
||||
|
||||
let world = scene.world();
|
||||
let node = node.unwrap();
|
||||
|
||||
let data = world.view_one::<(&ResHandle<Mesh>, &Transform)>(node.entity()).get();
|
||||
debug_assert!(data.is_some(), "The mesh was not loaded"); // transform will always be there
|
||||
let data = data.unwrap();
|
||||
|
||||
// ensure there are no children of the node
|
||||
assert_eq!(
|
||||
world.view::<Entities>()
|
||||
.relates_to::<ChildOf>(node.entity())
|
||||
.into_iter()
|
||||
.count(),
|
||||
0
|
||||
);
|
||||
|
||||
assert_eq!(*data.1, Transform::from_xyz(0.0, 0.0, 0.0));
|
||||
|
||||
let mesh = data.0;
|
||||
let mesh = mesh.data_ref().unwrap();
|
||||
assert!(mesh.position().unwrap().len() > 0);
|
||||
assert!(mesh.normals().unwrap().len() > 0);
|
||||
assert!(mesh.tex_coords().unwrap().len() > 0);
|
||||
assert!(mesh.indices.clone().unwrap().len() > 0);
|
||||
assert!(mesh.material.as_ref().unwrap().data_ref().unwrap().base_color_texture.is_some());
|
||||
}
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
use lyra_math::Transform;
|
||||
use lyra_resource::{optionally_add_to_dep, ResourceData, UntypedResHandle};
|
||||
|
||||
use super::Mesh;
|
||||
use crate::ResHandle;
|
||||
|
||||
/// A Node in the Gltf file
|
||||
#[derive(Clone, Default)]
|
||||
pub struct GltfNode {
|
||||
pub name: Option<String>,
|
||||
pub mesh: Option<ResHandle<Mesh>>,
|
||||
pub transform: Transform,
|
||||
pub children: Vec<GltfNode>,
|
||||
}
|
||||
|
||||
impl ResourceData for GltfNode {
|
||||
fn dependencies(&self) -> Vec<crate::UntypedResHandle> {
|
||||
let mut deps: Vec<UntypedResHandle> = self.children.iter()
|
||||
.flat_map(|c| c.mesh.as_ref().map(|h| h.untyped_clone()))
|
||||
.collect();
|
||||
|
||||
optionally_add_to_dep(&mut deps, &self.mesh);
|
||||
|
||||
deps
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
}
|
|
@ -1,22 +0,0 @@
|
|||
#[derive(Clone, Copy, PartialEq)]
|
||||
pub struct Area<P, S>
|
||||
where
|
||||
P: Clone + Copy + PartialEq,
|
||||
S: Clone + Copy + PartialEq,
|
||||
{
|
||||
pub position: P,
|
||||
pub size: S
|
||||
}
|
||||
|
||||
impl<P, S> Area<P, S>
|
||||
where
|
||||
P: Clone + Copy + PartialEq,
|
||||
S: Clone + Copy + PartialEq,
|
||||
{
|
||||
pub fn new(pos: P, size: S) -> Self {
|
||||
Self {
|
||||
position: pos,
|
||||
size,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,148 +0,0 @@
|
|||
use lyra_math::Angle;
|
||||
use lyra_reflect_derive::{impl_reflect_simple_struct, impl_reflect_trait_value};
|
||||
|
||||
use crate::{lyra_engine, Enum, Method, Reflect, ReflectMut, ReflectRef};
|
||||
|
||||
impl_reflect_simple_struct!(lyra_math::Vec2, fields(x = f32, y = f32));
|
||||
impl_reflect_simple_struct!(lyra_math::Vec3, fields(x = f32, y = f32, z = f32));
|
||||
impl_reflect_simple_struct!(lyra_math::Vec4, fields(x = f32, y = f32, z = f32, w = f32));
|
||||
impl_reflect_simple_struct!(lyra_math::Quat, fields(x = f32, y = f32, z = f32, w = f32));
|
||||
|
||||
impl_reflect_simple_struct!(
|
||||
lyra_math::Transform,
|
||||
fields(
|
||||
translation = lyra_math::Vec3,
|
||||
rotation = lyra_math::Quat,
|
||||
scale = lyra_math::Vec3
|
||||
)
|
||||
);
|
||||
|
||||
impl_reflect_trait_value!(lyra_math::Mat4);
|
||||
|
||||
impl Reflect for Angle {
|
||||
fn name(&self) -> String {
|
||||
"Angle".into()
|
||||
}
|
||||
|
||||
fn type_id(&self) -> std::any::TypeId {
|
||||
std::any::TypeId::of::<Self>()
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_boxed_any(self: Box<Self>) -> Box<dyn std::any::Any> {
|
||||
self
|
||||
}
|
||||
|
||||
fn apply(&mut self, val: &dyn Reflect) {
|
||||
if let ReflectRef::Enum(e) = val.reflect_ref() {
|
||||
let s = e.as_any().downcast_ref::<Self>()
|
||||
.expect("cannot apply mismatched reflected enum");
|
||||
*self = *s;
|
||||
} else {
|
||||
panic!("Provided value was not an enum!");
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_inner(&self) -> Box<dyn Reflect> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn reflect_ref(&self) -> crate::ReflectRef {
|
||||
ReflectRef::Enum(self)
|
||||
}
|
||||
|
||||
fn reflect_mut(&mut self) -> crate::ReflectMut {
|
||||
ReflectMut::Enum(self)
|
||||
}
|
||||
|
||||
fn reflect_val(&self) -> &dyn Reflect {
|
||||
self
|
||||
}
|
||||
|
||||
fn reflect_val_mut(&mut self) -> &mut dyn Reflect {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Enum for Angle {
|
||||
fn field(&self, _: &str) -> Option<&dyn Reflect> {
|
||||
// no struct variants
|
||||
None
|
||||
}
|
||||
|
||||
fn field_mut(&mut self, _: &str) -> Option<&mut dyn Reflect> {
|
||||
// no struct variants
|
||||
None
|
||||
}
|
||||
|
||||
fn field_at(&self, idx: usize) -> Option<&dyn Reflect> {
|
||||
// all variants only have one tuple field
|
||||
if idx != 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
match self {
|
||||
Angle::Degrees(v) => Some(v),
|
||||
Angle::Radians(v) => Some(v),
|
||||
}
|
||||
}
|
||||
|
||||
fn field_at_mut(&mut self, idx: usize) -> Option<&mut dyn Reflect> {
|
||||
// all variants only have one tuple field
|
||||
if idx != 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
match self {
|
||||
Angle::Degrees(v) => Some(v),
|
||||
Angle::Radians(v) => Some(v),
|
||||
}
|
||||
}
|
||||
|
||||
fn field_name_at(&self, _: usize) -> Option<String> {
|
||||
// no struct variants
|
||||
None
|
||||
}
|
||||
|
||||
fn has_field(&self, _: &str) -> bool {
|
||||
// no struct variants
|
||||
false
|
||||
}
|
||||
|
||||
fn fields_len(&self) -> usize {
|
||||
1
|
||||
}
|
||||
|
||||
fn variants_len(&self) -> usize {
|
||||
2
|
||||
}
|
||||
|
||||
fn variant_name(&self) -> String {
|
||||
match self {
|
||||
Angle::Degrees(_) => "degrees".into(),
|
||||
Angle::Radians(_) => "radians".into(),
|
||||
}
|
||||
}
|
||||
|
||||
fn variant_index(&self) -> usize {
|
||||
match self {
|
||||
Angle::Degrees(_) => 0,
|
||||
Angle::Radians(_) => 1,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_variant_name(&self, name: &str) -> bool {
|
||||
self.variant_name() == name
|
||||
}
|
||||
|
||||
fn variant_type(&self) -> crate::EnumType {
|
||||
crate::EnumType::Tuple
|
||||
}
|
||||
}
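// A small test-style sketch of the `Enum` reflection impl above, assuming `Angle`'s
// variants hold a plain float value.
#[cfg(test)]
mod angle_reflect_tests {
    use super::*;

    #[test]
    fn angle_reflects_as_tuple_enum() {
        let angle = Angle::Degrees(90.0);

        // variant metadata comes straight from the impl above
        assert_eq!(angle.variant_name(), "degrees");
        assert_eq!(angle.variant_index(), 0);
        assert_eq!(angle.variants_len(), 2);

        // every variant is a single-field tuple, so there are no named fields
        assert_eq!(angle.fields_len(), 1);
        assert!(angle.field_at(0).is_some());
        assert!(angle.field_at(1).is_none());
        assert!(!angle.has_field("degrees"));
    }
}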
|
|
@ -1,129 +0,0 @@
|
|||
use std::{any::TypeId, collections::HashMap, hash::Hash};
|
||||
|
||||
use crate::Reflect;
|
||||
|
||||
pub trait ReflectedMap: Reflect {
|
||||
/// Get the value at the provided `key` in the map.
|
||||
///
|
||||
/// `key` must be the same type as the key in the map.
|
||||
fn reflect_get(&self, key: &dyn Reflect) -> Option<&dyn Reflect>;
|
||||
|
||||
/// Get the nth value in the map.
|
||||
fn reflect_get_nth_value(&self, n: usize) -> Option<&dyn Reflect>;
|
||||
|
||||
/// Get a mutable borrow to the nth value in the map.
|
||||
fn reflect_get_nth_value_mut(&mut self, n: usize) -> Option<&mut dyn Reflect>;
|
||||
|
||||
/// Insert a value at the provided `key` in the map.
|
||||
///
|
||||
/// If there is already a value at `key`, the old value will be returned.
|
||||
fn reflect_insert(&mut self, key: Box<dyn Reflect>, val: Box<dyn Reflect>) -> Option<Box<dyn Reflect>>;
|
||||
|
||||
/// Returns a boolean indicating if the map contains a key as `key`.
|
||||
///
|
||||
/// `key` must be the same type as the key in the map.
|
||||
fn reflect_contains_key(&self, key: &dyn Reflect) -> bool;
|
||||
|
||||
/// Returns the length of the map
|
||||
fn reflect_len(&self) -> usize;
|
||||
|
||||
fn reflect_capacity(&self) -> usize;
|
||||
}
|
||||
|
||||
impl<K: PartialEq + Eq + Hash + Clone + Reflect, V: Clone + Reflect> Reflect for HashMap<K, V> {
|
||||
fn name(&self) -> String {
|
||||
format!("HashMap<?, ?>") // TODO: get types of the generics
|
||||
}
|
||||
|
||||
fn type_id(&self) -> std::any::TypeId {
|
||||
TypeId::of::<Self>()
|
||||
}
|
||||
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_boxed_any(self: Box<Self>) -> Box<dyn std::any::Any> {
|
||||
self
|
||||
}
|
||||
|
||||
fn apply(&mut self, val: &dyn Reflect) {
|
||||
let val = val.as_any().downcast_ref::<Self>()
|
||||
.expect("The provided value is not the same type the HashMap");
|
||||
|
||||
for (k, v) in val.iter() {
|
||||
let (k, v) = (k.clone(), v.clone());
|
||||
self.insert(k, v);
|
||||
}
|
||||
}
|
||||
|
||||
fn clone_inner(&self) -> Box<dyn Reflect> {
|
||||
Box::new(self.clone())
|
||||
}
|
||||
|
||||
fn reflect_ref(&self) -> crate::ReflectRef {
|
||||
crate::ReflectRef::Map(self)
|
||||
}
|
||||
|
||||
fn reflect_mut(&mut self) -> crate::ReflectMut {
|
||||
crate::ReflectMut::Map(self)
|
||||
}
|
||||
|
||||
fn reflect_val(&self) -> &dyn Reflect {
|
||||
self
|
||||
}
|
||||
|
||||
fn reflect_val_mut(&mut self) -> &mut dyn Reflect {
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl<K: PartialEq + Eq + Hash + Clone + Reflect, V: Clone + Reflect> ReflectedMap for HashMap<K, V> {
|
||||
fn reflect_get(&self, key: &dyn Reflect) -> Option<&dyn Reflect> {
|
||||
let key = key.as_any().downcast_ref::<K>()
|
||||
.expect("The provided key is not the same type as the HashMap's key");
|
||||
|
||||
self.get(key)
|
||||
.map(|v| v.reflect_val())
|
||||
}
|
||||
|
||||
fn reflect_get_nth_value(&self, n: usize) -> Option<&dyn Reflect> {
|
||||
self.values().nth(n)
|
||||
.map(|v| v.reflect_val())
|
||||
}
|
||||
|
||||
fn reflect_get_nth_value_mut(&mut self, n: usize) -> Option<&mut dyn Reflect> {
|
||||
self.values_mut().nth(n)
|
||||
.map(|v| v.reflect_val_mut())
|
||||
}
|
||||
|
||||
fn reflect_insert(&mut self, key: Box<dyn Reflect>, val: Box<dyn Reflect>) -> Option<Box<dyn Reflect>> {
|
||||
let key = key.as_boxed_any();
|
||||
let key = *key.downcast::<K>().expect("The provided key is not the same type as the HashMap's key");
|
||||
|
||||
let val = val.as_boxed_any();
|
||||
let val = *val.downcast::<V>().expect("The provided value is not the same type as the HashMap's value");
|
||||
|
||||
self.insert(key, val)
|
||||
.map(|v| Box::new(v) as Box<dyn Reflect>)
|
||||
}
|
||||
|
||||
fn reflect_contains_key(&self, key: &dyn Reflect) -> bool {
|
||||
let key = key.as_any().downcast_ref::<K>()
|
||||
.expect("The provided key is not the same type as the HashMap's key");
|
||||
|
||||
self.contains_key(key)
|
||||
}
|
||||
|
||||
fn reflect_len(&self) -> usize {
|
||||
self.len()
|
||||
}
|
||||
|
||||
fn reflect_capacity(&self) -> usize {
|
||||
self.capacity()
|
||||
}
|
||||
}
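// A minimal test-style sketch of driving a `HashMap` through the type-erased
// `ReflectedMap` interface above. It assumes the crate provides `Reflect` impls for
// `String` and `u32`; swap in any reflected key/value types otherwise.
#[cfg(test)]
mod reflected_map_tests {
    use super::*;

    #[test]
    fn hashmap_through_dyn_reflected_map() {
        let mut map: HashMap<String, u32> = HashMap::new();
        map.insert("a".to_string(), 1);

        let reflected: &mut dyn ReflectedMap = &mut map;

        // inserting through the erased interface requires boxed values of the exact K/V types
        let old = reflected.reflect_insert(Box::new("b".to_string()), Box::new(2u32));
        assert!(old.is_none());

        assert!(reflected.reflect_contains_key(&"a".to_string()));
        assert_eq!(reflected.reflect_len(), 2);

        // values come back as `&dyn Reflect` and can be downcast to the concrete type
        let val = reflected.reflect_get(&"a".to_string()).unwrap();
        assert_eq!(*val.as_any().downcast_ref::<u32>().unwrap(), 1);
    }
}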
|
|
@ -1,109 +0,0 @@
|
|||
use std::sync::Arc;
|
||||
|
||||
use crate::{loader::LoaderError, ResourceState, UntypedResHandle};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub enum DependencyState {
|
||||
Loading,
|
||||
Error {
|
||||
/// The resource that had the error.
|
||||
handle: UntypedResHandle,
|
||||
/// The error that the resource ran into when loading.
|
||||
error: Arc<LoaderError>,
|
||||
},
|
||||
Ready,
|
||||
}
|
||||
|
||||
impl DependencyState {
|
||||
/// Creates a DependencyState from a resource by retrieving its state. Does not include
|
||||
/// the states of the dependencies.
|
||||
pub fn shallow_from_res(handle: &UntypedResHandle) -> DependencyState {
|
||||
let res = handle.read();
|
||||
match &res.state {
|
||||
ResourceState::Loading => DependencyState::Loading,
|
||||
ResourceState::Error(er) => DependencyState::Error {
|
||||
handle: handle.clone(),
|
||||
error: er.clone(),
|
||||
},
|
||||
ResourceState::Ready(_) => DependencyState::Ready,
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve the state of the handle and its dependencies, does not recursively retrieve.
|
||||
pub fn from_res(handle: &UntypedResHandle) -> DependencyState {
|
||||
let res = handle.read();
|
||||
match &res.state {
|
||||
ResourceState::Loading => DependencyState::Loading,
|
||||
ResourceState::Error(er) => DependencyState::Error {
|
||||
handle: handle.clone(),
|
||||
error: er.clone(),
|
||||
},
|
||||
ResourceState::Ready(res) => {
|
||||
let mut lowest_state = DependencyState::Ready;
|
||||
|
||||
for dep in res.dependencies() {
|
||||
let state = DependencyState::shallow_from_res(&dep);
|
||||
|
||||
// try to find the "lowest" dependency. Meaning the state of a dependency
|
||||
// that would stop the parent from being ready.
|
||||
if state.is_loading() {
|
||||
lowest_state = state;
|
||||
break;
|
||||
} else if state.is_error() {
|
||||
lowest_state = state;
|
||||
break;
|
||||
}
|
||||
|
||||
// anything else would be loaded, so no need to update `lowest_state`
|
||||
}
|
||||
|
||||
lowest_state
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieve the state of the handle and its dependencies, recursing into each dependency's own dependencies.
|
||||
pub fn from_res_recurse(handle: &UntypedResHandle) -> DependencyState {
|
||||
let res = handle.read();
|
||||
match &res.state {
|
||||
ResourceState::Loading => DependencyState::Loading,
|
||||
ResourceState::Error(er) => DependencyState::Error {
|
||||
handle: handle.clone(),
|
||||
error: er.clone(),
|
||||
},
|
||||
ResourceState::Ready(res) => {
|
||||
let mut lowest_state = DependencyState::Ready;
|
||||
|
||||
for dep in res.dependencies() {
|
||||
let state = DependencyState::from_res_recurse(&dep);
|
||||
|
||||
// try to find the "lowest" dependency. Meaning the state of a dependency
|
||||
// that would stop the parent from being ready.
|
||||
if state.is_loading() {
|
||||
lowest_state = state;
|
||||
break;
|
||||
} else if state.is_error() {
|
||||
lowest_state = state;
|
||||
break;
|
||||
}
|
||||
|
||||
// anything else would be loaded, so no need to update `lowest_state`
|
||||
}
|
||||
|
||||
lowest_state
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn is_ready(&self) -> bool {
|
||||
matches!(self, DependencyState::Ready)
|
||||
}
|
||||
|
||||
pub fn is_error(&self) -> bool {
|
||||
matches!(self, DependencyState::Error { handle: _, error: _ })
|
||||
}
|
||||
|
||||
pub fn is_loading(&self) -> bool {
|
||||
matches!(self, DependencyState::Loading)
|
||||
}
|
||||
}
|
|
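The two constructors above differ only in how deep they walk the dependency tree. As a minimal sketch of how a caller might poll a handle with this API: the function name, the polling loop, and the sleep interval below are illustrative, not part of this diff; only `DependencyState`, `UntypedResHandle`, and the `is_*` checks come from the code above.

```rust
use std::{thread, time::Duration};

use lyra_resource::{DependencyState, UntypedResHandle};

// `handle` is an UntypedResHandle obtained from a request elsewhere.
fn block_until_settled(handle: &UntypedResHandle) {
    loop {
        // shallow_from_res only looks at the resource itself;
        // from_res_recurse walks the whole dependency tree.
        let state = DependencyState::from_res_recurse(handle);

        if state.is_ready() {
            break;
        }
        if state.is_error() {
            panic!("a dependency failed to load");
        }

        // still loading somewhere in the tree; back off briefly before polling again
        thread::sleep(Duration::from_millis(10));
    }
}
```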
@ -1,148 +0,0 @@
use std::{ffi::OsStr, path::Path, sync::Arc};

use async_std::io::ReadExt;
use image::ImageError;
use tracing::debug;

use crate::{Image, ResHandle, ResourceData, ResourceManager};

use super::{LoaderError, PinedBoxLoaderFuture, ResourceLoader};

impl From<ImageError> for LoaderError {
    fn from(value: ImageError) -> Self {
        LoaderError::DecodingError(value.into())
    }
}

/// A struct that implements the `ResourceLoader` trait used for loading images.
#[derive(Default)]
pub struct ImageLoader;

impl ImageLoader {
    fn extensions() -> &'static [&'static str] {
        &[
            // the extensions of these are the names of the formats
            "bmp", "dds", "gif", "ico", "jpeg", "jpg", "png", "qoi", "tga", "tiff", "webp",

            // farbfeld
            "ff",

            // pnm
            "pnm", "pbm", "pgm", "ppm",
        ]
    }

    fn does_support_file(path: &str) -> bool {
        match Path::new(path).extension().and_then(OsStr::to_str) {
            Some(ext) => Self::extensions().contains(&ext),
            _ => false,
        }
    }
}

impl ResourceLoader for ImageLoader {
    fn extensions(&self) -> &[&str] {
        Self::extensions()
    }

    fn mime_types(&self) -> &[&str] {
        &[
            "image/bmp", "image/vnd.ms-dds", "image/gif", "image/x-icon", "image/jpeg",
            "image/png", "image/qoi", "image/tga", "image/tiff", "image/webp",

            // no known mime for farbfeld

            // pnm, pbm, pgm, ppm
            "image/x-portable-anymap", "image/x-portable-bitmap", "image/x-portable-graymap", "image/x-portable-pixmap",
        ]
    }

    fn load(&self, _resource_manager: ResourceManager, path: &str) -> PinedBoxLoaderFuture {
        let path = path.to_string();
        Box::pin(async move {
            // check if the file is supported by this loader
            if !Self::does_support_file(&path) {
                return Err(LoaderError::UnsupportedExtension(path.to_string()));
            }

            // read file bytes
            let mut file = async_std::fs::File::open(path).await?;
            let mut buf = vec![];
            file.read_to_end(&mut buf).await?;

            // load the image and construct Resource<Texture>
            let image = image::load_from_memory(&buf)
                .map_err(|e| match e {
                    ImageError::IoError(e) => LoaderError::IoError(e),
                    _ => LoaderError::DecodingError(e.into()),
                })?;
            let image = Image::from(image);
            let image = Box::new(image) as Box<dyn ResourceData>;

            Ok(image)
        })
    }

    fn load_bytes(&self, _resource_manager: ResourceManager, bytes: Vec<u8>, offset: usize, length: usize) -> PinedBoxLoaderFuture {
        Box::pin(async move {
            let image = image::load_from_memory(&bytes[offset..(length-offset)])
                .map_err(|e| match e {
                    ImageError::IoError(e) => LoaderError::IoError(e),
                    _ => LoaderError::DecodingError(e.into()),
                })?;
            let image = Image::from(image);
            debug!("Finished loading image ({} bytes)", length);

            Ok(Box::new(image) as Box<dyn ResourceData>)
        })
    }

    fn create_erased_handle(&self) -> Arc<dyn crate::ResourceStorage> {
        Arc::from(ResHandle::<Image>::new_loading(None))
    }
}

#[cfg(test)]
mod tests {
    use async_std::task;

    use super::*;

    fn get_image(path: &str) -> String {
        let manifest = std::env::var("CARGO_MANIFEST_DIR").unwrap();

        format!("{manifest}/test_files/img/{path}")
    }

    #[test]
    fn check_unsupport() {
        let loader = ImageLoader::default();
        assert_eq!(loader.does_support_file("test.gltf"), false);
    }

    /// Tests loading an image
    #[test]
    fn image_load() {
        let manager = ResourceManager::new();
        let loader = ImageLoader::default();

        task::block_on(async move {
            let r = loader.load(manager, &get_image("squiggles.png")).await.unwrap();
            let a = r.as_ref();
            a.as_any().downcast_ref::<Image>().unwrap();
        });
    }

    #[test]
    fn image_load_unsupported() {
        let manager = ResourceManager::new();
        let loader = ImageLoader::default();

        task::block_on(async move {
            // this file doesn't exist and is also not supported
            assert!(loader.load(manager, &get_image("squiggles.jfeh")).await.is_err())
        });
    }
}
@ -1,599 +0,0 @@
|
|||
use std::{any::{Any, TypeId}, marker::PhantomData, ops::{Deref, DerefMut}, sync::{Arc, Condvar, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard}, time::Duration};
|
||||
|
||||
use lyra_ecs::Component;
|
||||
use lyra_reflect::Reflect;
|
||||
use crate::{loader::LoaderError, lyra_engine, DependencyState};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::ResourceStorage;
|
||||
|
||||
pub fn optionally_add_to_dep<R: ResourceData>(deps: &mut Vec<UntypedResHandle>, handle: &Option<ResHandle<R>>) {
|
||||
if let Some(h) = handle {
|
||||
deps.push(h.untyped_clone());
|
||||
}
|
||||
}
|
||||
|
||||
/// A trait that each resource type should implement.
|
||||
pub trait ResourceData: Send + Sync + Any + 'static {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
|
||||
/// Collect the dependencies of the Resource.
|
||||
fn dependencies(&self) -> Vec<UntypedResHandle>;
|
||||
|
||||
/// Recursively collect the dependencies of the Resource.
|
||||
///
|
||||
/// If a dependency has a child dependency, it will not show up in this list until its
|
||||
/// parent is loaded.
|
||||
fn recur_dependencies(&self) -> Vec<UntypedResHandle> {
|
||||
let deps = self.dependencies();
|
||||
let mut all_deps = deps.clone();
|
||||
|
||||
for dep in deps.into_iter() {
|
||||
let dep = dep.read();
|
||||
match &dep.state {
|
||||
ResourceState::Ready(data) => {
|
||||
let mut deps_dep = data.dependencies();
|
||||
all_deps.append(&mut deps_dep);
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
all_deps
|
||||
}
|
||||
}
|
||||
|
||||
//impl<T: Send + Sync + Reflect> ResourceData for T { }
|
||||
|
||||
pub enum ResourceState {
|
||||
Loading,
|
||||
Error(Arc<LoaderError>),
|
||||
Ready(Box<dyn ResourceData>),
|
||||
}
|
||||
|
||||
impl ResourceState {
|
||||
/// Returns a boolean indicating if the state is still loading.
|
||||
pub fn is_loading(&self) -> bool {
|
||||
matches!(self, ResourceState::Loading)
|
||||
}
|
||||
|
||||
pub fn is_error(&self) -> bool {
|
||||
matches!(self, ResourceState::Error(_))
|
||||
}
|
||||
|
||||
pub fn is_ready(&self) -> bool {
|
||||
matches!(self, ResourceState::Ready(_))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ResourceDataRefMut<'a, T> {
|
||||
guard: std::sync::RwLockWriteGuard<'a, UntypedResource>,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: 'static> std::ops::Deref for ResourceDataRefMut<'a, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
match &self.guard.state {
|
||||
ResourceState::Ready(d) => {
|
||||
// for some reason, if I didn't use `.as_ref`, the downcast would fail.
|
||||
let d = d.as_ref().as_any();
|
||||
d.downcast_ref::<T>().unwrap()
|
||||
},
|
||||
_ => unreachable!() // ResHandler::data_ref shouldn't allow this to run
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, T: 'static> std::ops::DerefMut for ResourceDataRefMut<'a, T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
match &mut self.guard.state {
|
||||
ResourceState::Ready(d) => {
|
||||
// for some reason, if I didn't use `.as_ref`, the downcast would fail.
|
||||
let d = d.as_mut().as_any_mut();
|
||||
d.downcast_mut::<T>().unwrap()
|
||||
},
|
||||
_ => unreachable!() // ResHandler::data_ref shouldn't allow this to run
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ResourceDataRef<'a, T> {
|
||||
guard: std::sync::RwLockReadGuard<'a, UntypedResource>,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<'a, T: 'static> std::ops::Deref for ResourceDataRef<'a, T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
match &self.guard.state {
|
||||
ResourceState::Ready(d) => {
|
||||
// for some reason, if I didn't use `.as_ref`, the downcast would fail.
|
||||
let d = d.as_ref().as_any();
|
||||
d.downcast_ref::<T>().unwrap()
|
||||
},
|
||||
_ => unreachable!() // ResHandler::data_ref shouldn't allow this to run
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct UntypedResource {
|
||||
pub(crate) version: usize,
|
||||
pub(crate) state: ResourceState,
|
||||
uuid: Uuid,
|
||||
pub(crate) path: Option<String>,
|
||||
pub(crate) is_watched: bool,
|
||||
/// can be used to wait for the resource to load.
|
||||
pub(crate) condvar: Arc<(Mutex<bool>, Condvar)>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Component, Reflect)]
|
||||
pub struct UntypedResHandle{
|
||||
#[reflect(skip)]
|
||||
pub(crate) res: Arc<RwLock<UntypedResource>>,
|
||||
tyid: TypeId,
|
||||
}
|
||||
|
||||
impl UntypedResHandle {
|
||||
pub fn new(res: UntypedResource, tyid: TypeId) -> Self {
|
||||
Self {
|
||||
res: Arc::new(RwLock::new(res)),
|
||||
tyid
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read(&self) -> RwLockReadGuard<UntypedResource> {
|
||||
self.res.read().unwrap()
|
||||
}
|
||||
|
||||
pub fn write(&self) -> RwLockWriteGuard<UntypedResource> {
|
||||
self.res.write().unwrap()
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if this resource's path is being watched.
|
||||
pub fn is_watched(&self) -> bool {
|
||||
let d = self.read();
|
||||
d.is_watched
|
||||
}
|
||||
|
||||
/// Returns a boolean indicating if this resource is loaded
|
||||
pub fn is_loaded(&self) -> bool {
|
||||
let d = self.read();
|
||||
matches!(d.state, ResourceState::Ready(_))
|
||||
}
|
||||
|
||||
pub fn get_error(&self) -> Option<Arc<LoaderError>> {
|
||||
let d = self.read();
|
||||
|
||||
match &d.state {
|
||||
ResourceState::Error(e) => Some(e.clone()),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the uuid of the resource.
|
||||
pub fn uuid(&self) -> Uuid {
|
||||
let d = self.read();
|
||||
d.uuid
|
||||
}
|
||||
|
||||
pub fn path(&self) -> Option<String> {
|
||||
let d = self.read();
|
||||
d.path.clone()
|
||||
}
|
||||
|
||||
/// Retrieves the current version of the resource. This gets incremented when the resource
|
||||
/// is reloaded.
|
||||
pub fn version(&self) -> usize {
|
||||
let d = self.read();
|
||||
d.version
|
||||
}
|
||||
|
||||
/// Wait for the resource to be loaded, not including its dependencies
|
||||
/// (see [`UntypedResHandle::wait_recurse_dependencies_load`]).
|
||||
///
|
||||
/// This blocks the thread without consuming CPU time; it's backed by a
|
||||
/// [`Condvar`](std::sync::Condvar).
|
||||
pub fn wait_for_load(&self) {
|
||||
self.wait_for_load_timeout_option_impl(None);
|
||||
}
|
||||
|
||||
/// Does the same as [`UntypedResHandle::wait_for_load`] but has a timeout.
|
||||
///
|
||||
/// Returns true if the resource was loaded before hitting the timeout.
|
||||
pub fn wait_for_load_timeout(&self, timeout: Duration) -> bool {
|
||||
self.wait_for_load_timeout_option_impl(Some(timeout))
|
||||
}
|
||||
|
||||
/// Wait for the entire resource, including its dependencies to be loaded.
|
||||
///
|
||||
/// This blocks the thread without consuming CPU time; it's backed by a
|
||||
/// [`Condvar`](std::sync::Condvar).
|
||||
pub fn wait_recurse_dependencies_load(&self) {
|
||||
self.wait_recurse_dependencies_load_timeout_option_impl(None);
|
||||
}
|
||||
|
||||
/// Does the same as [`UntypedResHandle::wait_recurse_dependencies_load`] but has a timeout.
|
||||
///
|
||||
/// Returns true if the resource was loaded before hitting the timeout.
|
||||
pub fn wait_recurse_dependencies_load_timeout(&self, timeout: Duration) -> bool {
|
||||
self.wait_recurse_dependencies_load_timeout_option_impl(Some(timeout))
|
||||
}
|
||||
|
||||
fn wait_for_load_timeout_option_impl(&self, timeout: Option<Duration>) -> bool {
|
||||
let d = self.read();
|
||||
|
||||
if matches!(d.state, ResourceState::Ready(_)) {
|
||||
return true;
|
||||
}
|
||||
|
||||
let cv = d.condvar.clone();
|
||||
drop(d);
|
||||
|
||||
let l = cv.0.lock().unwrap();
|
||||
|
||||
if let Some(timeout) = timeout {
|
||||
let (_unused, timeout) = cv.1.wait_timeout(l, timeout).unwrap();
|
||||
!timeout.timed_out()
|
||||
} else {
|
||||
let _unused = cv.1.wait(l).unwrap();
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
fn wait_recurse_dependencies_load_timeout_option_impl(&self, timeout: Option<Duration>) -> bool {
|
||||
if !self.wait_for_load_timeout_option_impl(timeout) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let res = self.read();
|
||||
match &res.state {
|
||||
ResourceState::Ready(data) => {
|
||||
// `recur_dependencies` won't return resources that are not loaded in yet;
|
||||
// if we did not check if the resource was finished loading, we could miss
|
||||
// waiting for some resources and finish early.
|
||||
while self.recurse_dependency_state().is_loading() {
|
||||
for dep in data.recur_dependencies() {
|
||||
if !dep.wait_for_load_timeout_option_impl(timeout) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
true
|
||||
},
|
||||
// self.wait_for_load at the start ensures that the state is ready
|
||||
_ => unreachable!()
|
||||
}
|
||||
}
|
||||
|
||||
/// Recursively get the state of the dependencies.
|
||||
///
|
||||
/// This doesn't return any resource data, it can be used to check if the resource and its
|
||||
/// dependencies are loaded.
|
||||
pub fn recurse_dependency_state(&self) -> DependencyState {
|
||||
DependencyState::from_res_recurse(self)
|
||||
}
|
||||
|
||||
/// Retrieve a typed handle to the resource.
|
||||
///
|
||||
/// Returns `None` if the types do not match
|
||||
pub fn as_typed<T: ResourceData>(&self) -> Option<ResHandle<T>> {
|
||||
self.clone().into_typed()
|
||||
}
|
||||
|
||||
/// Convert `self` into a typed handle.
|
||||
///
|
||||
/// Returns `None` if the types do not match
|
||||
pub fn into_typed<T: ResourceData>(self) -> Option<ResHandle<T>> {
|
||||
if self.tyid == TypeId::of::<T>() {
|
||||
Some(ResHandle {
|
||||
handle: self,
|
||||
_marker: PhantomData::<T>,
|
||||
})
|
||||
} else { None }
|
||||
}
|
||||
|
||||
/// Retrieve the type id of the resource in the handle.
|
||||
pub fn resource_type_id(&self) -> TypeId {
|
||||
self.tyid
|
||||
}
|
||||
}
|
||||
|
||||
/// A handle to a resource.
|
||||
///
|
||||
/// # Note
|
||||
/// This struct has an inner [`RwLock`] to the resource data, so most methods may be blocking.
|
||||
/// However, the only time it will actually block is when another thread is reloading the resource
/// and holds a write lock on the data. This means that most of the time, it is not blocking.
|
||||
#[derive(Component, Reflect)]
|
||||
pub struct ResHandle<T: ResourceData> {
|
||||
pub(crate) handle: UntypedResHandle,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T: ResourceData> Clone for ResHandle<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self {
|
||||
handle: self.handle.clone(),
|
||||
_marker: PhantomData::<T>
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourceData> Deref for ResHandle<T> {
|
||||
type Target = UntypedResHandle;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.handle
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourceData> DerefMut for ResHandle<T> {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.handle
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourceData> ResHandle<T> {
|
||||
pub fn new_loading(path: Option<&str>) -> Self {
|
||||
let res_version = UntypedResource {
|
||||
version: 0,
|
||||
path: path.map(str::to_string),
|
||||
state: ResourceState::Loading,
|
||||
uuid: Uuid::new_v4(),
|
||||
is_watched: false,
|
||||
condvar: Arc::new((Mutex::new(false), Condvar::new())),
|
||||
};
|
||||
|
||||
Self {
|
||||
handle: UntypedResHandle::new(res_version, TypeId::of::<T>()),
|
||||
_marker: PhantomData::<T>,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create the resource with data; it's assumed the state is `Ready`.
|
||||
pub fn new_ready(path: Option<&str>, data: T) -> Self {
|
||||
let han = Self::new_loading(path);
|
||||
han.set_state(ResourceState::Ready(Box::new(data)));
|
||||
han
|
||||
}
|
||||
|
||||
/// Retrieve an untyped clone of the handle
|
||||
pub fn untyped_clone(&self) -> UntypedResHandle {
|
||||
self.handle.clone()
|
||||
}
|
||||
|
||||
/// Get a reference to the data in the resource
|
||||
pub fn data_ref<'a>(&'a self) -> Option<ResourceDataRef<'a, T>> {
|
||||
if self.is_loaded() {
|
||||
let d = self.handle.read();
|
||||
Some(ResourceDataRef {
|
||||
guard: d,
|
||||
_marker: PhantomData::<T>
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
pub fn data_mut<'a>(&'a self) -> Option<ResourceDataRefMut<'a, T>> {
|
||||
if self.is_loaded() {
|
||||
let d = self.handle.write();
|
||||
Some(ResourceDataRefMut {
|
||||
guard: d,
|
||||
_marker: PhantomData::<T>
|
||||
})
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: ResourceData> ResourceStorage for ResHandle<T> {
|
||||
fn as_any(&self) -> &dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync> {
|
||||
self.clone()
|
||||
}
|
||||
|
||||
fn as_box_any(self: Box<Self>) -> Box<dyn Any + Send + Sync> {
|
||||
self
|
||||
}
|
||||
|
||||
fn set_watched(&self, watched: bool) {
|
||||
let mut d = self.handle.write();
|
||||
d.is_watched = watched;
|
||||
}
|
||||
|
||||
fn version(&self) -> usize {
|
||||
self.handle.version()
|
||||
}
|
||||
|
||||
fn uuid(&self) -> Uuid {
|
||||
self.handle.uuid()
|
||||
}
|
||||
|
||||
fn path(&self) -> Option<String> {
|
||||
self.handle.path()
|
||||
}
|
||||
|
||||
fn is_watched(&self) -> bool {
|
||||
self.handle.is_watched()
|
||||
}
|
||||
|
||||
fn is_loaded(&self) -> bool {
|
||||
self.handle.is_loaded()
|
||||
}
|
||||
|
||||
fn set_state(&self, new: ResourceState) {
|
||||
let mut d = self.handle.write();
|
||||
d.state = new;
|
||||
}
|
||||
|
||||
fn clone_untyped(&self) -> UntypedResHandle {
|
||||
self.handle.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::{path::PathBuf, str::FromStr, sync::Arc};
|
||||
|
||||
use async_std::task;
|
||||
use instant::Duration;
|
||||
use rand::Rng;
|
||||
|
||||
use crate::{loader::ResourceLoader, ResHandle, ResourceData, ResourceManager};
|
||||
|
||||
#[allow(dead_code)]
|
||||
struct SimpleDepend {
|
||||
file_name: String,
|
||||
ext: String,
|
||||
}
|
||||
|
||||
impl ResourceData for SimpleDepend {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<crate::UntypedResHandle> {
|
||||
vec![]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct SlowSimpleDependLoader;
|
||||
|
||||
impl ResourceLoader for SlowSimpleDependLoader {
|
||||
fn extensions(&self) -> &[&str] {
|
||||
&["txt", "buf"]
|
||||
}
|
||||
|
||||
fn mime_types(&self) -> &[&str] {
|
||||
&[]
|
||||
}
|
||||
|
||||
fn load(&self, _: crate::ResourceManager, path: &str) -> crate::loader::PinedBoxLoaderFuture {
|
||||
let path = path.to_string();
|
||||
Box::pin(async move {
|
||||
let path = PathBuf::from_str(&path).unwrap();
|
||||
|
||||
let file_name = path.file_name()
|
||||
.and_then(|os| os.to_str())
|
||||
.unwrap();
|
||||
let path_ext = path.extension()
|
||||
.and_then(|os| os.to_str())
|
||||
.unwrap();
|
||||
|
||||
let res = rand::thread_rng().gen_range(500..1000);
|
||||
|
||||
task::sleep(Duration::from_millis(res)).await;
|
||||
|
||||
let simple = SimpleDepend {
|
||||
file_name: file_name.to_string(),
|
||||
ext: path_ext.to_string(),
|
||||
};
|
||||
|
||||
Ok(Box::new(simple) as Box<dyn ResourceData>)
|
||||
})
|
||||
}
|
||||
|
||||
fn load_bytes(&self, _: crate::ResourceManager, _: Vec<u8>, _: usize, _: usize) -> crate::loader::PinedBoxLoaderFuture {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn create_erased_handle(&self) -> std::sync::Arc<dyn crate::ResourceStorage> {
|
||||
Arc::from(ResHandle::<SimpleDepend>::new_loading(None))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
struct SimpleResource {
|
||||
depend_a: ResHandle<SimpleDepend>,
|
||||
}
|
||||
|
||||
impl ResourceData for SimpleResource {
|
||||
fn as_any(&self) -> &dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
|
||||
self
|
||||
}
|
||||
|
||||
fn dependencies(&self) -> Vec<crate::UntypedResHandle> {
|
||||
vec![self.depend_a.untyped_clone()]
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
struct SlowSimpleResourceLoader;
|
||||
|
||||
impl ResourceLoader for SlowSimpleResourceLoader {
|
||||
fn extensions(&self) -> &[&str] {
|
||||
&["res", "large"]
|
||||
}
|
||||
|
||||
fn mime_types(&self) -> &[&str] {
|
||||
&[]
|
||||
}
|
||||
|
||||
fn load(&self, res_man: crate::ResourceManager, _: &str) -> crate::loader::PinedBoxLoaderFuture {
|
||||
Box::pin(async move {
|
||||
let res = rand::thread_rng().gen_range(500..1000);
|
||||
|
||||
task::sleep(Duration::from_millis(res)).await;
|
||||
|
||||
// load dummy dependency that will take a bit
|
||||
let depend_path = "depend.txt";
|
||||
let depend_han = res_man.request::<SimpleDepend>(depend_path).unwrap();
|
||||
|
||||
let simple = SimpleResource {
|
||||
depend_a: depend_han,
|
||||
};
|
||||
|
||||
Ok(Box::new(simple) as Box<dyn ResourceData>)
|
||||
})
|
||||
}
|
||||
|
||||
fn load_bytes(&self, _: crate::ResourceManager, _: Vec<u8>, _: usize, _: usize) -> crate::loader::PinedBoxLoaderFuture {
|
||||
unreachable!()
|
||||
}
|
||||
|
||||
fn create_erased_handle(&self) -> std::sync::Arc<dyn crate::ResourceStorage> {
|
||||
Arc::from(ResHandle::<SimpleResource>::new_loading(None))
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn recursive() {
|
||||
let man = ResourceManager::new();
|
||||
man.register_loader::<SlowSimpleDependLoader>();
|
||||
man.register_loader::<SlowSimpleResourceLoader>();
|
||||
|
||||
let res = man.request::<SimpleResource>("massive_asset.res").unwrap();
|
||||
|
||||
let state = res.recurse_dependency_state();
|
||||
assert!(state.is_loading());
|
||||
|
||||
// this will take a bit
|
||||
res.wait_recurse_dependencies_load();
|
||||
|
||||
let state = res.recurse_dependency_state();
|
||||
assert!(!state.is_loading());
|
||||
}
|
||||
}
|
|
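The condvar-backed wait methods above are the intended way to block on a single handle without spinning. A short sketch of how they compose with `data_ref`, assuming the `Image` resource type from this crate; the function name and the `assets/logo.png` path are hypothetical:

```rust
use std::time::Duration;

use lyra_resource::{Image, ResourceManager};

fn load_logo(manager: &ResourceManager) {
    // hypothetical asset path, used only for illustration
    let handle = manager.request::<Image>("assets/logo.png").unwrap();

    // Block (without consuming CPU time) until the image itself is ready,
    // or give up after five seconds.
    if !handle.wait_for_load_timeout(Duration::from_secs(5)) {
        eprintln!("timed out waiting for the logo to load");
        return;
    }

    // data_ref returns None only when the resource is not in the Ready state,
    // so the unwrap is safe after a successful wait.
    let image = handle.data_ref().unwrap();

    // `*image` derefs down to the wrapped `image::DynamicImage`.
    let _ = &*image;
}
```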
@ -1,506 +0,0 @@
|
|||
use std::{any::Any, collections::HashMap, path::Path, sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, time::Duration};
|
||||
|
||||
use async_std::task;
|
||||
use crossbeam::channel::Receiver;
|
||||
use notify::{Watcher, RecommendedWatcher};
|
||||
use notify_debouncer_full::{DebouncedEvent, FileIdMap};
|
||||
use thiserror::Error;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::{loader::{ImageLoader, LoaderError, ResourceLoader}, resource::ResHandle, ResourceData, ResourceState, UntypedResHandle};
|
||||
|
||||
/// A trait for type erased storage of a resource.
|
||||
/// Implemented for [`ResHandle<T>`]
|
||||
pub trait ResourceStorage: Send + Sync + Any + 'static {
|
||||
fn as_any(&self) -> &dyn Any;
|
||||
fn as_any_mut(&mut self) -> &mut dyn Any;
|
||||
fn as_arc_any(self: Arc<Self>) -> Arc<dyn Any + Send + Sync>;
|
||||
fn as_box_any(self: Box<Self>) -> Box<dyn Any + Send + Sync>;
|
||||
/// Do not set a resource to watched if it is not actually watched.
|
||||
/// This is used internally.
|
||||
fn set_watched(&self, watched: bool);
|
||||
|
||||
fn version(&self) -> usize;
|
||||
fn uuid(&self) -> Uuid;
|
||||
fn path(&self) -> Option<String>;
|
||||
fn is_watched(&self) -> bool;
|
||||
fn is_loaded(&self) -> bool;
|
||||
fn set_state(&self, new: ResourceState);
|
||||
fn clone_untyped(&self) -> UntypedResHandle;
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum RequestError {
|
||||
#[error("{0}")]
|
||||
Loader(LoaderError),
|
||||
|
||||
#[error("The file extension is unsupported: '{0}'")]
|
||||
UnsupportedFileExtension(String),
|
||||
|
||||
#[error("The mimetype is unsupported: '{0}'")]
|
||||
UnsupportedMime(String),
|
||||
|
||||
#[error("The identifier is not found: '{0}'")]
|
||||
IdentNotFound(String),
|
||||
|
||||
#[error("The resource was not loaded from a path so cannot be reloaded")]
|
||||
NoReloadPath
|
||||
}
|
||||
|
||||
impl From<LoaderError> for RequestError {
|
||||
fn from(value: LoaderError) -> Self {
|
||||
RequestError::Loader(value)
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct that stores some things used for watching resources.
|
||||
pub struct ResourceWatcher {
|
||||
debouncer: Arc<RwLock<notify_debouncer_full::Debouncer<RecommendedWatcher, FileIdMap>>>,
|
||||
events_recv: Receiver<Result<Vec<DebouncedEvent>, Vec<notify::Error>>>,
|
||||
}
|
||||
|
||||
/// The state of the ResourceManager
|
||||
pub struct ResourceManagerState {
|
||||
resources: HashMap<String, Arc<dyn ResourceStorage>>,
|
||||
uuid_resources: HashMap<Uuid, Arc<dyn ResourceStorage>>,
|
||||
loaders: Vec<Arc<dyn ResourceLoader>>,
|
||||
watchers: HashMap<String, ResourceWatcher>,
|
||||
}
|
||||
|
||||
/// The ResourceManager
|
||||
///
|
||||
/// This exists since we need the manager to be `Send + Sync`.
|
||||
#[derive(Clone)]
|
||||
pub struct ResourceManager {
|
||||
inner: Arc<RwLock<ResourceManagerState>>,
|
||||
}
|
||||
|
||||
impl Default for ResourceManager {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inner: Arc::new(RwLock::new(
|
||||
ResourceManagerState {
|
||||
resources: HashMap::new(),
|
||||
uuid_resources: HashMap::new(),
|
||||
loaders: vec![ Arc::new(ImageLoader), ],
|
||||
watchers: HashMap::new(),
|
||||
}
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ResourceManager {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Retrieves a non-mutable guard of the manager's state.
|
||||
pub fn state(&self) -> RwLockReadGuard<ResourceManagerState> {
|
||||
self.inner.read().unwrap()
|
||||
}
|
||||
|
||||
/// Retrieves a mutable guard of the manager's state.
|
||||
pub fn state_mut(&self) -> RwLockWriteGuard<ResourceManagerState> {
|
||||
self.inner.write().unwrap()
|
||||
}
|
||||
|
||||
/// Registers a loader to the manager.
|
||||
pub fn register_loader<L>(&self)
|
||||
where
|
||||
L: ResourceLoader + Default + 'static
|
||||
{
|
||||
let mut state = self.state_mut();
|
||||
state.loaders.push(Arc::new(L::default()));
|
||||
}
|
||||
|
||||
/// Request a resource at `path`.
|
||||
///
|
||||
/// When a resource for a path is requested for the first time, the resource will be loaded
|
||||
/// and cached. In the future, the cached version will be returned. There is only ever one copy
|
||||
/// of the resource's data in memory at a time.
|
||||
///
|
||||
/// Loading resources is done asynchronously on a task spawned by `async-std`. You can use the
|
||||
/// handle to check if the resource is loaded.
|
||||
#[inline(always)]
|
||||
pub fn request<T>(&self, path: &str) -> Result<ResHandle<T>, RequestError>
|
||||
where
|
||||
T: ResourceData
|
||||
{
|
||||
self.request_raw(path)
|
||||
.map(|res| res.as_typed::<T>()
|
||||
.expect("mismatched asset type, cannot downcast"))
|
||||
}
|
||||
|
||||
/// Request a resource without downcasting to a `ResHandle<T>`.
|
||||
/// Whenever you're ready to downcast, you can do so like this:
|
||||
/// ```nobuild
|
||||
/// let arc_any = res_arc.as_arc_any();
|
||||
/// let res: Arc<ResHandle<T>> = res.downcast::<ResHandle<T>>().expect("Failure to downcast resource");
|
||||
/// ```
|
||||
pub fn request_raw(&self, path: &str) -> Result<UntypedResHandle, RequestError> {
|
||||
let mut state = self.state_mut();
|
||||
match state.resources.get(&path.to_string()) {
|
||||
Some(res) => {
|
||||
Ok(res.clone().clone_untyped())
|
||||
},
|
||||
None => {
|
||||
if let Some(loader) = state.loaders.iter()
|
||||
.find(|l| l.does_support_file(path)) {
|
||||
|
||||
// Load the resource and store it
|
||||
let loader = Arc::clone(loader); // stop borrowing from self
|
||||
let res = loader.load(self.clone(), path);
|
||||
|
||||
let handle = loader.create_erased_handle();
|
||||
|
||||
let untyped = handle.clone_untyped();
|
||||
untyped.write().path = Some(path.to_string());
|
||||
|
||||
task::spawn(async move {
|
||||
match res.await {
|
||||
Ok(data) => {
|
||||
let mut d = untyped.write();
|
||||
d.state = ResourceState::Ready(data);
|
||||
d.condvar.1.notify_all();
|
||||
}
|
||||
Err(err) => {
|
||||
let mut d = untyped.write();
|
||||
d.state = ResourceState::Error(Arc::new(err));
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let res: Arc<dyn ResourceStorage> = Arc::from(handle.clone());
|
||||
state.resources.insert(path.to_string(), res.clone());
|
||||
state.uuid_resources.insert(res.uuid(), res);
|
||||
|
||||
Ok(handle.clone_untyped())
|
||||
} else {
|
||||
Err(RequestError::UnsupportedFileExtension(path.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Store a resource using its uuid.
|
||||
///
|
||||
/// The resource cannot be requested with [`ResourceManager::request`], it can only be
|
||||
/// retrieved with [`ResourceManager::request_uuid`].
|
||||
pub fn store_uuid<T: ResourceData>(&self, res: ResHandle<T>) {
|
||||
let mut state = self.state_mut();
|
||||
state.resources.insert(res.uuid().to_string(), Arc::new(res));
|
||||
}
|
||||
|
||||
/// Request a resource via its uuid.
|
||||
///
|
||||
/// Returns `None` if the resource was not found. The resource must have been
/// stored with [`ResourceManager::request`] to return `Some`.
|
||||
pub fn request_uuid<T: ResourceData>(&self, uuid: &Uuid) -> Option<ResHandle<T>> {
|
||||
let state = self.state();
|
||||
match state.resources.get(&uuid.to_string())
|
||||
.or_else(|| state.uuid_resources.get(&uuid))
|
||||
{
|
||||
Some(res) => {
|
||||
let res = res.clone().as_arc_any();
|
||||
let res: Arc<ResHandle<T>> = res.downcast::<ResHandle<T>>().expect("Failure to downcast resource");
|
||||
Some(ResHandle::<T>::clone(&res))
|
||||
},
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Store bytes in the manager. If there is already an entry with the same identifier it will be updated.
|
||||
///
|
||||
/// Panics: If there is already an entry with the same `ident`, and the entry is not bytes, this function will panic.
|
||||
///
|
||||
/// Parameters:
|
||||
/// * `ident` - The identifier to store along with these bytes. Make sure its unique to avoid overriding something.
|
||||
/// * `bytes` - The bytes to store.
|
||||
///
|
||||
/// Returns: The `Arc` to the now stored resource
|
||||
pub fn load_bytes<T>(&self, ident: &str, mime_type: &str, bytes: Vec<u8>, offset: usize, length: usize) -> Result<ResHandle<T>, RequestError>
|
||||
where
|
||||
T: ResourceData
|
||||
{
|
||||
let mut state = self.state_mut();
|
||||
if let Some(loader) = state.loaders.iter()
|
||||
.find(|l| l.does_support_mime(mime_type)) {
|
||||
let loader = loader.clone();
|
||||
let res = loader.load_bytes(self.clone(), bytes, offset, length);
|
||||
|
||||
let handle = ResHandle::<T>::new_loading(None);
|
||||
let thand = handle.clone();
|
||||
task::spawn(async move {
|
||||
match res.await {
|
||||
Ok(data) => {
|
||||
let mut d = thand.write();
|
||||
d.state = ResourceState::Ready(data);
|
||||
}
|
||||
Err(err) => {
|
||||
let mut d = thand.write();
|
||||
d.state = ResourceState::Error(Arc::new(err));
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let res: Arc<dyn ResourceStorage> = Arc::from(handle.clone());
|
||||
state.resources.insert(ident.to_string(), res.clone());
|
||||
state.uuid_resources.insert(res.uuid(), res);
|
||||
|
||||
Ok(handle)
|
||||
} else {
|
||||
Err(RequestError::UnsupportedMime(mime_type.to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Requests bytes from the manager.
|
||||
pub fn request_loaded_bytes<T>(&self, ident: &str) -> Result<Arc<ResHandle<T>>, RequestError>
|
||||
where
|
||||
T: ResourceData
|
||||
{
|
||||
let state = self.state();
|
||||
match state.resources.get(&ident.to_string()) {
|
||||
Some(res) => {
|
||||
let res = res.clone().as_arc_any();
|
||||
let res = res.downcast::<ResHandle<T>>().expect("Failure to downcast resource");
|
||||
|
||||
Ok(res)
|
||||
},
|
||||
None => {
|
||||
Err(RequestError::IdentNotFound(ident.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Start watching a path for changes. Returns an mpsc channel that will send events.
|
||||
pub fn watch(&self, path: &str, recursive: bool) -> notify::Result<Receiver<Result<Vec<DebouncedEvent>, Vec<notify::Error>>>> {
|
||||
let (send, recv) = crossbeam::channel::bounded(15);
|
||||
let mut watcher = notify_debouncer_full::new_debouncer(Duration::from_millis(1000), None, send)?;
|
||||
|
||||
let recurse_mode = match recursive {
|
||||
true => notify::RecursiveMode::Recursive,
|
||||
false => notify::RecursiveMode::NonRecursive,
|
||||
};
|
||||
watcher.watcher().watch(path.as_ref(), recurse_mode)?;
|
||||
|
||||
let watcher = Arc::new(RwLock::new(watcher));
|
||||
let watcher = ResourceWatcher {
|
||||
debouncer: watcher,
|
||||
events_recv: recv.clone(),
|
||||
};
|
||||
|
||||
let mut state = self.state_mut();
|
||||
state.watchers.insert(path.to_string(), watcher);
|
||||
|
||||
let res = state.resources.get(&path.to_string())
|
||||
.expect("The path that was watched has not been loaded as a resource yet");
|
||||
res.set_watched(true);
|
||||
|
||||
Ok(recv)
|
||||
}
|
||||
|
||||
/// Stops watching a path
|
||||
pub fn stop_watching(&self, path: &str) -> notify::Result<()> {
|
||||
let state = self.state();
|
||||
if let Some(watcher) = state.watchers.get(path) {
|
||||
let mut watcher = watcher.debouncer.write().unwrap();
|
||||
watcher.watcher().unwatch(Path::new(path))?;
|
||||
|
||||
// unwrap is safe since only loaded resources can be watched
|
||||
let res = state.resources.get(&path.to_string()).unwrap();
|
||||
res.set_watched(false);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns an mpsc receiver for watcher events of a specific path. The path must already
|
||||
/// be watched with [`ResourceManager::watch`] for this to return `Some`.
|
||||
pub fn watcher_event_recv(&self, path: &str) -> Option<Receiver<Result<Vec<DebouncedEvent>, Vec<notify::Error>>>> {
|
||||
let state = self.state();
|
||||
state.watchers.get(&path.to_string())
|
||||
.map(|w| w.events_recv.clone())
|
||||
}
|
||||
|
||||
/// Trigger a reload of a resource.
|
||||
///
|
||||
/// The version of the resource will be incremented by one.
|
||||
///
|
||||
/// > Note: Since reloading is done asynchronously, the reloaded data will not be immediately
|
||||
/// accessible. Until the resource is reloaded, the previous data will stay inside of
|
||||
/// the handle.
|
||||
pub fn reload<T>(&self, resource: ResHandle<T>) -> Result<(), RequestError>
|
||||
where
|
||||
T: ResourceData
|
||||
{
|
||||
let state = self.state();
|
||||
|
||||
let path = resource.path()
|
||||
.ok_or(RequestError::NoReloadPath)?;
|
||||
if let Some(loader) = state.loaders.iter()
|
||||
.find(|l| l.does_support_file(&path)) {
|
||||
let loader = Arc::clone(loader); // stop borrowing from self
|
||||
let res = loader.load(self.clone(), &path);
|
||||
|
||||
/* let res_lock = &resource.data;
|
||||
let mut res_lock = res_lock.write().unwrap();
|
||||
res_lock.state = ResourceState::Loading;
|
||||
drop(res_lock); */
|
||||
|
||||
let thand = resource.clone();
|
||||
task::spawn(async move {
|
||||
match res.await {
|
||||
Ok(data) => {
|
||||
let mut d = thand.write();
|
||||
d.state = ResourceState::Ready(data);
|
||||
d.version += 1;
|
||||
}
|
||||
Err(err) => {
|
||||
let mut d = thand.write();
|
||||
d.state = ResourceState::Error(Arc::new(err));
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub mod tests {
|
||||
use std::{io, ops::Deref};
|
||||
|
||||
use instant::Instant;
|
||||
|
||||
use crate::{Image, ResourceData};
|
||||
|
||||
use super::*;
|
||||
|
||||
fn get_image(path: &str) -> String {
|
||||
let manifest = std::env::var("CARGO_MANIFEST_DIR").unwrap();
|
||||
|
||||
format!("{manifest}/test_files/img/{path}")
|
||||
}
|
||||
|
||||
pub fn busy_wait_resource<R: ResourceData>(handle: &ResHandle<R>, timeout: f32) {
|
||||
let start = Instant::now();
|
||||
while !handle.is_loaded() {
|
||||
// loop until the image is loaded
|
||||
let now = Instant::now();
|
||||
|
||||
let diff = now - start;
|
||||
|
||||
if diff.as_secs_f32() >= timeout {
|
||||
panic!("Image never loaded");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn busy_wait_resource_reload<R: ResourceData>(handle: &ResHandle<R>, timeout: f32) {
|
||||
let version = handle.version();
|
||||
let start = Instant::now();
|
||||
|
||||
while !handle.is_loaded() || handle.version() == version {
|
||||
// loop until the image is loaded
|
||||
let now = Instant::now();
|
||||
|
||||
let diff = now - start;
|
||||
|
||||
if diff.as_secs_f32() >= timeout {
|
||||
panic!("Image never loaded");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_image() {
|
||||
let man = ResourceManager::new();
|
||||
let res = man.request::<Image>(&get_image("squiggles.png")).unwrap();
|
||||
assert!(!res.is_loaded());
|
||||
|
||||
res.wait_for_load();
|
||||
//busy_wait_resource(&res, 10.0);
|
||||
|
||||
// shouldn't panic because of the loop
|
||||
res.data_ref().unwrap();
|
||||
}
|
||||
|
||||
/// Ensures that only one copy of the same data was made
|
||||
#[test]
|
||||
fn ensure_single() {
|
||||
let man = ResourceManager::new();
|
||||
let res = man.request::<Image>(&get_image("squiggles.png")).unwrap();
|
||||
assert_eq!(Arc::strong_count(&res.handle.res), 3);
|
||||
|
||||
let resagain = man.request::<Image>(&get_image("squiggles.png")).unwrap();
|
||||
assert_eq!(Arc::strong_count(&resagain.handle.res), 4);
|
||||
}
|
||||
|
||||
/// Ensures that an error is returned when a file that doesn't exist is requested
|
||||
#[test]
|
||||
fn ensure_none() {
|
||||
let man = ResourceManager::new();
|
||||
let res = man.request::<Image>(&get_image("squigglesfff.png")).unwrap();
|
||||
//let err = res.err().unwrap();
|
||||
|
||||
// 1 second should be enough to run into an error
|
||||
std::thread::sleep(Duration::from_secs(1));
|
||||
//busy_wait_resource(&res, 10.0);
|
||||
let state = &res.read().state;
|
||||
|
||||
assert!(
|
||||
match state {
|
||||
// make sure the error is NotFound
|
||||
//RequestError::Loader(LoaderError::IoError(e)) if e.kind() == io::ErrorKind::NotFound => true,
|
||||
ResourceState::Error(err) => {
|
||||
match err.deref() {
|
||||
LoaderError::IoError(e) if e.kind() == io::ErrorKind::NotFound => true,
|
||||
_ => false,
|
||||
}
|
||||
},
|
||||
_ => false
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reload_image() {
|
||||
let man = ResourceManager::new();
|
||||
let res = man.request::<Image>(&get_image("squiggles.png")).unwrap();
|
||||
busy_wait_resource(&res, 10.0);
|
||||
let img = res.data_ref();
|
||||
img.unwrap();
|
||||
|
||||
man.reload(res.clone()).unwrap();
|
||||
busy_wait_resource_reload(&res, 10.0);
|
||||
assert_eq!(res.version(), 1);
|
||||
|
||||
man.reload(res.clone()).unwrap();
|
||||
busy_wait_resource_reload(&res, 10.0);
|
||||
assert_eq!(res.version(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn watch_image() {
|
||||
let orig_path = get_image("squiggles.png");
|
||||
let image_path = get_image("squiggles_test.png");
|
||||
std::fs::copy(orig_path, &image_path).unwrap();
|
||||
|
||||
let man = ResourceManager::new();
|
||||
let res = man.request::<Image>(&image_path).unwrap();
|
||||
busy_wait_resource(&res, 10.0);
|
||||
let img = res.data_ref();
|
||||
img.unwrap();
|
||||
|
||||
let recv = man.watch(&image_path, false).unwrap();
|
||||
|
||||
std::fs::remove_file(&image_path).unwrap();
|
||||
|
||||
let event = recv.recv().unwrap();
|
||||
let event = event.unwrap();
|
||||
|
||||
assert!(event.iter().any(|ev| ev.kind.is_remove() || ev.kind.is_modify()));
|
||||
}
|
||||
}
|
|
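Tying the manager's pieces together: `request` caches one copy per path, `watch` hands back debounced filesystem events, and `reload` bumps the handle's version once the new data lands. A sketch of a hot-reload loop built from those three calls; the function name and the `assets/player.png` path are hypothetical, everything else is the API shown above:

```rust
use lyra_resource::{Image, ResourceManager};

fn hot_reload_player_texture(manager: &ResourceManager) -> notify::Result<()> {
    // First request loads and caches; later requests for the same path reuse the handle.
    let handle = manager.request::<Image>("assets/player.png").unwrap();
    handle.wait_for_load();

    // Watch the file; the receiver yields batches of debounced notify events.
    // watch() requires the path to already be loaded, which the request above guarantees.
    let events = manager.watch("assets/player.png", false)?;

    while let Ok(batch) = events.recv() {
        if let Ok(batch) = batch {
            if batch.iter().any(|ev| ev.kind.is_modify()) {
                // reload() is asynchronous: the old data stays in the handle
                // until the new data is ready, then version() increments.
                manager.reload(handle.clone()).unwrap();
            }
        }
    }

    Ok(())
}
```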
@ -1,108 +0,0 @@
use std::ops::{Deref, DerefMut};

//pub use gltf::texture::{MagFilter, MinFilter, WrappingMode};
use image::DynamicImage;
use lyra_reflect::Reflect;
use crate::lyra_engine;

use crate::ResHandle;
use crate::ResourceData;

/// The filter mode of the sampler.
///
/// This is used for minification, magnification, and mipmap filters.
#[derive(Clone, Copy, PartialEq, Eq, Reflect)]
pub enum FilterMode {
    Nearest,
    Linear,
}

/// The wrapping mode of the Texture coordinates
#[derive(Clone, Copy, PartialEq, Eq, Reflect)]
pub enum WrappingMode {
    ClampToEdge,
    MirroredRepeat,
    Repeat,
}

/// The descriptor of the sampler for a Texture.
#[derive(Clone, Reflect)]
pub struct TextureSampler {
    pub mag_filter: Option<FilterMode>,
    pub min_filter: Option<FilterMode>,
    pub mipmap_filter: Option<FilterMode>,
    pub wrap_u: WrappingMode,
    pub wrap_v: WrappingMode,
    pub wrap_w: WrappingMode,
}

#[derive(Clone, Reflect)]
pub struct Image(#[reflect(skip)] DynamicImage);

impl ResourceData for Image {
    fn dependencies(&self) -> Vec<crate::UntypedResHandle> {
        vec![]
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}

impl Deref for Image {
    type Target = DynamicImage;

    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl DerefMut for Image {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}

impl From<DynamicImage> for Image {
    fn from(value: DynamicImage) -> Self {
        Self(value)
    }
}

#[derive(Clone, Reflect)]
pub struct Texture {
    pub image: ResHandle<Image>,
    pub sampler: Option<TextureSampler>,
}

impl ResourceData for Texture {
    fn dependencies(&self) -> Vec<crate::UntypedResHandle> {
        vec![self.image.untyped_clone()]
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}

impl Texture {
    /// Create a texture from an image.
    pub fn from_image(image: ResHandle<Image>) -> Self {
        Self {
            image,
            sampler: None,
        }
    }
}
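Since `Texture` only bundles an image handle with an optional sampler, building one is a two-step affair. A small sketch, assuming these types are re-exported from the crate root and using a hypothetical `assets/bricks.png` path:

```rust
// Assumption: Image, Texture, TextureSampler, and WrappingMode are reachable
// from the lyra_resource crate root; adjust the paths if they are not.
use lyra_resource::{Image, ResourceManager, Texture, TextureSampler, WrappingMode};

fn make_brick_texture(manager: &ResourceManager) -> Texture {
    // hypothetical path, for illustration only
    let image = manager.request::<Image>("assets/bricks.png").unwrap();

    // from_image leaves the sampler unset; fill it in afterwards if needed.
    let mut texture = Texture::from_image(image);
    texture.sampler = Some(TextureSampler {
        mag_filter: None,
        min_filter: None,
        mipmap_filter: None,
        wrap_u: WrappingMode::Repeat,
        wrap_v: WrappingMode::Repeat,
        wrap_w: WrappingMode::Repeat,
    });

    texture
}
```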
@ -1 +0,0 @@
use base64::Engine;
@ -1,225 +0,0 @@
mod node;
use lyra_reflect::Reflect;
use lyra_resource::{ResourceData, UntypedResHandle};
pub use node::*;

mod world_transform;
pub use world_transform::*;

use lyra_ecs::{query::{Entities, ViewOne}, relation::ChildOf, Bundle, Component, World};
use lyra_math::Transform;

// So we can use the lyra_ecs::Component derive macro
pub(crate) mod lyra_engine {
    pub(crate) mod ecs {
        pub use lyra_ecs::*;
    }

    pub(crate) mod reflect {
        pub use lyra_reflect::*;
    }
}

/// A flag spawned on all scene node entities
#[derive(Component)]
pub struct SceneNodeFlag;

/// A flag spawned on only the scene root node
#[derive(Component)]
pub struct SceneNodeRoot;

/// A Graph of nodes that represents the hierarchy of a Scene.
///
/// This SceneGraph is special in the sense that it is literally just an ECS world with methods
/// implemented for it that make it easier to use as a SceneGraph.
//#[derive(Default)]
#[derive(Clone, Reflect)]
pub struct SceneGraph {
    #[reflect(skip)]
    pub(crate) world: World,
    root_node: SceneNode,
}

impl SceneGraph {
    /// Create a new SceneGraph with its own ECS World.
    pub fn new() -> Self {
        let world = World::new();

        Self::from_world(world)
    }

    /// Create a new SceneGraph inside an existing ECS World.
    pub fn from_world(mut world: World) -> Self {
        let root_en = world.spawn((WorldTransform::default(), Transform::default(), SceneNodeRoot));
        let root = SceneNode::new(None, root_en);

        Self {
            world,
            root_node: root,
        }
    }

    /// Adds a node to the root node of the SceneGraph.
    ///
    /// The spawned entity will have a `ChildOf` relation targeting the root node; the
    /// `SceneNodeFlag` component is also added to the entity.
    pub fn add_node<B: Bundle>(&mut self, bundle: B) -> SceneNode {
        let node = self.root_node.clone();
        self.add_node_under(&node, bundle)
    }

    /// Add a node under a parent node.
    ///
    /// The spawned entity will have a `ChildOf` relation targeting the provided parent node;
    /// the `SceneNodeFlag` component is also added to the entity.
    pub fn add_node_under<B: Bundle>(&mut self, parent: &SceneNode, bundle: B) -> SceneNode {
        world_add_child_node(&mut self.world, parent, bundle)
    }

    /// Insert a component bundle to a SceneNode.
    ///
    /// See [`lyra_ecs::World::insert`].
    pub fn insert<B: Bundle>(&mut self, node: &SceneNode, bundle: B) {
        self.world.insert(node.entity(), bundle);
    }

    pub fn add_empty_node_under(&mut self, parent: &SceneNode, local_transform: Transform) -> SceneNode {
        let e = self.world.spawn((SceneNodeFlag, local_transform));
        self.world.add_relation(e, ChildOf, parent.entity());

        SceneNode::new(Some(parent.entity()), e)
    }

    /// Traverses down the SceneGraph, calling `callback` with each SceneNode and its world transform.
    ///
    /// The traversal does not include the root scene node.
    pub fn traverse_down<F, Q>(&self, mut callback: F)
    where
        F: FnMut(&World, &SceneNode, ViewOne<Q::Query>),
        Q: lyra_ecs::query::AsQuery,
    {
        self.traverse_down_from::<F, Q>(self.root_node.clone(), &mut callback);
    }

    /// Recursively traverses down the SceneGraph from a starting node, calling `callback` with each
    /// SceneNode and its world transform.
    fn traverse_down_from<F, Q>(&self, start: SceneNode, callback: &mut F)
    where
        F: FnMut(&World, &SceneNode, ViewOne<Q::Query>),
        Q: lyra_ecs::query::AsQuery,
    {
        let v = self.world
            .view::<Entities>()
            .relates_to::<ChildOf>(start.entity());

        for (e, _rel) in v.iter() {
            let node = SceneNode::new(Some(start.entity()), e);
            //let world_pos = node.world_transform(self);
            let v = self.world.view_one::<Q>(e);
            callback(&self.world, &node, v);

            self.traverse_down_from::<F, Q>(node, callback);
        }
    }

    pub fn root_node(&self) -> SceneNode {
        self.root_node.clone()
    }

    /// Retrieve a borrow of the world that backs the Scene.
    pub fn world(&self) -> &World {
        &self.world
    }

    pub fn world_mut(&mut self) -> &mut World {
        &mut self.world
    }
}

impl ResourceData for SceneGraph {
    fn dependencies(&self) -> Vec<UntypedResHandle> {
        self.world().view::<&UntypedResHandle>()
            .iter()
            .map(|han| han.clone())
            .collect()
    }

    fn as_any(&self) -> &dyn std::any::Any {
        self
    }

    fn as_any_mut(&mut self) -> &mut dyn std::any::Any {
        self
    }
}

/// Add a node under a parent node.
///
/// The spawned entity will have a `ChildOf` relation targeting the provided parent node;
/// the `SceneNodeFlag` component is also added to the entity.
pub(crate) fn world_add_child_node<B: Bundle>(world: &mut World, parent: &SceneNode, bundle: B) -> SceneNode {
    let e = world.spawn(bundle);
    world.insert(e, (SceneNodeFlag,));
    world.add_relation(e, ChildOf, parent.entity());

    SceneNode::new(Some(parent.entity()), e)
}

#[cfg(test)]
pub mod tests {
    use lyra_ecs::{query::{filter::{Has, Not}, Entities}, relation::{ChildOf, RelationOriginComponent}, Component};
    use lyra_math::{Transform, Vec3};
    use lyra_resource::ResHandle;

    use crate::{lyra_engine, WorldTransform, SceneGraph};

    #[derive(Component)]
    pub struct FakeMesh;

    #[test]
    fn single_node_hierarchy() {
        let mut scene = SceneGraph::new();

        let a = scene.add_node((Transform::from_translation(Vec3::new(10.0, 10.0, 10.0)), FakeMesh));
        assert!(a.parent(&scene).unwrap() == scene.root_node);
    }

    #[test]
    fn double_node_hierarchy() {
        let mut scene = SceneGraph::new();

        let a = scene.add_node((Transform::from_translation(Vec3::new(10.0, 10.0, 10.0)), FakeMesh));
        assert!(a.parent(&scene).unwrap() == scene.root_node);

        let b = a.add_node(&mut scene, (Transform::from_translation(Vec3::new(50.0, 50.0, 50.0)), FakeMesh));
        assert!(b.parent(&scene).unwrap() == a);
    }

    #[test]
    fn traverse_down() {
        let v2s = vec![Vec3::new(10.0, 10.0, 10.0), Vec3::new(50.0, 50.0, 50.0)];

        let mut scene = SceneGraph::new();

        let a = scene.add_node((WorldTransform::default(), Transform::from_translation(v2s[0]), FakeMesh));
        assert!(a.parent(&scene).unwrap() == scene.root_node);
        let b = a.add_node(&mut scene, (WorldTransform::default(), Transform::from_translation(v2s[1]), FakeMesh));
        assert!(b.parent(&scene).unwrap() == a);

        let view = scene.world.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
        crate::system_update_world_transforms(&scene.world, view).unwrap();

        let mut idx = 0;
        scene.traverse_down::<_, &WorldTransform>(|_, _, v| {
            let pos = v.get().unwrap();
            if idx == 0 {
                assert_eq!(**pos, Transform::from_translation(v2s[idx]));
            } else if idx == 1 {
                let t = v2s.iter().sum();
                assert_eq!(**pos, Transform::from_translation(t));
            }

            idx += 1;
        });
    }
}
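A short sketch of how the graph API above is meant to be driven: spawn nodes under the root, then walk them with `traverse_down`. It is written as if it lived inside this crate (mirroring the tests' imports); the `Velocity` component and the `build_scene` function are hypothetical stand-ins for whatever a caller attaches:

```rust
use lyra_ecs::Component;
use lyra_math::{Transform, Vec3};

// lyra_engine must be in scope for the Component derive, as in the tests above.
use crate::{lyra_engine, SceneGraph, WorldTransform};

#[derive(Component)]
struct Velocity(Vec3);

fn build_scene() -> SceneGraph {
    let mut scene = SceneGraph::new();

    // Children of the root; each entity gets SceneNodeFlag and a ChildOf relation.
    let body = scene.add_node((
        WorldTransform::default(),
        Transform::from_translation(Vec3::new(0.0, 1.0, 0.0)),
        Velocity(Vec3::new(0.0, 0.0, 0.0)),
    ));
    scene.add_node_under(&body, (
        WorldTransform::default(),
        Transform::default(),
        Velocity(Vec3::new(0.0, 0.0, 0.0)),
    ));

    // Visit every non-root node, viewing one component type per node.
    scene.traverse_down::<_, &Transform>(|_world, _node, view| {
        if let Some(tran) = view.get() {
            println!("node local translation: {:?}", tran.translation);
        }
    });

    scene
}
```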
@ -1,201 +0,0 @@
|
|||
use std::ops::Deref;
|
||||
|
||||
use lyra_ecs::{query::{filter::{Has, Not}, Entities, View}, relation::{ChildOf, RelationOriginComponent}, Component, Entity, World};
|
||||
use lyra_math::Transform;
|
||||
use lyra_reflect::Reflect;
|
||||
use lyra_resource::ResHandle;
|
||||
use crate::{lyra_engine, SceneGraph};
|
||||
|
||||
/// The world transform of an entity.
|
||||
///
|
||||
/// A Transform represents the relative position of the entity to its parent entity, while
|
||||
/// a world transform is the position relative to the World. When wanting to move an entity,
|
||||
/// you should use its [`Transform`]. You cannot mutate [`WorldTransform`] as it's managed completely
|
||||
/// by the [`system_update_world_transforms`] system. For the WorldTransform to work properly, you
|
||||
/// must have both a [`Transform`] and [`WorldTransform`] on the entities in the scene.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Default, Component, Reflect)]
|
||||
pub struct WorldTransform(pub(crate) Transform);
|
||||
|
||||
impl Deref for WorldTransform {
|
||||
type Target = Transform;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Transform> for WorldTransform {
|
||||
    fn from(value: Transform) -> Self {
        Self(value)
    }
}

/// A system that updates the [`WorldTransform`]s of entities and their children.
///
/// For entities without a parent, the [`WorldTransform`] is set to match the local
/// [`Transform`]. For child entities, the [`WorldTransform`] is updated to reflect the
/// parent entity; currently only the translation is accumulated from the parent, while
/// rotation and scale come from the child's own local transform.
pub fn system_update_world_transforms(world: &World, view: View<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>) -> anyhow::Result<()> {
    for (en, mut world_tran, tran, scene) in view.into_iter() {
        world_tran.0 = *tran;
        recurse_update_trans(world, &world_tran, en)?;

        // if there was a scene, update it as well
        if let Some(scene) = scene {
            if let Some(scene) = scene.data_ref() {
                let sworld = &scene.world;
                let view = sworld.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
                system_update_world_transforms(&scene.world, view)?;
            }
        }
    }

    Ok(())
}

fn recurse_update_trans(world: &World, parent_transform: &WorldTransform, entity: Entity) -> anyhow::Result<()> {
    // Store entities and their computed world transforms so they can be processed outside of
    // the view; recursing while the view is still borrowed would attempt a second mutable
    // borrow of the archetype column that holds WorldTransform.
    let mut next_entities = vec![];

    for ((en, mut world_tran, tran, scene), _) in
        world.view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>)>()
            .relates_to::<ChildOf>(entity)
            .into_iter()
    {
        world_tran.0 = *tran;
        world_tran.0.translation = parent_transform.0.translation + tran.translation;
        next_entities.push((en, world_tran.0.clone()));

        // if there was a scene, update it as well
        if let Some(scene) = scene {
            if let Some(scene) = scene.data_ref() {
                let sworld = &scene.world;
                let view = sworld.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
                system_update_world_transforms(&scene.world, view)?;
            }
        }
    }

    for (en, pos) in next_entities.into_iter() {
        recurse_update_trans(world, &WorldTransform(pos), en)?;
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    use lyra_ecs::{query::{filter::{Has, Not}, Entities}, relation::{ChildOf, RelationOriginComponent}, World};
    use lyra_math::Transform;
    use lyra_resource::ResHandle;

    use crate::{system_update_world_transforms, SceneGraph, WorldTransform};

    #[test]
    fn test_system() {
        let mut world = World::new();

        let parent = world.spawn((WorldTransform::default(), Transform::from_xyz(10.0, 10.0, 10.0)));
        let child = world.spawn((WorldTransform::default(), Transform::from_xyz(15.0, 15.0, 15.0)));
        world.add_relation(child, ChildOf, parent);

        let view = world.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
        system_update_world_transforms(&world, view).unwrap();

        let g = world.view_one::<&WorldTransform>(child).get().unwrap();
        assert_eq!(**g, Transform::from_xyz(25.0, 25.0, 25.0));
    }

    #[test]
    fn test_system_many_entities() {
        let mut world = World::new();

        let parent = world.spawn((WorldTransform::default(), Transform::from_xyz(10.0, 10.0, 10.0)));

        let mut children = vec![];
        let mut base_offset = 15.0;
        for _ in 0..10 {
            let en = world.spawn((WorldTransform::default(), Transform::from_xyz(base_offset, base_offset, base_offset)));
            world.add_relation(en, ChildOf, parent);
            base_offset += 10.0;
            children.push(en);
        }

        let second_child = world.spawn((WorldTransform::default(), Transform::from_xyz(5.0, 3.0, 8.0)));
        world.add_relation(second_child, ChildOf, parent);

        let view = world.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
        system_update_world_transforms(&world, view).unwrap();

        let mut base_offset = 25.0;
        for child in children.into_iter() {
            let g = world.view_one::<&WorldTransform>(child).get().unwrap();
            println!("Child {:?} at {:?}", child, g.translation);
            assert_eq!(**g, Transform::from_xyz(base_offset, base_offset, base_offset));

            base_offset += 10.0;
        }
    }

    #[test]
    fn test_system_many_children() {
        let mut world = World::new();

        let parent = world.spawn((WorldTransform::default(), Transform::from_xyz(10.0, 10.0, 10.0)));
        let first_child = world.spawn((WorldTransform::default(), Transform::from_xyz(15.0, 15.0, 15.0)));
        world.add_relation(first_child, ChildOf, parent);

        let sec_chi = world.spawn((WorldTransform::default(), Transform::from_xyz(155.0, 23.0, 6.0)));
        world.add_relation(sec_chi, ChildOf, first_child);

        let thir_chi = world.spawn((WorldTransform::default(), Transform::from_xyz(51.0, 85.0, 17.0)));
        world.add_relation(thir_chi, ChildOf, sec_chi);

        let four_child = world.spawn((WorldTransform::default(), Transform::from_xyz(24.0, 61.0, 65.0)));
        world.add_relation(four_child, ChildOf, thir_chi);

        let five_child = world.spawn((WorldTransform::default(), Transform::from_xyz(356.0, 54.0, 786.0)));
        world.add_relation(five_child, ChildOf, four_child);

        let view = world.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
        system_update_world_transforms(&world, view).unwrap();

        let g = world.view_one::<&WorldTransform>(five_child).get().unwrap();
        assert_eq!(**g, Transform::from_xyz(611.0, 248.0, 899.0));
    }

    #[test]
    fn test_system_branched_children() {
        let mut world = World::new();

        let parent = world.spawn((WorldTransform::default(), Transform::from_xyz(10.0, 10.0, 10.0)));
        let first_child = world.spawn((WorldTransform::default(), Transform::from_xyz(15.0, 15.0, 15.0)));
        world.add_relation(first_child, ChildOf, parent);

        let sec_chi = world.spawn((WorldTransform::default(), Transform::from_xyz(155.0, 23.0, 6.0)));
        world.add_relation(sec_chi, ChildOf, first_child);

        let thir_chi = world.spawn((WorldTransform::default(), Transform::from_xyz(51.0, 85.0, 17.0)));
        world.add_relation(thir_chi, ChildOf, first_child);

        let four_child = world.spawn((WorldTransform::default(), Transform::from_xyz(24.0, 61.0, 65.0)));
        world.add_relation(four_child, ChildOf, thir_chi);

        let five_child = world.spawn((WorldTransform::default(), Transform::from_xyz(356.0, 54.0, 786.0)));
        world.add_relation(five_child, ChildOf, sec_chi);

        let view = world.filtered_view::<(Entities, &mut WorldTransform, &Transform, Option<&ResHandle<SceneGraph>>), Not<Has<RelationOriginComponent<ChildOf>>>>();
        system_update_world_transforms(&world, view).unwrap();

        let g = world.view_one::<&WorldTransform>(five_child).get().unwrap();
        assert_eq!(**g, Transform::from_xyz(536.0, 102.0, 817.0));

        let g = world.view_one::<&WorldTransform>(thir_chi).get().unwrap();
        assert_eq!(**g, Transform::from_xyz(76.0, 110.0, 42.0));

        let g = world.view_one::<&WorldTransform>(four_child).get().unwrap();
        assert_eq!(**g, Transform::from_xyz(100.0, 171.0, 107.0));
    }
}
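The recursion above composes a child's world transform from its parent by adding translations, while the child keeps its own rotation and scale. Below is a minimal sketch of that composition rule, assuming only the `lyra_math::Transform` API already used above (a `Copy` type with a public `translation` field); it is not part of the diff itself.

use lyra_math::Transform;

// Sketch: mirrors what `recurse_update_trans` does per child - take the child's
// local transform and offset its translation by the parent's world translation.
fn compose_world(parent_world: &Transform, child_local: &Transform) -> Transform {
    let mut world = *child_local;
    world.translation = parent_world.translation + child_local.translation;
    world
}

For example, a parent at (10, 10, 10) with a child at (15, 15, 15) yields a child world translation of (25, 25, 25), which is exactly what `test_system` above asserts.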
@ -1 +0,0 @@
|
|||
Subproject commit a761f4094bc18190285b4687ec804161fea874b6
|
|
@ -1,143 +0,0 @@
|
|||
use syn::{parenthesized, token, Token};
|
||||
|
||||
pub(crate) enum FieldType {
|
||||
Unknown,
|
||||
Type(syn::Path),
|
||||
Wrapped(syn::Path),
|
||||
}
|
||||
|
||||
impl FieldType {
|
||||
pub fn is_unknown(&self) -> bool {
|
||||
matches!(self, FieldType::Unknown)
|
||||
}
|
||||
|
||||
pub fn is_wrapped(&self) -> bool {
|
||||
matches!(self, FieldType::Wrapped(_))
|
||||
}
|
||||
|
||||
pub fn get_type_path(&self) -> Option<&syn::Path> {
|
||||
match self {
|
||||
FieldType::Unknown => None,
|
||||
FieldType::Type(path) => Some(path),
|
||||
FieldType::Wrapped(path) => Some(path),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct Field {
|
||||
pub field: syn::Ident,
|
||||
pub field_ty: FieldType,
|
||||
pub skip_setter: bool,
|
||||
pub setter: Option<syn::Block>,
|
||||
pub getter: Option<syn::Block>,
|
||||
}
|
||||
|
||||
impl Field {
|
||||
fn parse_extended(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let field_name = input.parse()?;
|
||||
|
||||
let fty = if input.peek(Token![:]) {
|
||||
let _col: Token![:] = input.parse()?;
|
||||
let s: syn::Path = input.parse()?;
|
||||
let mut fty = FieldType::Type(s.clone());
|
||||
|
||||
if let Some(ident) = s.get_ident() {
|
||||
if ident.to_string() == "wrap" {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
fty = FieldType::Wrapped(content.parse()?);
|
||||
}
|
||||
}
|
||||
|
||||
fty
|
||||
} else {
|
||||
FieldType::Unknown
|
||||
};
|
||||
|
||||
let mut s = Self {
|
||||
field: field_name,
|
||||
field_ty: fty,
|
||||
skip_setter: false,
|
||||
setter: None,
|
||||
getter: None,
|
||||
};
|
||||
|
||||
while input.peek(Token![,]) {
|
||||
let _: Token![,] = input.parse()?;
|
||||
|
||||
if input.peek(syn::Ident) {
|
||||
let ident: syn::Ident = input.parse()?;
|
||||
let ident_str = ident.to_string();
|
||||
let ident_str = ident_str.as_str();
|
||||
|
||||
match ident_str {
|
||||
"skip_set" => {
|
||||
s.skip_setter = true;
|
||||
}
|
||||
"set" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
s.setter = Some(input.parse()?);
|
||||
}
|
||||
"get" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
s.getter = Some(input.parse()?);
|
||||
}
|
||||
_ => {
|
||||
return Err(syn::Error::new_spanned(ident, "unknown wrapper command"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (s.getter.is_some() || s.setter.is_some()) && s.field_ty.is_wrapped() {
|
||||
return Err(syn::Error::new(
|
||||
input.span(),
|
||||
"cannot specify custom getter or setter \
|
||||
with wrapped type",
|
||||
));
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for Field {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
if input.peek(token::Paren) {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
|
||||
Self::parse_extended(&content)
|
||||
} else {
|
||||
let field_name = input.parse()?;
|
||||
|
||||
let fty = if input.peek(Token![:]) {
|
||||
let _col: Token![:] = input.parse()?;
|
||||
let s: syn::Path = input.parse()?;
|
||||
let mut fty = FieldType::Type(s.clone());
|
||||
|
||||
if let Some(ident) = s.get_ident() {
|
||||
if ident.to_string() == "wrap" {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
fty = FieldType::Wrapped(content.parse()?);
|
||||
}
|
||||
}
|
||||
|
||||
fty
|
||||
} else {
|
||||
FieldType::Unknown
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
field: field_name,
|
||||
field_ty: fty,
|
||||
skip_setter: false,
|
||||
setter: None,
|
||||
getter: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
|
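The `Field` parser above accepts either a bare `name: type` entry, where `wrap(...)` marks a wrapped type, or a parenthesized extended entry that also carries per-field options such as `skip_set`, `set = { ... }`, and `get = { ... }`. A small illustrative check of both forms, written as if it sat at the bottom of this module; the field names and types are made up for the example.

#[cfg(test)]
mod field_syntax_tests {
    use super::Field;

    #[test]
    fn parses_bare_and_extended_forms() -> syn::Result<()> {
        // bare form: `name: wrap(Type)` marks the field as a wrapped type
        // (the names here are illustrative, not from the crate)
        let f: Field = syn::parse_str("translation: wrap(LuaVec3)")?;
        assert!(f.field_ty.is_wrapped());

        // extended form: parenthesized, with per-field options after the type
        let f: Field = syn::parse_str("(fov: f32, skip_set)")?;
        assert!(f.skip_setter);
        assert!(!f.field_ty.is_wrapped());

        Ok(())
    }
}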
@ -1,269 +0,0 @@
|
|||
use quote::{format_ident, quote};
|
||||
use syn::{braced, parenthesized, parse_macro_input, token, Ident, Path, Token};
|
||||
|
||||
pub(crate) struct FieldGetter {
|
||||
pub field: Ident,
|
||||
pub body: Option<syn::Block>,
|
||||
pub wrapper_type: Option<syn::Path>,
|
||||
}
|
||||
|
||||
impl FieldGetter {
|
||||
fn parse_extended(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let field_name = input.parse()?;
|
||||
|
||||
let mut s = Self {
|
||||
field: field_name,
|
||||
body: None,
|
||||
wrapper_type: None
|
||||
};
|
||||
|
||||
while input.peek(Token![,]) {
|
||||
let _: Token![,] = input.parse()?;
|
||||
|
||||
if input.peek(syn::Ident) {
|
||||
let ident: syn::Ident = input.parse()?;
|
||||
let ident_str = ident.to_string();
|
||||
let ident_str = ident_str.as_str();
|
||||
|
||||
match ident_str {
|
||||
"wrapper" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
let name: Path = input.parse()?;
|
||||
s.wrapper_type = Some(name);
|
||||
},
|
||||
_ => {
|
||||
return Err(syn::Error::new_spanned(ident, "unknown wrapper command"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(block) = input.parse::<syn::Block>() {
|
||||
s.body = Some(block);
|
||||
}
|
||||
}
|
||||
|
||||
if s.body.is_some() && s.wrapper_type.is_some() {
|
||||
return Err(syn::Error::new(input.span(), "cannot use body and wrapper_type, choose one"));
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for FieldGetter {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
if input.peek(token::Paren) {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
|
||||
Self::parse_extended(&content)
|
||||
} else {
|
||||
let field_name = input.parse()?;
|
||||
Ok(Self {
|
||||
field: field_name,
|
||||
body: None,
|
||||
wrapper_type: None,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
struct HandleWrapUsage {
|
||||
type_path: syn::Path,
|
||||
/// The extra derives of the type.
|
||||
override_name: Option<Ident>,
|
||||
field_getters: Vec<FieldGetter>,
|
||||
|
||||
extra_builds: Option<syn::Block>,
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for HandleWrapUsage {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let type_path: syn::Path = input.parse()?;
|
||||
let mut s = Self {
|
||||
type_path,
|
||||
override_name: None,
|
||||
field_getters: vec![],
|
||||
extra_builds: None,
|
||||
};
|
||||
|
||||
while input.peek(Token![,]) {
|
||||
let _: Token![,] = input.parse()?;
|
||||
|
||||
if input.peek(syn::Ident) {
|
||||
let ident: syn::Ident = input.parse()?;
|
||||
let ident_str = ident.to_string();
|
||||
let ident_str = ident_str.as_str();
|
||||
|
||||
match ident_str {
|
||||
"name" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
s.override_name = Some(input.parse()?);
|
||||
},
|
||||
"field_getters" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
if input.peek(token::Brace) {
|
||||
let content;
|
||||
let _braced: token::Brace = braced!(content in input);
|
||||
|
||||
let terminated = content.parse_terminated(FieldGetter::parse, Token![,])?;
|
||||
s.field_getters = terminated.into_iter().collect();
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(syn::Error::new_spanned(ident, "unknown wrapper command"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Ok(block) = input.parse::<syn::Block>() {
|
||||
s.extra_builds = Some(block);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn lua_wrap_handle_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
|
||||
let input = parse_macro_input!(input as HandleWrapUsage);
|
||||
|
||||
let handle_path = &input.type_path;
|
||||
let handle_name = &input.type_path.segments.last().unwrap().ident;
|
||||
|
||||
let base_name = input.override_name.unwrap_or_else(|| handle_name.clone());
|
||||
let wrapper_name = format_ident!("Lua{}Handle", base_name);
|
||||
//let wrapper_name = Ident::new(&format!("Lua{}", handle_name.to_string()), handle_name.span());
|
||||
let ud_name = format!("{}Handle", base_name.to_string());
|
||||
|
||||
let extras = input.extra_builds;
|
||||
|
||||
let custom_getters = input.field_getters.iter().map(|g| {
|
||||
let field = &g.field;
|
||||
|
||||
let field_creator = match &g.wrapper_type {
|
||||
Some(wrap) => {
|
||||
quote!(#wrap(data.#field.clone()).into_lua(lua))
|
||||
},
|
||||
None => match &g.body {
|
||||
Some(body) => {
|
||||
quote!(#body)
|
||||
},
|
||||
None => {
|
||||
quote!(data.#field.clone().into_lua(lua))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
quote! {
|
||||
fields.add_field_method_get(stringify!(#field), |lua, this| {
|
||||
if let Some(data) = this.0.data_ref() {
|
||||
#field_creator
|
||||
} else {
|
||||
Ok(mlua::Value::Nil)
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
quote! {
|
||||
#[derive(Clone, Reflect)]
|
||||
pub struct #wrapper_name(pub ResHandle<#handle_path>);
|
||||
|
||||
impl Deref for #wrapper_name {
|
||||
type Target = ResHandle<#handle_path>;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ResHandle<#handle_path>> for #wrapper_name {
|
||||
fn from(value: ResHandle<#handle_path>) -> Self {
|
||||
#wrapper_name(value)
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::UserData for #wrapper_name {
|
||||
fn add_fields<F: mlua::UserDataFields<Self>>(fields: &mut F) {
|
||||
fields.add_field_method_get("path", |_, this| Ok(this.path()));
|
||||
fields.add_field_method_get("version", |_, this| Ok(this.version()));
|
||||
fields.add_field_method_get("uuid", |_, this| Ok(this.uuid().to_string()));
|
||||
fields.add_field_method_get("state", |_, this| {
|
||||
let name = if this.is_loaded() {
|
||||
"ready"
|
||||
} else if this.get_error().is_some() {
|
||||
"error"
|
||||
} else { "loading" };
|
||||
|
||||
Ok(name)
|
||||
});
|
||||
|
||||
#(#custom_getters)*
|
||||
}
|
||||
|
||||
fn add_methods<M: mlua::UserDataMethods<Self>>(methods: &mut M) {
|
||||
methods.add_method("is_watched", |_, this, ()| {
|
||||
Ok(this.is_watched())
|
||||
});
|
||||
|
||||
methods.add_method("is_loaded", |_, this, ()| {
|
||||
Ok(this.is_loaded())
|
||||
});
|
||||
|
||||
methods.add_method("wait_until_loaded", |_, this, ()| {
|
||||
this.wait_recurse_dependencies_load();
|
||||
|
||||
Ok(())
|
||||
});
|
||||
|
||||
methods.add_function(FN_NAME_INTERNAL_REFLECT_TYPE, |_, ()| {
|
||||
Ok(ScriptBorrow::from_component::<ResHandle<#handle_path>>(None))
|
||||
});
|
||||
methods.add_method(FN_NAME_INTERNAL_REFLECT, |_, this, ()| {
|
||||
Ok(ScriptBorrow::from_component(Some(this.0.clone())))
|
||||
});
|
||||
|
||||
#extras
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::FromLua for #wrapper_name {
|
||||
fn from_lua(val: mlua::Value, _: &mlua::Lua) -> mlua::Result<Self> {
|
||||
let tyname = val.type_name();
|
||||
let ud = val.as_userdata()
|
||||
.ok_or(mlua::Error::external(crate::lua::Error::type_mismatch(#ud_name, &tyname)))?;
|
||||
let ud = ud.borrow::<#wrapper_name>()?;
|
||||
|
||||
Ok(ud.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl LuaWrapper for #wrapper_name {
|
||||
type Wrap = ResHandle<#handle_path>;
|
||||
|
||||
fn wrapped_type_id() -> std::any::TypeId {
|
||||
TypeId::of::<ResHandle<#handle_path>>()
|
||||
}
|
||||
|
||||
fn into_wrapped(self) -> Self::Wrap {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl LuaHandleWrapper for #wrapper_name {
|
||||
type ResourceType = #handle_path;
|
||||
|
||||
fn from_handle(handle: ResHandle<Self::ResourceType>) -> Self {
|
||||
Self(handle)
|
||||
}
|
||||
}
|
||||
}.into()
|
||||
}
|
|
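Judging from the `HandleWrapUsage` parser above, an invocation of the exported `lua_wrap_handle!` macro would look roughly like the sketch below; the resource type and field names are placeholders for illustration, not taken from this diff.

// NOTE: the type path and field names below are illustrative placeholders.
lua_wrap_handle!(
    lyra_resource::gltf::Gltf,
    name = Gltf,
    field_getters = {
        materials,
        (scenes, wrapper = LuaSceneHandle)
    }
);

This would expand to a `LuaGltfHandle` userdata wrapping `ResHandle<lyra_resource::gltf::Gltf>`, exposing the built-in `path`, `version`, `uuid`, and `state` fields plus one getter per entry in `field_getters`.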
@ -1,64 +0,0 @@
use handle_macro::lua_wrap_handle_impl;
use lua_macro::wrap_lua_struct_impl;
use quote::quote;
use syn::{parse_macro_input, Token};

mod mat_wrapper;
mod vec_wrapper;
use to_lua_macro::to_lua_struct_impl;
use vec_wrapper::VecWrapper;

mod lua_macro;
mod to_lua_macro;
mod field;

mod handle_macro;

pub(crate) const FN_NAME_INTERNAL_REFLECT_TYPE: &str = "__lyra_internal_reflect_type";
pub(crate) const FN_NAME_INTERNAL_REFLECT: &str = "__lyra_internal_reflect";

#[proc_macro]
pub fn wrap_lua_struct(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    wrap_lua_struct_impl(input)
}

#[proc_macro]
pub fn lua_wrap_handle(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    lua_wrap_handle_impl(input)
}

#[proc_macro]
pub fn to_lua_convert(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    to_lua_struct_impl(input)
}

pub(crate) struct VecExtensionInputs {
    #[allow(dead_code)]
    pub type_path: syn::Path,
    pub wrapper_ident: syn::Ident,
}

impl syn::parse::Parse for VecExtensionInputs {
    fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
        let type_path: syn::Path = input.parse()?;
        let _comma: Token![,] = input.parse()?;
        let wrapper_ident: syn::Ident = input.parse()?;

        Ok(Self {
            type_path,
            wrapper_ident
        })
    }
}

#[proc_macro]
pub fn lua_vec_wrap_extension(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
    let input = parse_macro_input!(input as VecExtensionInputs);

    let wrapper = VecWrapper;
    let method_tokens = wrapper.to_method_tokens(&input.wrapper_ident);

    quote! {
        #method_tokens
    }.into()
}
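For reference, a `wrap_lua_struct!` invocation that the `WrapUsage` parser (defined in `lua_macro.rs` below) would accept looks roughly like this sketch; `lyra_math::Vec3`, `LuaVec3`, and the metamethod argument types are assumptions used for illustration.

// Illustrative only: the math type and Lua-side argument types are assumptions.
wrap_lua_struct!(
    lyra_math::Vec3,
    derives(PartialEq),
    fields = {
        x: f32,
        y: f32,
        z: f32
    },
    new,
    metamethods(Add(LuaVec3, f32), Sub(LuaVec3), Mul(LuaVec3, f32), Unm, Eq(LuaVec3), ToString)
);

This expands to a `LuaVec3` newtype with `Deref`/`DerefMut`, `mlua::UserData` getters and setters for `x`, `y`, and `z`, a `new` constructor, and the listed metamethods.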
@ -1,627 +0,0 @@
|
|||
use proc_macro2::Span;
|
||||
use quote::quote;
|
||||
use syn::{
|
||||
braced, parenthesized, parse_macro_input, punctuated::Punctuated, token, Ident, Path, Token,
|
||||
};
|
||||
|
||||
use crate::{field::{Field, FieldType}, FN_NAME_INTERNAL_REFLECT, FN_NAME_INTERNAL_REFLECT_TYPE};
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq)]
|
||||
enum SkipType {
|
||||
/// Skips implementing
|
||||
LuaReflect,
|
||||
LuaWrapper,
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for SkipType {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let name: Ident = input.parse()?;
|
||||
let name_str = name.to_string();
|
||||
|
||||
match name_str.as_str() {
|
||||
"lua_reflect" => Ok(Self::LuaReflect),
|
||||
"lua_wrapper" => Ok(Self::LuaWrapper),
|
||||
_ => Err(syn::Error::new_spanned(name, "unknown skip type")),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct MetaMethod {
|
||||
name: Ident,
|
||||
// If empty, assume `Self`
|
||||
arg: Vec<Ident>,
|
||||
}
|
||||
|
||||
impl MetaMethod {
|
||||
/// Returns `true` if the identifier names a Lua wrapper type (and is therefore also userdata).
|
||||
fn is_arg_wrapper(ident: &Ident) -> bool {
|
||||
let s = ident.to_string();
|
||||
s.starts_with("Lua")
|
||||
}
|
||||
|
||||
/// Returns `true` if the metamethod takes any arguments.
|
||||
fn does_metamethod_have_arg(metamethod: &Ident) -> bool {
|
||||
let mm_str = metamethod.to_string();
|
||||
let mm_str = mm_str.as_str();
|
||||
match mm_str {
|
||||
"Add" | "Sub" | "Div" | "Mul" | "Mod" | "Eq" | "Shl" | "Shr" | "BAnd" | "BOr"
|
||||
| "BXor" => true,
|
||||
"Unm" | "BNot" | "ToString" => false,
|
||||
_ => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the tokens of the body of the metamethod.
///
/// # Parameters
|
||||
/// * `metamethod` - The ident of the metamethod that is being implemented.
|
||||
/// * `other` - The tokens of the argument used in the metamethod.
|
||||
fn get_method_body(
|
||||
metamethod: &Ident,
|
||||
other: proc_macro2::TokenStream,
|
||||
) -> proc_macro2::TokenStream {
|
||||
let mm_str = metamethod.to_string();
|
||||
let mm_str = mm_str.as_str();
|
||||
match mm_str {
|
||||
"Add" | "Sub" | "Div" | "Mul" | "Mod" => {
|
||||
let symbol = match mm_str {
|
||||
"Add" => quote!(+),
|
||||
"Sub" => quote!(-),
|
||||
"Div" => quote!(/),
|
||||
"Mul" => quote!(*),
|
||||
"Mod" => quote!(%),
|
||||
_ => unreachable!(), // the string was just checked to be one of these
|
||||
};
|
||||
|
||||
quote! {
|
||||
Ok(Self(this.0 #symbol #other))
|
||||
}
|
||||
}
|
||||
"Unm" => {
|
||||
quote! {
|
||||
Ok(Self(-this.0))
|
||||
}
|
||||
}
|
||||
"Eq" => {
|
||||
quote! {
|
||||
Ok(this.0 == #other)
|
||||
}
|
||||
}
|
||||
"Shl" => {
|
||||
quote! {
|
||||
Ok(Self(this.0 << #other))
|
||||
}
|
||||
}
|
||||
"Shr" => {
|
||||
quote! {
|
||||
Ok(Self(this.0 >> #other))
|
||||
}
|
||||
}
|
||||
"BAnd" | "BOr" | "BXor" => {
|
||||
let symbol = match mm_str {
|
||||
"BAnd" => {
|
||||
quote!(&)
|
||||
}
|
||||
"BOr" => {
|
||||
quote!(|)
|
||||
}
|
||||
"BXor" => {
|
||||
quote!(^)
|
||||
}
|
||||
_ => unreachable!(), // the string was just checked to be one of these
|
||||
};
|
||||
|
||||
quote! {
|
||||
Ok(Self(this.0 #symbol #other))
|
||||
}
|
||||
}
|
||||
"BNot" => {
|
||||
quote! {
|
||||
Ok(Self(!this.0))
|
||||
}
|
||||
}
|
||||
"ToString" => {
|
||||
quote! {
|
||||
Ok(format!("{:?}", this.0))
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
syn::Error::new_spanned(metamethod, "unsupported auto implementation of metamethod")
|
||||
.to_compile_error()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn get_body_for_arg(
|
||||
mt_ident: &Ident,
|
||||
arg_ident: &Ident,
|
||||
arg_param: proc_macro2::TokenStream,
|
||||
) -> proc_macro2::TokenStream {
|
||||
let other: proc_macro2::TokenStream = if Self::is_arg_wrapper(arg_ident) {
|
||||
// Lua wrappers must be dereferenced
|
||||
quote! {
|
||||
#arg_param.0
|
||||
}
|
||||
} else {
|
||||
quote! {
|
||||
#arg_param
|
||||
}
|
||||
};
|
||||
Self::get_method_body(&mt_ident, other)
|
||||
}
|
||||
|
||||
pub fn to_tokens(&self, wrapper_ident: &Ident) -> proc_macro2::TokenStream {
|
||||
let wrapped_str = &wrapper_ident.to_string()[3..]; // removes starting 'Lua' from name
|
||||
let mt_ident = &self.name;
|
||||
let mt_lua_name = mt_ident.to_string().to_lowercase();
|
||||
|
||||
if self.arg.is_empty() {
|
||||
let other = quote! {
|
||||
v.0
|
||||
};
|
||||
let body = Self::get_method_body(&self.name, other);
|
||||
|
||||
if Self::does_metamethod_have_arg(&self.name) {
|
||||
quote! {
|
||||
methods.add_meta_method(mlua::MetaMethod::#mt_ident, |_, this, (v,): (#wrapper_ident,)| {
|
||||
#body
|
||||
});
|
||||
}
|
||||
} else {
|
||||
quote! {
|
||||
methods.add_meta_method(mlua::MetaMethod::#mt_ident, |_, this, ()| {
|
||||
#body
|
||||
});
|
||||
}
|
||||
}
|
||||
} else if self.arg.len() == 1 {
|
||||
let first = self.arg.iter().next().unwrap();
|
||||
let body = Self::get_body_for_arg(&self.name, first, quote!(v));
|
||||
|
||||
quote! {
|
||||
methods.add_meta_method(mlua::MetaMethod::#mt_ident, |_, this, (v,): (#first,)| {
|
||||
#body
|
||||
});
|
||||
}
|
||||
} else {
|
||||
// an optional match arm that matches mlua::Value::Number
|
||||
let number_arm = {
|
||||
let num_ident = self.arg.iter().find(|i| {
|
||||
let is = i.to_string();
|
||||
let is = is.as_str();
|
||||
match is {
|
||||
"u8" | "u16" | "u32" | "u64" | "u128" | "i8" | "i16" | "i32" | "i64"
|
||||
| "i128" | "f32" | "f64" => true,
|
||||
_ => false,
|
||||
}
|
||||
});
|
||||
|
||||
if let Some(num_ident) = num_ident {
|
||||
let body =
|
||||
Self::get_body_for_arg(&self.name, num_ident, quote!(n as #num_ident));
|
||||
|
||||
quote! {
|
||||
mlua::Value::Number(n) => {
|
||||
#body
|
||||
},
|
||||
}
|
||||
} else {
|
||||
quote!()
|
||||
}
|
||||
};
|
||||
|
||||
let userdata_arm = {
|
||||
let wrappers: Vec<&Ident> = self
|
||||
.arg
|
||||
.iter()
|
||||
.filter(|i| Self::is_arg_wrapper(i))
|
||||
.collect();
|
||||
|
||||
let if_statements = wrappers.iter().map(|i| {
|
||||
let body = Self::get_method_body(&self.name, quote!(other.0));
|
||||
|
||||
quote! {
|
||||
if let Ok(other) = ud.borrow::<#i>() {
|
||||
#body
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
quote! {
|
||||
mlua::Value::UserData(ud) => {
|
||||
#(#if_statements else)*
|
||||
// this is the body of the else statement
|
||||
{
|
||||
// try to get the name of the userdata for the error message
|
||||
if let Ok(mt) = ud.metatable() {
|
||||
if let Ok(name) = mt.get::<String>("__name") {
|
||||
return Err(mlua::Error::BadArgument {
|
||||
to: Some(format!("{}.__{}", #wrapped_str, #mt_lua_name)),
|
||||
pos: 2,
|
||||
name: Some("rhs".to_string()),
|
||||
cause: std::sync::Arc::new(mlua::Error::runtime(
|
||||
format!("cannot multiply with unknown userdata named {}", name)
|
||||
))
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Err(mlua::Error::BadArgument {
|
||||
to: Some(format!("{}.__{}", #wrapped_str, #mt_lua_name)),
|
||||
pos: 2,
|
||||
name: Some("rhs".to_string()),
|
||||
cause: std::sync::Arc::new(
|
||||
mlua::Error::runtime("cannot multiply with unknown userdata")
|
||||
)
|
||||
})
|
||||
}
|
||||
},
|
||||
}
|
||||
};
|
||||
|
||||
quote! {
|
||||
methods.add_meta_method(mlua::MetaMethod::#mt_ident, |_, this, (v,): (mlua::Value,)| {
|
||||
match v {
|
||||
#number_arm
|
||||
#userdata_arm
|
||||
_ => Err(mlua::Error::BadArgument {
|
||||
to: Some(format!("{}.__{}", #wrapped_str, #mt_lua_name)),
|
||||
pos: 2,
|
||||
name: Some("rhs".to_string()),
|
||||
cause: std::sync::Arc::new(
|
||||
mlua::Error::runtime(format!("cannot multiply with {}", v.type_name()))
|
||||
)
|
||||
})
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for MetaMethod {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let name: Ident = input.parse()?;
|
||||
|
||||
let mut s = Self { name, arg: vec![] };
|
||||
|
||||
// try to parse args
|
||||
if input.peek(syn::token::Paren) {
|
||||
let content;
|
||||
let _parens: syn::token::Paren = parenthesized!(content in input);
|
||||
|
||||
let arg: Punctuated<Ident, Token![,]> =
|
||||
content.parse_terminated(Ident::parse, Token![,])?;
|
||||
s.arg = arg.into_iter().collect(); // convert to Vec<Ident>
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
struct WrapUsage {
|
||||
type_path: syn::Path,
|
||||
/// The extra derives of the type.
|
||||
override_name: Option<Ident>,
|
||||
auto_fields: Vec<Field>,
|
||||
auto_derives: Vec<Ident>,
|
||||
auto_new: bool,
|
||||
meta_methods: Vec<MetaMethod>,
|
||||
skips: Vec<SkipType>,
|
||||
|
||||
extra_fields: Option<syn::Block>,
|
||||
extra_methods: Option<syn::Block>,
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for WrapUsage {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let type_path: syn::Path = input.parse()?;
|
||||
let mut s = Self {
|
||||
type_path,
|
||||
override_name: None,
|
||||
auto_fields: vec![],
|
||||
auto_derives: vec![],
|
||||
extra_fields: None,
|
||||
extra_methods: None,
|
||||
auto_new: false,
|
||||
meta_methods: vec![],
|
||||
skips: vec![],
|
||||
};
|
||||
|
||||
let mut new_ident = None;
|
||||
|
||||
while input.peek(Token![,]) {
|
||||
let _: Token![,] = input.parse()?;
|
||||
|
||||
if input.peek(syn::Ident) {
|
||||
let ident: syn::Ident = input.parse()?;
|
||||
let ident_str = ident.to_string();
|
||||
let ident_str = ident_str.as_str();
|
||||
|
||||
match ident_str {
|
||||
"name" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
let name: Ident = input.parse()?;
|
||||
s.override_name = Some(name);
|
||||
}
|
||||
"new" => {
|
||||
s.auto_new = true;
|
||||
new_ident = Some(ident.clone());
|
||||
}
|
||||
"skip" => {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
|
||||
let terminated = content.parse_terminated(SkipType::parse, Token![,])?;
|
||||
s.skips = terminated.into_iter().collect();
|
||||
}
|
||||
"derives" => {
|
||||
if input.peek(token::Paren) {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
|
||||
let derives: Punctuated<Ident, Token![,]> =
|
||||
content.parse_terminated(Ident::parse, Token![,])?;
|
||||
s.auto_derives = derives.into_iter().collect();
|
||||
}
|
||||
}
|
||||
"fields" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
if input.peek(token::Brace) {
|
||||
let content;
|
||||
let _braced: token::Brace = braced!(content in input);
|
||||
|
||||
let terminated = content.parse_terminated(Field::parse, Token![,])?;
|
||||
s.auto_fields.extend(terminated.into_iter());
|
||||
}
|
||||
}
|
||||
"metamethods" => {
|
||||
if input.peek(token::Paren) {
|
||||
let content;
|
||||
let _bracket: token::Paren = parenthesized!(content in input);
|
||||
|
||||
let meta_methods: Punctuated<MetaMethod, Token![,]> =
|
||||
content.parse_terminated(MetaMethod::parse, Token![,])?;
|
||||
s.meta_methods = meta_methods.into_iter().collect();
|
||||
}
|
||||
}
|
||||
"extra_fields" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
s.extra_fields = Some(input.parse::<syn::Block>()?);
|
||||
}
|
||||
"extra_methods" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
s.extra_methods = Some(input.parse::<syn::Block>()?);
|
||||
}
|
||||
_ => {
|
||||
return Err(syn::Error::new_spanned(
|
||||
ident,
|
||||
format!("unknown wrapper command: '{}'", ident_str),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.auto_new && s.auto_fields.is_empty() {
|
||||
return Err(syn::Error::new_spanned(
|
||||
new_ident.unwrap(),
|
||||
"must specify 'fields' when auto creating new function",
|
||||
));
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a wrapper type for a VecN from the engine math library.
|
||||
pub fn wrap_lua_struct_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
|
||||
let input = parse_macro_input!(input as WrapUsage);
|
||||
|
||||
let path: Path = input.type_path;
|
||||
let type_name = &path
|
||||
.segments
|
||||
.last()
|
||||
.expect("Failure to find typename in macro usage!")
|
||||
.ident;
|
||||
let wrapper_typename = input
|
||||
.override_name
|
||||
.unwrap_or_else(|| Ident::new(&format!("Lua{}", type_name), Span::call_site()));
|
||||
|
||||
let derive_idents_iter = input.auto_derives.iter();
|
||||
let extra_fields = input.extra_fields;
|
||||
let extra_methods = input.extra_methods;
|
||||
|
||||
// the tokens for the new function
|
||||
let new_func_tokens = if input.auto_new {
|
||||
let arg_names = input
|
||||
.auto_fields
|
||||
.iter()
|
||||
.map(|i| Ident::new(&i.field.to_string().to_lowercase(), Span::call_site()));
|
||||
|
||||
let arg_types = input
|
||||
.auto_fields
|
||||
.iter()
|
||||
.map(|i| i.field_ty.get_type_path().unwrap());
|
||||
|
||||
let arg_names_clone = arg_names.clone();
|
||||
let arg_types_clone = arg_types.clone();
|
||||
|
||||
quote! {
|
||||
// arguments for function are not specified since they can be implied from the call
|
||||
// to new(...)
|
||||
methods.add_function("new", |_, ( #(#arg_names_clone),* ): ( #(#arg_types_clone),* ) | {
|
||||
Ok(#wrapper_typename(#path::new( #(#arg_names),* )))
|
||||
});
|
||||
}
|
||||
} else {
|
||||
quote!()
|
||||
};
|
||||
|
||||
let meta_methods_tokens = {
|
||||
let method_tokens = input
|
||||
.meta_methods
|
||||
.iter()
|
||||
.map(|mm| mm.to_tokens(&wrapper_typename));
|
||||
|
||||
quote! {
|
||||
#(#method_tokens)*
|
||||
}
|
||||
};
|
||||
|
||||
let lua_reflects = if input.skips.contains(&SkipType::LuaReflect) {
|
||||
quote!()
|
||||
} else {
|
||||
quote! {
|
||||
methods.add_method(#FN_NAME_INTERNAL_REFLECT, |_, this, ()| {
|
||||
Ok(crate::ScriptBorrow::from_component::<#path>(Some(this.0.clone())))
|
||||
});
|
||||
|
||||
methods.add_function(#FN_NAME_INTERNAL_REFLECT_TYPE, |_, ()| {
|
||||
Ok(crate::ScriptBorrow::from_component::<#path>(None))
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
let lua_wrapper = if input.skips.contains(&SkipType::LuaWrapper) {
|
||||
quote!()
|
||||
} else {
|
||||
quote! {
|
||||
impl lyra_scripting::lua::LuaWrapper for #wrapper_typename {
|
||||
type Wrap = #path;
|
||||
|
||||
fn wrapped_type_id() -> std::any::TypeId {
|
||||
std::any::TypeId::of::<#path>()
|
||||
}
|
||||
|
||||
fn into_wrapped(self) -> Self::Wrap {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
for field in input.auto_fields.iter() {
|
||||
if field.field_ty.is_unknown() && !field.skip_setter {
|
||||
return syn::Error::new(
|
||||
field.field.span(),
|
||||
"missing type of field, must be specified to generate setters",
|
||||
)
|
||||
.to_compile_error()
|
||||
.into();
|
||||
}
|
||||
}
|
||||
|
||||
let fields = input.auto_fields.iter().map(|g| {
|
||||
let field = &g.field;
|
||||
|
||||
let field_getter = match &g.field_ty {
|
||||
FieldType::Wrapped(wrap) => {
|
||||
quote!(#wrap(this.#field.clone()).into_lua(lua))
|
||||
}
|
||||
_ => match &g.getter {
|
||||
Some(body) => {
|
||||
quote!(#body)
|
||||
}
|
||||
None => {
|
||||
quote!(this.#field.clone().into_lua(lua))
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let field_setter = if g.skip_setter {
|
||||
quote! {}
|
||||
} else {
|
||||
let fty = g
|
||||
.field_ty
|
||||
.get_type_path()
|
||||
// should be unreachable due to the checks right before this closure
|
||||
.expect("no field type specified");
|
||||
let s = if g.field_ty.is_wrapped() {
|
||||
quote! {
|
||||
this.#field = #field.0.clone();
|
||||
Ok(())
|
||||
}
|
||||
} else {
|
||||
match &g.setter {
|
||||
Some(body) => {
|
||||
quote!(#body)
|
||||
}
|
||||
None => {
|
||||
quote! {
|
||||
this.#field = #field.clone();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
quote! {
|
||||
fields.add_field_method_set(stringify!(#field), |_, this, #field: #fty| {
|
||||
#s
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
quote! {
|
||||
fields.add_field_method_get(stringify!(#field), |lua, this| {
|
||||
#field_getter
|
||||
});
|
||||
#field_setter
|
||||
}
|
||||
});
|
||||
|
||||
proc_macro::TokenStream::from(quote! {
|
||||
#[derive(Clone, lyra_reflect::Reflect, #(#derive_idents_iter),*)]
|
||||
pub struct #wrapper_typename(#[reflect(skip)] pub(crate) #path);
|
||||
|
||||
impl std::ops::Deref for #wrapper_typename {
|
||||
type Target = #path;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for #wrapper_typename {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::FromLua for #wrapper_typename {
|
||||
fn from_lua(value: mlua::Value, _: &mlua::Lua) -> mlua::Result<Self> {
|
||||
match value {
|
||||
mlua::Value::UserData(ud) => Ok(ud.borrow::<Self>()?.clone()),
|
||||
_ => panic!("Attempt to get {} from a {} value", stringify!(#wrapper_typename), value.type_name()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::UserData for #wrapper_typename {
|
||||
fn add_fields<F: mlua::UserDataFields<Self>>(fields: &mut F) {
|
||||
use mlua::IntoLua;
|
||||
use mlua::FromLua;
|
||||
|
||||
#(#fields)*
|
||||
|
||||
#extra_fields
|
||||
}
|
||||
|
||||
fn add_methods<M: mlua::UserDataMethods<Self>>(methods: &mut M) {
|
||||
use mlua::IntoLua;
|
||||
use mlua::FromLua;
|
||||
|
||||
#lua_reflects
|
||||
|
||||
#new_func_tokens
|
||||
#meta_methods_tokens
|
||||
#extra_methods
|
||||
}
|
||||
}
|
||||
|
||||
#lua_wrapper
|
||||
})
|
||||
}
|
|
@ -1,362 +0,0 @@
|
|||
use proc_macro2::Span;
|
||||
use syn::{braced, parenthesized, parse_macro_input, punctuated::Punctuated, token, Token};
|
||||
use quote::{quote, ToTokens};
|
||||
use crate::{field::Field, FN_NAME_INTERNAL_REFLECT, FN_NAME_INTERNAL_REFLECT_TYPE};
|
||||
|
||||
fn field_table_setter(field: &Field) -> proc_macro2::TokenStream {
|
||||
let ident = &field.field;
|
||||
|
||||
match &field.setter {
|
||||
Some(set) => {
|
||||
quote! {
|
||||
table.set(stringify!(#ident), #set)?;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
let ty = field.field_ty.get_type_path()
|
||||
.expect("no field type specified");
|
||||
|
||||
let arg = if field.field_ty.is_wrapped() {
|
||||
quote!(#ty(self.#ident))
|
||||
} else { quote!(self.#ident) };
|
||||
|
||||
quote! {
|
||||
table.set(stringify!(#ident), #arg)?;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn field_table_getter(field: &Field) -> proc_macro2::TokenStream {
|
||||
let ident = &field.field;
|
||||
|
||||
match &field.getter {
|
||||
Some(get) => {
|
||||
quote! {
|
||||
let #ident = #get;
|
||||
}
|
||||
},
|
||||
None => {
|
||||
let ty = field.field_ty.get_type_path()
|
||||
.expect("no field type specified");
|
||||
|
||||
quote! {
|
||||
let #ident: #ty = table.get(stringify!(#ident))?;
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
fn wrapper_creation(wrapper: &syn::Ident, type_path: &syn::Path, struct_type: StructType, create: Option<&syn::Block>, fields: &Vec<Field>) -> proc_macro2::TokenStream {
|
||||
|
||||
match create {
|
||||
Some(b) => quote!(#b),
|
||||
None => {
|
||||
/* let field_iter = fields.iter().map(|f| match &f.field_ty {
|
||||
crate::field::FieldType::Type(path) => quote!(#path),
|
||||
crate::field::FieldType::Wrapped(path) => quote!(*#path),
|
||||
_ => todo!()
|
||||
}); */
|
||||
let field_iter = fields.iter().map(|f| {
|
||||
let ident = &f.field;
|
||||
if f.field_ty.is_wrapped() && struct_type == StructType::Fields {
|
||||
quote!(#ident: (*#ident).clone())
|
||||
} else {
|
||||
quote!(#ident)
|
||||
}
|
||||
});
|
||||
|
||||
match struct_type {
|
||||
StructType::Fields => {
|
||||
quote! {
|
||||
#wrapper(#type_path {
|
||||
#(
|
||||
#field_iter
|
||||
),*
|
||||
})
|
||||
}
|
||||
},
|
||||
StructType::Tuple => {
|
||||
quote! {
|
||||
#wrapper(#type_path( #(#field_iter),* ))
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
fn get_reflect_lua_functions(ty: &ReflectType, type_path: &syn::Path, set_data: bool) -> proc_macro2::TokenStream {
|
||||
let data = if set_data {
|
||||
quote!(Some(this.into_wrapped()))
|
||||
} else { quote!(None) };
|
||||
|
||||
match ty {
|
||||
ReflectType::Component => {
|
||||
quote! {
|
||||
Ok(ScriptBorrow::from_component::<#type_path>(#data))
|
||||
}
|
||||
},
|
||||
ReflectType::Resource => {
|
||||
quote! {
|
||||
Ok(ScriptBorrow::from_component::<#type_path>(#data))
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
enum ReflectType {
|
||||
//Unknown,
|
||||
Component,
|
||||
Resource,
|
||||
}
|
||||
|
||||
/// The type of the wrapping struct
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
enum StructType {
|
||||
#[default]
|
||||
Fields,
|
||||
Tuple,
|
||||
}
|
||||
|
||||
struct IntoLuaUsage {
|
||||
type_path: syn::Path,
|
||||
struct_type: StructType,
|
||||
override_name: Option<syn::Ident>,
|
||||
table_name: String,
|
||||
derives: Vec<syn::Ident>,
|
||||
fields: Vec<Field>,
|
||||
create: Option<syn::Block>,
|
||||
reflection_type: Option<ReflectType>,
|
||||
}
|
||||
|
||||
impl syn::parse::Parse for IntoLuaUsage {
|
||||
fn parse(input: syn::parse::ParseStream) -> syn::Result<Self> {
|
||||
let type_path: syn::Path = input.parse()?;
|
||||
let type_ident = &type_path
|
||||
.segments
|
||||
.last()
|
||||
.expect("Failure to find typename in macro usage!")
|
||||
.ident;
|
||||
let lua_name = type_ident.to_string();
|
||||
|
||||
let mut s = Self {
|
||||
type_path,
|
||||
struct_type: StructType::Fields,
|
||||
override_name: None,
|
||||
table_name: lua_name,
|
||||
derives: vec![],
|
||||
fields: vec![],
|
||||
create: None,
|
||||
reflection_type: None,
|
||||
};
|
||||
|
||||
while input.peek(Token![,]) {
|
||||
let _: Token![,] = input.parse()?;
|
||||
|
||||
if input.peek(syn::Ident) {
|
||||
let ident: syn::Ident = input.parse()?;
|
||||
let ident_str = ident.to_string();
|
||||
let ident_str = ident_str.as_str();
|
||||
|
||||
match ident_str {
|
||||
"name" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
let name: syn::Ident = input.parse()?;
|
||||
s.override_name = Some(name);
|
||||
},
|
||||
"struct_type" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
let st_token = input.parse::<syn::LitStr>()?;
|
||||
let st_str = st_token.value().to_lowercase();
|
||||
let st_str = st_str.as_str();
|
||||
|
||||
let st = match st_str {
|
||||
"fields" => StructType::Fields,
|
||||
"tuple" => StructType::Tuple,
|
||||
_ => return Err(syn::Error::new_spanned(
|
||||
st_token,
|
||||
format!("unknown struct type: '{}', expected 'fields', or `tuple`", st_str),
|
||||
)),
|
||||
};
|
||||
s.struct_type = st;
|
||||
},
|
||||
"lua_name" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
s.table_name = input.parse::<syn::LitStr>()?.value();
|
||||
},
|
||||
"derives" => {
|
||||
if input.peek(token::Paren) {
|
||||
let content;
|
||||
let _parens: token::Paren = parenthesized!(content in input);
|
||||
|
||||
let derives: Punctuated<syn::Ident, Token![,]> =
|
||||
content.parse_terminated(syn::Ident::parse, Token![,])?;
|
||||
s.derives = derives.into_iter().collect();
|
||||
}
|
||||
},
|
||||
"fields" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
|
||||
if input.peek(token::Brace) {
|
||||
let content;
|
||||
let _braced: token::Brace = braced!(content in input);
|
||||
|
||||
let terminated = content.parse_terminated(Field::parse, Token![,])?;
|
||||
s.fields.extend(terminated.into_iter());
|
||||
}
|
||||
},
|
||||
"create" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
s.create = Some(input.parse()?);
|
||||
},
|
||||
"reflect" => {
|
||||
let _eq: Token![=] = input.parse()?;
|
||||
let ty: syn::Ident = input.parse()?;
|
||||
let ty_str = ty.to_string();
|
||||
let ty_str = ty_str.as_str();
|
||||
|
||||
let ty = match ty_str {
|
||||
"component" => ReflectType::Component,
|
||||
"resource" => ReflectType::Resource,
|
||||
_ => return Err(syn::Error::new_spanned(
|
||||
ident,
|
||||
format!("unknown wrapper type: '{}', expected 'component' or 'resource'", ty_str),
|
||||
)),
|
||||
};
|
||||
|
||||
s.reflection_type = Some(ty);
|
||||
},
|
||||
_ => {
|
||||
return Err(syn::Error::new_spanned(
|
||||
ident,
|
||||
format!("unknown wrapper command: '{}'", ident_str),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if s.reflection_type.is_none() {
|
||||
return Err(syn::Error::new(
|
||||
input.span(),
|
||||
format!("Wrapper type not specified! Expected 'type=component' or 'type=resource'"),
|
||||
));
|
||||
}
|
||||
|
||||
if s.table_name.is_empty() {
|
||||
return Err(syn::Error::new(
|
||||
input.span(),
|
||||
format!("No lua table specified. Use 'lua_name=\"Camera\"'"),
|
||||
))
|
||||
}
|
||||
|
||||
Ok(s)
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_lua_struct_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
|
||||
let input = parse_macro_input!(input as IntoLuaUsage);
|
||||
|
||||
// unwrap is fine since `Some` is ensured in parse impl
|
||||
let reflect_type = input.reflection_type.as_ref().unwrap();
|
||||
let type_path = &input.type_path;
|
||||
let type_name = &type_path
|
||||
.segments
|
||||
.last()
|
||||
.expect("Failure to find typename in macro usage!")
|
||||
.ident;
|
||||
let wrapper = input.override_name
|
||||
.unwrap_or_else(|| syn::Ident::new(&format!("Lua{}", type_name), Span::call_site()));
|
||||
|
||||
let derives_iter = input.derives.into_iter();
|
||||
|
||||
let lua_name = &input.table_name;
|
||||
let field_getters_iter = input.fields.iter().map(|f| field_table_getter(f));
|
||||
let field_setters_iter = input.fields.iter().map(|f| field_table_setter(f));
|
||||
let struct_creator = wrapper_creation(&wrapper, type_path, input.struct_type, input.create.as_ref(), &input.fields);
|
||||
let reflect_fn = get_reflect_lua_functions(reflect_type, &input.type_path, true);
|
||||
let reflect_type_fn = get_reflect_lua_functions(reflect_type, &input.type_path, false);
|
||||
|
||||
quote! {
|
||||
#[derive(Clone, #(#derives_iter),*)]
|
||||
pub struct #wrapper(pub(crate) #type_path);
|
||||
|
||||
impl std::ops::Deref for #wrapper {
|
||||
type Target = #type_path;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::ops::DerefMut for #wrapper {
|
||||
fn deref_mut(&mut self) -> &mut Self::Target {
|
||||
&mut self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::FromLua for #wrapper {
|
||||
fn from_lua(val: mlua::Value, _: &mlua::Lua) -> mlua::Result<Self> {
|
||||
let ty = val.type_name();
|
||||
let table = val.as_table().ok_or(mlua::Error::FromLuaConversionError {
|
||||
from: ty,
|
||||
to: "Table".into(),
|
||||
message: Some("expected Table".into()),
|
||||
})?;
|
||||
|
||||
#(
|
||||
#field_getters_iter
|
||||
)*
|
||||
|
||||
Ok(#struct_creator)
|
||||
}
|
||||
}
|
||||
|
||||
impl mlua::IntoLua for #wrapper {
|
||||
fn into_lua(self, lua: &mlua::Lua) -> mlua::Result<mlua::Value> {
|
||||
let table = lua.create_table()?;
|
||||
#(
|
||||
#field_setters_iter
|
||||
)*
|
||||
|
||||
table.set(
|
||||
#FN_NAME_INTERNAL_REFLECT,
|
||||
lua.create_function(|_, this: Self| {
|
||||
#reflect_fn
|
||||
})?,
|
||||
)?;
|
||||
|
||||
table.set(
|
||||
#FN_NAME_INTERNAL_REFLECT_TYPE,
|
||||
lua.create_function(|_, ()| {
|
||||
#reflect_type_fn
|
||||
})?,
|
||||
)?;
|
||||
|
||||
table.set(mlua::MetaMethod::Type.name(), #lua_name)?;
|
||||
|
||||
Ok(mlua::Value::Table(table))
|
||||
}
|
||||
}
|
||||
|
||||
impl LuaWrapper for #wrapper {
|
||||
type Wrap = #type_path;
|
||||
|
||||
#[inline(always)]
|
||||
fn wrapped_type_id() -> std::any::TypeId {
|
||||
std::any::TypeId::of::<#type_path>()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn into_wrapped(self) -> Self::Wrap {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
}.into_token_stream().into()
|
||||
}
|
|
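Based on the `IntoLuaUsage` parser above, a `to_lua_convert!` invocation would look roughly like the following sketch; the component type and its fields are placeholders for illustration, not taken from this diff.

// NOTE: the component type path and its fields below are illustrative placeholders.
to_lua_convert!(
    lyra_engine::scene::CameraComponent,
    name = LuaCamera,
    lua_name = "Camera",
    reflect = component,
    fields = {
        transform: wrap(LuaTransform),
        fov: f32
    }
);

This generates a `LuaCamera` wrapper that converts to and from a Lua table with `transform` and `fov` keys, and registers the internal reflection functions under `FN_NAME_INTERNAL_REFLECT` and `FN_NAME_INTERNAL_REFLECT_TYPE`.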
@ -1,186 +0,0 @@
|
|||
use quote::quote;
|
||||
use syn::{Path, Ident};
|
||||
use proc_macro2::Span;
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct VecWrapper;
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl VecWrapper {
|
||||
fn vec_size(&self, wrapper_ident: &Ident) -> usize {
|
||||
let name = wrapper_ident.to_string();
|
||||
name[name.len() - 1..].parse::<usize>()
|
||||
.or_else(|_| name[name.len() - 2.. name.len() - 1].parse::<usize>())
|
||||
.expect("Failure to grab Vec size from ident name")
|
||||
}
|
||||
|
||||
/// Returns the name of the scalar (axis) type of the vec (e.g. `f32` for Vec2, `i32` for IVec2, `i64` for I64Vec2).
|
||||
fn vec_axis_type(&self, wrapper_ident: &Ident) -> &'static str {
|
||||
let name = wrapper_ident.to_string();
|
||||
let start = name.find("Vec").unwrap();
|
||||
|
||||
let before = &name[start - 1.. start];
|
||||
match before {
|
||||
"D" => return "f64",
|
||||
"I" => return "i32",
|
||||
"U" => return "u32",
|
||||
"B" => return "bool",
|
||||
_ => {},
|
||||
}
|
||||
//println!("before is {before}");
|
||||
|
||||
let three_before = &name[start - 3.. start];
|
||||
match three_before {
|
||||
"I64" => return "i64",
|
||||
"U64" => return "u64",
|
||||
_ => {},
|
||||
}
|
||||
//println!("three before is {three_before}");
|
||||
|
||||
"f32"
|
||||
}
|
||||
|
||||
pub fn to_field_tokens(&self, wrapped_path: &Path, wrapper_ident: &Ident) -> proc_macro2::TokenStream {
|
||||
let mut consts = vec![quote!(ZERO), quote!(ONE), quote!(X),
|
||||
quote!(Y), ]; // , quote!(AXES)
|
||||
|
||||
let vec_size = self.vec_size(wrapper_ident);
|
||||
let axis_type_name = self.vec_axis_type(wrapper_ident);
|
||||
|
||||
if axis_type_name.contains("b") {
|
||||
return quote! {
|
||||
fields.add_field_method_get("FALSE", #wrapper_ident(#wrapped_path::FALSE));
|
||||
fields.add_field_method_get("TRUE", #wrapper_ident(#wrapped_path::TRUE));
|
||||
};
|
||||
}
|
||||
|
||||
if vec_size >= 3 {
|
||||
consts.push(quote!(Z));
|
||||
|
||||
// no negative numbers for unsigned vecs
|
||||
if !axis_type_name.contains("u") {
|
||||
consts.push(quote!(NEG_Z));
|
||||
}
|
||||
}
|
||||
|
||||
if vec_size == 4 {
|
||||
consts.push(quote!(W));
|
||||
|
||||
// no negative numbers for unsigned vecs
|
||||
if !axis_type_name.contains("u") {
|
||||
consts.push(quote!(NEG_W));
|
||||
}
|
||||
}
|
||||
|
||||
// no negative numbers for unsigned vecs
|
||||
if !axis_type_name.contains("u") {
|
||||
consts.push(quote!(NEG_X));
|
||||
consts.push(quote!(NEG_Y));
|
||||
consts.push(quote!(NEG_ONE));
|
||||
}
|
||||
|
||||
if axis_type_name.contains("f") {
|
||||
consts.push(quote!(NAN))
|
||||
}
|
||||
|
||||
let const_tokens = consts.iter().map(|cnst| {
|
||||
let const_name = cnst.to_string();
|
||||
|
||||
quote! {
|
||||
fields.add_field_method_get(#const_name, #wrapper_ident(#wrapped_path::#cnst));
|
||||
}
|
||||
});
|
||||
|
||||
quote! {
|
||||
#(#const_tokens)*
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_method_tokens(&self, wrapper_ident: &Ident) -> proc_macro2::TokenStream {
|
||||
let vec_size = self.vec_size(wrapper_ident);
|
||||
let axis_type_name = self.vec_axis_type(wrapper_ident);
|
||||
// methods that only some vecs have
|
||||
let mut optional_methods = vec![];
|
||||
|
||||
// boolean vectors dont have much :(
|
||||
if axis_type_name.contains("b") {
|
||||
return quote!(); // TODO: all, any, bitmask, splat
|
||||
}
|
||||
|
||||
if axis_type_name.contains("f") {
|
||||
let type_id = Ident::new(axis_type_name, Span::call_site());
|
||||
|
||||
optional_methods.push(
|
||||
quote! {
|
||||
methods.add_method("clamp_length",
|
||||
|_, this, (min, max): (#type_id, #type_id)| {
|
||||
Ok(#wrapper_ident(this.clamp_length(min, max)))
|
||||
});
|
||||
|
||||
methods.add_method("abs_diff_eq",
|
||||
|_, this, (rhs, max_abs_diff): (#wrapper_ident, #type_id)| {
|
||||
Ok(this.abs_diff_eq(rhs.0, max_abs_diff))
|
||||
});
|
||||
|
||||
methods.add_method("ceil",
|
||||
|_, this, (): ()| {
|
||||
Ok(#wrapper_ident(this.ceil()))
|
||||
});
|
||||
}
|
||||
);
|
||||
|
||||
if vec_size == 2 {
|
||||
// angle_between is deprecated for Vec2, must use angle_to instead.
|
||||
optional_methods.push(
|
||||
quote! {
|
||||
methods.add_method("angle_to",
|
||||
|_, this, (rhs,): (#wrapper_ident,)| {
|
||||
Ok(this.angle_to(rhs.0))
|
||||
});
|
||||
}
|
||||
)
|
||||
} else if vec_size != 4 {
|
||||
optional_methods.push(
|
||||
quote! {
|
||||
methods.add_method("angle_between",
|
||||
|_, this, (rhs,): (#wrapper_ident,)| {
|
||||
Ok(this.angle_between(rhs.0))
|
||||
});
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
if !axis_type_name.contains("u") {
|
||||
optional_methods.push(
|
||||
quote! {
|
||||
methods.add_method("abs",
|
||||
|_, this, (): ()| {
|
||||
Ok(#wrapper_ident(this.abs()))
|
||||
});
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
let optional_methods_iter = optional_methods.iter();
|
||||
quote! {
|
||||
methods.add_method("clamp",
|
||||
|_, this, (min, max): (#wrapper_ident, #wrapper_ident)| {
|
||||
Ok(#wrapper_ident(this.clamp(min.0, max.0)))
|
||||
});
|
||||
|
||||
// TODO: Not all Vecs have this
|
||||
/* methods.add_method("clamp_length",
|
||||
|_, this, (min, max): (f32, f32)| {
|
||||
Ok(#wrapper_ident(this.clamp_length(min, max)))
|
||||
}); */
|
||||
|
||||
methods.add_method("to_array",
|
||||
|_, this, (): ()| {
|
||||
Ok(this.to_array())
|
||||
});
|
||||
|
||||
#(#optional_methods_iter)*
|
||||
}
|
||||
}
|
||||
}
|
|
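The two helpers at the top of `VecWrapper` infer a vector wrapper's size and scalar type purely from the ident's name. A small illustrative test of that naming convention, written as if it lived in this module; the ident strings are examples only.

#[cfg(test)]
mod vec_name_tests {
    use proc_macro2::Span;
    use syn::Ident;

    use super::VecWrapper;

    #[test]
    fn infers_size_and_scalar_type_from_ident() {
        // These wrapper names are examples of the "Lua" + math-type convention.
        let w = VecWrapper;

        let vec3 = Ident::new("LuaVec3", Span::call_site());
        assert_eq!(w.vec_size(&vec3), 3);
        assert_eq!(w.vec_axis_type(&vec3), "f32");

        let dvec2 = Ident::new("LuaDVec2", Span::call_site());
        assert_eq!(w.vec_axis_type(&dvec2), "f64");

        let i64vec4 = Ident::new("LuaI64Vec4", Span::call_site());
        assert_eq!(w.vec_size(&i64vec4), 4);
        assert_eq!(w.vec_axis_type(&i64vec4), "i64");
    }
}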
@ -1,72 +0,0 @@
---Create a `ResQuery` that returns a specific ECS world resource.
---
---@see ResQuery
---@param resource table|userdata
---@return ResQuery
function Res(resource)
    return ResQuery.new(resource)
end

---@alias Query function|table|userdata

---Create a `ChangedQuery` that returns the resource or component only if it has changed
---since the last tick.
---
---@see ChangedQuery
---@param val table|userdata
---@return ChangedQuery
function Changed(val)
    return ChangedQuery.new(val)
end

---Create a `HasQuery` filter that passes only if the entity has the specified component.
---
---@see HasQuery
---@param val table|userdata
---@return HasQuery
function Has(val)
    return HasQuery.new(val)
end

---Create a `NotQuery` filter that allows results only if the inner query returns nothing
---or the inner filter denies the entity.
---
---@see NotQuery
---@param val Query
---@return NotQuery
function Not(val)
    return NotQuery.new(val)
end

---Create an `AnyQuery` filter that allows results if any of the provided queries return something.
---
---The queries are evaluated in the order they were provided.
---
---@see AnyQuery
---@param ... Query
---@return AnyQuery
function Any(...)
    return AnyQuery.new(...)
end

---Create a `TickOfQuery` that retrieves the tick of the resource or of the component on the entity.
---
---@see TickOfQuery
---@param ... table|userdata
---@return TickOfQuery
function TickOf(...)
    return TickOfQuery.new(...)
end

---Create an `OptionalQuery` that allows a query to return nothing.
---
---If the query is a filter, its result is essentially ignored. If the query returns `None`
---or `AlwaysNone`, this query returns `Nil`. If the query results in a value, that value
---is the result of this query.
---
---@see OptionalQuery
---@param q Query
---@return OptionalQuery
function Optional(q)
    return OptionalQuery.new(q)
end
@ -1,94 +0,0 @@
|
|||
---@enum WindowMode
|
||||
WindowMode = {
|
||||
WINDOWED = "windowed",
|
||||
BORDERLESS_FULLSCREEN = "borderless_fullscreen",
|
||||
SIZED_FULLSCREEN = "sized_fullscreen",
|
||||
FULLSCREEN = "fullscreen",
|
||||
}
|
||||
|
||||
---@enum CursorGrabMode
|
||||
CursorGrabMode = {
|
||||
NONE = "none",
|
||||
CONFINED = "confined",
|
||||
LOCKED = "locked",
|
||||
}
|
||||
|
||||
---@enum WindowTheme
|
||||
WindowTheme = {
|
||||
LIGHT = "light",
|
||||
DARK = "dark",
|
||||
}
|
||||
|
||||
---@enum WindowLevel
|
||||
WindowLevel = {
|
||||
ALWAYS_ON_BOTTOM = "always_on_bottom",
|
||||
NORMAL = "normal",
|
||||
ALWAYS_ON_TOP = "always_on_top",
|
||||
}
|
||||
|
||||
---@enum HandleState
|
||||
HandleState = {
|
||||
LOADING = "loading",
|
||||
READY = "ready",
|
||||
ERROR = "error",
|
||||
}
|
||||
|
||||
---@enum ActionKind
|
||||
ActionKind = {
|
||||
BUTTON = "button",
|
||||
AXIS = "axis",
|
||||
}
|
||||
|
||||
---@enum ActionState
|
||||
ActionState = {
|
||||
IDLE = "idle",
|
||||
PRESSED = "pressed",
|
||||
JUST_PRESSED = "just_pressed",
|
||||
JUST_RELEASED = "just_released",
|
||||
AXIS = "axis",
|
||||
OTHER = "other",
|
||||
}
|
||||
|
||||
---@enum FilterMode
|
||||
FilterMode = {
|
||||
NEAREST = "nearest",
|
||||
LINEAR = "linear",
|
||||
}
|
||||
|
||||
---@enum WrappingMode
|
||||
WrappingMode = {
|
||||
CLAMP_TO_EDGE = "clamp_to_edge",
|
||||
MIRRORED_REPEAT = "mirrored_repeat",
|
||||
REPEAT = "repeat",
|
||||
}
|
||||
|
||||
---@enum CameraProjectionMode
|
||||
CameraProjectionMode = {
|
||||
PERSPECTIVE = "perspective",
|
||||
ORTHOGRAPHIC = "orthographic",
|
||||
}
|
||||
|
||||
---@enum DeviceEventKind
|
||||
DeviceEventKind = {
|
||||
ADDED = "added",
|
||||
REMOVED = "removed",
|
||||
MOUSE_MOTION = "mouse_motion",
|
||||
MOUSE_WHEEL = "mouse_wheel",
|
||||
MOTION = "motion",
|
||||
BUTTON = "button",
|
||||
KEY = "key",
|
||||
}
|
||||
|
||||
---@enum NativeKeyCodeKind
|
||||
NativeKeyCodeKind = {
|
||||
ANDROID = "android",
|
||||
MACOS = "macos",
|
||||
WINDOWS = "windows",
|
||||
XKB = "xkb",
|
||||
}
|
||||
|
||||
---@enum ElementState
|
||||
ElementState = {
|
||||
PRESSED = "pressed",
|
||||
RELEASED = "released",
|
||||
}
|