Compare commits

No commits in common. "be8ac84e36de4503491bed0747ca7e1d1501d953" and "0412bc60c07610bc4fba7dd69f743613cbee8fd5" have entirely different histories.

23 changed files with 8 additions and 4096 deletions

.envrc

@@ -1 +0,0 @@
use flake

.gitignore

@@ -1 +0,0 @@
/target

Cargo.lock

File diff suppressed because it is too large.

Cargo.toml

@@ -1,23 +0,0 @@
[package]
name = "khors"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = "1.0.80"
shrev = "1.1.3"
winit = { version = "0.29.15",features = ["rwh_05"] }
vulkano = { git = "https://github.com/vulkano-rs/vulkano.git", branch = "master" }
vulkano-shaders = { git = "https://github.com/vulkano-rs/vulkano.git", branch = "master" }
vulkano-util = { git = "https://github.com/vulkano-rs/vulkano.git", branch = "master" }
flax = { version = "0.6.2", features = ["derive", "serde", "tokio", "tracing"] }
flume = "0.11.0"
parking_lot = "0.12.1"
downcast-rs = "1.2.0"
serde = { version = "1.0.197", features = ["derive"] }
serde-lexpr = "0.1.3"
tokio = { version = "1.36.0", features = ["full"] }
notify = "6.1.1"
notify-debouncer-mini = "0.4.1"

LICENSE

@@ -1,29 +1,11 @@
 BSD 3-Clause License
-Copyright (c) 2024, Klink
+Copyright (c) 2024 klink.
 All rights reserved.
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-1. Redistributions of source code must retain the above copyright notice, this
-list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright notice,
-this list of conditions and the following disclaimer in the documentation
-and/or other materials provided with the distribution.
-3. Neither the name of the copyright holder nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
+3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

README.md

@@ -1,8 +1,3 @@
-# Khors engine
+# khors
-Engine ECS shenanigans with modular architetchure
-Currently nothing to look at
-# Building
-```
-cargo build
-```
+Modular ECS game engine

engine_config.scm

@@ -1 +0,0 @@
((asset_path . "/assets"))

flake.lock

@@ -1,93 +0,0 @@
{
"nodes": {
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"naersk": {
"inputs": {
"nixpkgs": "nixpkgs"
},
"locked": {
"lastModified": 1698420672,
"narHash": "sha256-/TdeHMPRjjdJub7p7+w55vyABrsJlt5QkznPYy55vKA=",
"owner": "nix-community",
"repo": "naersk",
"rev": "aeb58d5e8faead8980a807c840232697982d47b9",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "naersk",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1710272261,
"narHash": "sha256-g0bDwXFmTE7uGDOs9HcJsfLFhH7fOsASbAuOzDC+fhQ=",
"path": "/nix/store/k5l01g2zwhysjyl5zjvg5zxnj0lyxpp1-source",
"rev": "0ad13a6833440b8e238947e47bea7f11071dc2b2",
"type": "path"
},
"original": {
"id": "nixpkgs",
"type": "indirect"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1710637405,
"narHash": "sha256-w/woLwnFyhOeJWPjSWFtMNI2/RZTaAtHySIfm43Chos=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "299d4668ba61600311553920d9fd9c102145b2cb",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"root": {
"inputs": {
"flake-utils": "flake-utils",
"naersk": "naersk",
"nixpkgs": "nixpkgs_2"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

flake.nix

@@ -1,73 +0,0 @@
{
inputs = {
flake-utils.url = "github:numtide/flake-utils";
naersk.url = "github:nix-community/naersk";
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
};
outputs = { self, flake-utils, naersk, nixpkgs }:
flake-utils.lib.eachDefaultSystem (system:
let
pkgs = (import nixpkgs) { inherit system; };
naersk' = pkgs.callPackage naersk { };
libPath = with pkgs;
lib.makeLibraryPath [
libGL
libxkbcommon
wayland
glibc
vulkan-loader
xorg.libX11
xorg.libXcursor
xorg.libXi
xorg.libXrandr
alsa-lib
vulkan-tools
];
in rec {
# For `nix build` & `nix run`:
packages.default = naersk'.buildPackage {
src = ./.;
pname = "khors";
nativeBuildInputs = with pkgs; [
makeWrapper
pkg-config
openssl
xorg.libxcb
];
GIT_HASH = "000000000000000000000000000000";
postInstall = ''
wrapProgram "$out/bin/${packages.default.pname}" --prefix LD_LIBRARY_PATH : "${libPath}"
'';
};
# For `nix develop`:
devShells.default = pkgs.mkShell {
nativeBuildInputs = with pkgs; [
rustc
cargo
cargo-watch
clippy
rustfmt
rust-analyzer
cmake
vulkan-tools
python3
vulkan-tools-lunarg
pkg-config
openssl
xorg.libxcb
alsa-lib
];
LD_LIBRARY_PATH = libPath;
env = {
VK_LAYER_PATH = "${pkgs.vulkan-validation-layers}/share/vulkan/explicit_layer.d";
RUST_BACKTRACE = 1;
RUST_LOG = "debug";
};
};
});
}

@@ -1,146 +0,0 @@
#![warn(dead_code)]
use flax::{Schedule, World};
use anyhow::Result;
use crate::{
core::events::Events,
module::{Module, ModulesStack},
};
#[allow(dead_code)]
pub struct App {
name: String,
modules: ModulesStack,
world: World,
schedule: Schedule,
events: Events,
rx: flume::Receiver<AppEvent>,
running: bool,
event_cleanup_time: std::time::Duration,
}
impl App {
pub fn new() -> Self {
let mut events = Events::new();
let (tx, rx) = flume::unbounded();
events.subscribe_custom(tx);
Self {
name: "ZTest".into(),
modules: ModulesStack::new(),
world: World::new(),
schedule: Schedule::default(),
events,
rx,
running: false,
event_cleanup_time: std::time::Duration::from_secs(60),
}
}
pub fn run(&mut self) -> Result<()> {
self.running = true;
// self.schedule.execute_par(&mut self.world).unwrap();
let world = &mut self.world;
let events = &mut self.events;
let frame_time = std::time::Duration::from_millis(16);
for module in self.modules.iter_mut() {
module.on_update(world, events, frame_time)?;
}
self.handle_events();
Ok(())
}
pub fn handle_events(&mut self) {
for event in self.rx.try_iter() {
match event {
AppEvent::Exit => self.running = false,
}
}
}
pub fn set_schedule(&mut self, schedule: Schedule) {
self.schedule = schedule;
}
pub fn world(&self) -> &World {
&self.world
}
pub fn world_mut(&mut self) -> &mut World {
&mut self.world
}
pub fn events(&self) -> &Events {
&self.events
}
pub fn events_mut(&mut self) -> &mut Events {
&mut self.events
}
/// Pushes a module from the provided init closure to the top of the module stack. The provided
/// closure to construct the module takes in the world and events.
pub fn push_module<F, T>(&mut self, func: F)
where
F: FnOnce(&mut World, &mut Events) -> T,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events);
self.modules.push(module);
}
/// Pushes a module from the provided init closure to the top of the module stack. The provided
/// closure to construct the module takes in the world and events, and may return an error which
/// is propagated to the caller.
pub fn try_push_module<F, T, E>(&mut self, func: F) -> Result<(), E>
where
F: FnOnce(&mut World, &mut Events) -> Result<T, E>,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events)?;
self.modules.push(module);
Ok(())
}
/// Inserts a module from the provided init closure at the given index in the module stack. The provided
/// closure to construct the module takes in the world and events.
pub fn insert_module<F, T>(&mut self, index: usize, func: F)
where
F: FnOnce(&mut World, &mut Events) -> T,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events);
self.modules.insert(index, module);
}
/// Inserts a module from the provided init closure at the given index in the module stack. The provided
/// closure to construct the module takes in the world and events, and may return an error which
/// is propagated to the caller.
pub fn try_insert_module<F, T, E>(&mut self, index: usize, func: F) -> Result<(), E>
where
F: FnOnce(&mut World, &mut Events) -> Result<T, E>,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events)?;
self.modules.insert(index, module);
Ok(())
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)]
pub enum AppEvent {
Exit,
}
impl Default for App {
fn default() -> Self {
Self::new()
}
}
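For reference, a minimal sketch of how a module would be registered through the `App::push_module` API described above. `DebugModule` and `build_app` are invented for this illustration; `App`, `Module`, and `Events` are the types defined in the deleted `app`, `module`, and `core::events` modules shown in this diff, so the sketch assumes it lives inside that crate.

```
use std::time::Duration;

use anyhow::Result;
use flax::World;

use crate::{app::App, core::events::Events, module::Module};

/// Hypothetical no-op module used only for this sketch.
struct DebugModule;

impl Module for DebugModule {
    fn on_update(&mut self, _world: &mut World, _events: &mut Events, _frame_time: Duration) -> Result<()> {
        // A real module would run its systems here, as ConfigModule does below.
        Ok(())
    }
}

fn build_app() -> App {
    let mut app = App::new();
    // The init closure receives &mut World and &mut Events, mirroring ConfigModule::new.
    app.push_module(|_world, _events| DebugModule);
    app
}
```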

@@ -1,42 +0,0 @@
use flax::{component, BoxedSystem, EntityBorrow, Query, System};
use winit::window::Window;
component! {
pub window_width: f32,
pub window: Window,
pub counter: i32,
pub resources,
}
pub fn update_distance_system() -> BoxedSystem {
System::builder()
.with_name("update_distance")
.with_query(
Query::new((window_width().as_mut(), window(), counter().as_mut())).entity(resources()),
)
.build(|mut query: EntityBorrow<_>| {
if let Ok((window_width, _window, counter)) = query.get() {
// println!("Win width: {window_width}");
*(window_width as &mut f32) = *(counter as &mut i32) as f32;
*(counter as &mut i32) += 1;
}
})
.boxed()
}
pub fn log_window_system() -> BoxedSystem {
let query = Query::new((window_width(), window())).entity(resources());
System::builder()
.with_query(query)
.build(|mut q: EntityBorrow<_>| {
if let Ok((width, wind)) = q.get() {
println!("window id: {:?}", (wind as &Window).id());
println!("Config changed width: {width}");
} else {
println!("No config change");
}
})
.boxed()
}

@@ -1,10 +0,0 @@
use std::sync::Arc;
use specs::{Component, VecStorage};
use winit::window::Window;
#[derive(Component, Debug)]
#[storage(VecStorage)]
pub struct EntityWindow {
pub window: Arc<Window>,
}

@@ -1,9 +0,0 @@
use flax::component;
use super::Config;
component! {
pub config: Config,
pub notify_file_event: notify::Event,
pub resources,
}

@@ -1,68 +0,0 @@
use flax::{Schedule, World};
use notify::{Config as NotifyConfig, INotifyWatcher, RecommendedWatcher, RecursiveMode, Watcher};
use serde::{Deserialize, Serialize};
use std::env::current_dir;
use crate::module::Module;
use self::{components::{notify_file_event, resources}, systems::{read_config_system, read_notify_events_system}};
pub mod components;
pub mod systems;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct Config {
pub asset_path: String,
}
#[allow(dead_code)]
pub struct ConfigModule {
schedule: Schedule,
watcher: INotifyWatcher,
watcher_rx: std::sync::mpsc::Receiver<Result<notify::Event, notify::Error>>,
}
impl ConfigModule {
pub fn new(_world: &mut World, _events: &mut crate::core::events::Events) -> Self {
let (tx, rx) = std::sync::mpsc::channel();
let mut watcher = RecommendedWatcher::new(tx, NotifyConfig::default().with_poll_interval(std::time::Duration::from_secs(2))).unwrap();
watcher
.watch(&current_dir().unwrap(), RecursiveMode::NonRecursive)
.unwrap();
let schedule = Schedule::builder()
.with_system(read_config_system())
.with_system(read_notify_events_system())
.build();
Self {
schedule,
watcher,
watcher_rx: rx,
}
}
}
impl Module for ConfigModule {
fn on_update(
&mut self,
world: &mut World,
_events: &mut crate::core::events::Events,
_frame_time: std::time::Duration,
) -> anyhow::Result<()> {
self.schedule.execute_par(world).unwrap();
if let Ok(event) = self.watcher_rx.recv() {
match event {
Ok(e) => {
world.set(resources(), notify_file_event(), e.clone()).unwrap();
}
Err(e) => println!("Watcher error. {}", e),
}
}
Ok(())
}
}

@@ -1,51 +0,0 @@
use std::{fs, path::Path};
use flax::{BoxedSystem, CommandBuffer, EntityBorrow, Query, System};
use serde_lexpr::from_str;
use super::{components::{config, notify_file_event, resources}, Config};
pub fn read_config_system() -> BoxedSystem {
let query = Query::new(notify_file_event()).entity(resources());
System::builder()
.with_name("read_config")
.with_cmd_mut()
.with_query(query)
.build(|cmd: &mut CommandBuffer, mut q: EntityBorrow<_>| {
if let Ok(n_event) = q.get() {
if (n_event as &notify::Event).kind.is_modify() {
println!("file modified: {:?}", (n_event as &notify::Event).paths);
cmd.set(resources(), config(), read_engine_config());
}
}
})
.boxed()
}
fn read_engine_config() -> Config {
let config_path = Path::new("engine_config.scm");
let config_file = fs::read_to_string(config_path).unwrap();
let config: Config = from_str::<Config>(&config_file).expect("Failed to parse config file");
config
}
pub fn read_notify_events_system() -> BoxedSystem {
let query = Query::new(config().as_mut()).entity(resources());
System::builder()
.with_name("first_read_config")
.with_cmd_mut()
.with_query(query)
.build(|cmd: &mut CommandBuffer, mut q: EntityBorrow<_>| {
if let Ok(_config) = q.get() {
return;
} else {
println!("read_notify_events_system: config read");
cmd.set(resources(), config(), read_engine_config());
}
std::thread::sleep(std::time::Duration::from_secs(3));
})
.boxed()
}
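For context, a standalone sketch of the serde-lexpr parsing step that `read_engine_config` above performs, using the same s-expression shape as the removed engine_config.scm. The `Config` struct is copied from the deleted config module; only `serde` and `serde-lexpr` from the removed Cargo.toml are needed.

```
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
struct Config {
    asset_path: String,
}

fn main() {
    // Same contents as the removed engine_config.scm.
    let raw = r#"((asset_path . "/assets"))"#;
    let config: Config = serde_lexpr::from_str(raw).expect("Failed to parse config file");
    assert_eq!(config.asset_path, "/assets");
    println!("{config:?}");
}
```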

@@ -1,183 +0,0 @@
use std::sync::mpsc;
use downcast_rs::{impl_downcast, Downcast};
use parking_lot::Mutex;
use super::Event;
pub trait AnyEventDispatcher: 'static + Send + Sync + Downcast {
fn cleanup(&mut self);
}
impl_downcast!(AnyEventDispatcher);
pub trait AnyEventSender: 'static + Send + Sync + Downcast {}
impl_downcast!(AnyEventSender);
/// Handles event dispatching for a single type of event
pub struct EventDispatcher<T: Event> {
subscribers: Vec<Subscriber<T>>,
pub blocked: bool,
}
impl<T> Default for EventDispatcher<T>
where
T: Event + Clone,
{
fn default() -> Self {
EventDispatcher::new()
}
}
impl<T> EventDispatcher<T>
where
T: Event + Clone,
{
pub fn new() -> Self {
Self {
subscribers: Vec::new(),
blocked: false,
}
}
/// Sends an event to all subscribers. The event is cloned for each registered subscriber. Subscribers that are no longer active are removed separately by `cleanup`, which requires mutable access.
pub fn send(&self, event: T) {
if self.blocked {
return;
}
for subscriber in &self.subscribers {
if (subscriber.filter)(&event) {
subscriber.send(event.clone());
}
}
}
/// Subscribes to events using sender to send events. The subscriber is automatically cleaned
/// up when the receiving end is dropped.
pub fn subscribe<S>(&mut self, sender: S, filter: fn(&T) -> bool)
where
S: 'static + EventSender<T> + Send,
{
self.subscribers.push(Subscriber::new(sender, filter));
}
}
impl<T: Event> AnyEventDispatcher for EventDispatcher<T> {
fn cleanup(&mut self) {
self.subscribers.retain(|val| !val.sender.is_disconnected())
}
}
struct Subscriber<T> {
sender: Box<dyn EventSender<T> + Send>,
filter: fn(&T) -> bool,
}
impl<T: Event> Subscriber<T> {
pub fn new<S>(sender: S, filter: fn(&T) -> bool) -> Self
where
S: 'static + EventSender<T> + Send,
{
Self {
sender: Box::new(sender),
filter,
}
}
pub fn send(&self, event: T) {
self.sender.send(event)
}
}
/// Describes a type which can send events. Implemented for mpsc::channel and crossbeam channel.
pub trait EventSender<T>: 'static + Send + Sync {
/// Send an event
fn send(&self, event: T);
/// Returns true if the sender has been disconnected
fn is_disconnected(&self) -> bool;
}
/// Wrapper for thread safe sender
pub struct MpscSender<T> {
inner: Mutex<(bool, mpsc::Sender<T>)>,
}
impl<T> From<mpsc::Sender<T>> for MpscSender<T> {
fn from(val: mpsc::Sender<T>) -> Self {
Self::new(val)
}
}
impl<T> MpscSender<T> {
pub fn new(inner: mpsc::Sender<T>) -> Self {
Self {
inner: Mutex::new((false, inner)),
}
}
}
impl<T: Event> EventSender<T> for MpscSender<T> {
fn send(&self, event: T) {
let mut inner = self.inner.lock();
match inner.1.send(event) {
Ok(_) => {}
Err(_) => inner.0 = true,
}
}
fn is_disconnected(&self) -> bool {
// TODO
self.inner.lock().0
// self.inner.is_disconnected()
}
}
#[cfg(feature = "crossbeam-channel")]
impl<T: Event> EventSender<T> for crossbeam_channel::Sender<T> {
fn send(&self, event: T) -> bool {
let _ = self.send(event);
}
fn is_disconnected(&self) -> bool {
self.is_disconnected
}
}
impl<T: Event> EventSender<T> for flume::Sender<T> {
fn send(&self, event: T) {
let _ = self.send(event);
}
fn is_disconnected(&self) -> bool {
self.is_disconnected()
}
}
pub fn new_event_dispatcher<T: Event + Clone>() -> Box<dyn AnyEventDispatcher> {
let dispatcher: EventDispatcher<T> = EventDispatcher::new();
Box::new(dispatcher)
}
pub struct ConcreteSender<T> {
inner: Box<dyn EventSender<T>>,
}
impl<T> ConcreteSender<T> {
pub fn new<S: EventSender<T>>(sender: S) -> Self {
Self {
inner: Box::new(sender),
}
}
}
impl<T: Event> EventSender<T> for ConcreteSender<T> {
fn send(&self, event: T) {
self.inner.send(event)
}
fn is_disconnected(&self) -> bool {
self.inner.is_disconnected()
}
}
impl<T: Event> AnyEventSender for ConcreteSender<T> {}
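As a small illustration of the `EventSender` trait defined above, here is a hedged sketch of a custom sender type. `CountingSender` is invented for this example; `Event` and `EventSender` are the items exported by the deleted events module, so the sketch assumes it sits inside that crate.

```
use std::sync::atomic::{AtomicUsize, Ordering};

use crate::core::events::{Event, EventSender};

/// Hypothetical sender that forwards to a flume channel and counts sends.
struct CountingSender<T> {
    inner: flume::Sender<T>,
    sent: AtomicUsize,
}

impl<T: Event> EventSender<T> for CountingSender<T> {
    fn send(&self, event: T) {
        self.sent.fetch_add(1, Ordering::Relaxed);
        // Ignore send errors, mirroring the flume::Sender impl above.
        let _ = self.inner.send(event);
    }

    fn is_disconnected(&self) -> bool {
        self.inner.is_disconnected()
    }
}
```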

@@ -1,208 +0,0 @@
mod dispatcher;
pub use dispatcher::EventSender;
use std::{
any::{type_name, TypeId},
collections::HashMap,
error::Error,
fmt::Display,
};
use self::dispatcher::{
new_event_dispatcher, AnyEventDispatcher, AnyEventSender, ConcreteSender, EventDispatcher,
};
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct AlreadyIntercepted {
ty: &'static str,
}
impl Display for AlreadyIntercepted {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Events of type {:?} have already been intercepted",
self.ty
)
}
}
impl Error for AlreadyIntercepted {}
/// Manages event broadcasting for different types of events.
/// Sending an event will send a clone of the event to all subscribed listeners.
///
/// The event listeners can be anything implementing `EventSender`. Implemented by `std::sync::mpsc::Sender`,
/// `flume::Sender`, `crossbeam_channel::Sender`.
///
/// # Example
/// ```
/// use ivy_base::Events;
/// use std::sync::mpsc;
/// let mut events = Events::new();
///
/// let (tx1, rx1) = mpsc::channel::<&'static str>();
/// events.subscribe(tx1);
///
/// let (tx2, rx2) = mpsc::channel::<&'static str>();
/// events.subscribe(tx2);
///
/// events.send("Hello");
///
/// if let Ok(e) = rx1.try_recv() {
/// println!("1 Received: {}", e);
/// }
///
/// if let Ok(e) = rx2.try_recv() {
/// println!("2 Received: {}", e);
/// }
/// ```
pub struct Events {
dispatchers: HashMap<TypeId, Box<dyn AnyEventDispatcher>>,
// A single receiver to intercept events
intercepts: HashMap<TypeId, Box<dyn AnyEventSender>>,
}
impl Events {
pub fn new() -> Events {
Self {
dispatchers: HashMap::new(),
intercepts: HashMap::new(),
}
}
/// Returns the internal dispatcher for the specified event type.
pub fn dispatcher<T: Event>(&self) -> Option<&EventDispatcher<T>> {
self.dispatchers.get(&TypeId::of::<T>()).map(|val| {
val.downcast_ref::<EventDispatcher<T>>()
.expect("Failed to downcast")
})
}
/// Returns the internal dispatcher for the specified event type.
pub fn dispatcher_mut<T: Event + Clone>(&mut self) -> &mut EventDispatcher<T> {
self.dispatchers
.entry(TypeId::of::<T>())
.or_insert_with(new_event_dispatcher::<T>)
.downcast_mut::<EventDispatcher<T>>()
.expect("Failed to downcast")
}
/// Sends an event of type `T` to all subscribed listeners.
/// If no dispatcher exists for event `T`, a new one will be created.
pub fn send<T: Event + Clone>(&self, event: T) {
if let Some(intercept) = self.intercepts.get(&TypeId::of::<T>()) {
intercept
.downcast_ref::<ConcreteSender<T>>()
.unwrap()
.send(event);
} else if let Some(dispatcher) = self.dispatcher() {
dispatcher.send(event)
}
}
/// Send an event after intercept, this function avoids intercepts.
/// It can also be useful if the message is not supposed to be intercepted
pub fn intercepted_send<T: Event + Clone>(&self, event: T) {
if let Some(dispatcher) = self.dispatcher() {
dispatcher.send(event)
}
}
/// Intercept an event before it is broadcasted. Use
/// `Events::intercepted_send` to send.
pub fn intercept<T: Event, S: EventSender<T>>(
&mut self,
sender: S,
) -> Result<(), AlreadyIntercepted> {
match self.intercepts.entry(TypeId::of::<T>()) {
std::collections::hash_map::Entry::Occupied(_) => Err(AlreadyIntercepted {
ty: type_name::<T>(),
}),
std::collections::hash_map::Entry::Vacant(entry) => {
entry.insert(Box::new(ConcreteSender::new(sender)));
Ok(())
}
}
}
/// Shorthand to subscribe using a flume channel.
pub fn subscribe<T: Event + Clone>(&mut self) -> flume::Receiver<T> {
let (tx, rx) = flume::unbounded();
self.dispatcher_mut().subscribe(tx, |_| true);
dbg!(self.dispatchers.len());
rx
}
/// Subscribes to an event of type T by sending events to the provided
/// channel
pub fn subscribe_custom<S, T: Event>(&mut self, sender: S)
where
S: 'static + EventSender<T> + Send,
{
self.dispatcher_mut().subscribe(sender, |_| true)
}
/// Subscribes to an event of type T by sending events to the provided
/// channel
pub fn subscribe_filter<S, T: Event + Clone>(&mut self, sender: S, filter: fn(&T) -> bool)
where
S: EventSender<T>,
{
self.dispatcher_mut().subscribe(sender, filter)
}
/// Blocks all events of a certain type. All events sent will be silently
/// ignored.
pub fn block<T: Event + Clone>(&mut self, block: bool) {
self.dispatcher_mut::<T>().blocked = block
}
/// Return true if events of type T are blocked
pub fn is_blocked<T: Event + Clone>(&mut self) -> bool {
self.dispatcher_mut::<T>().blocked
}
/// Remove disconnected subscribers
pub fn cleanup(&mut self) {
for (_, dispatcher) in self.dispatchers.iter_mut() {
dispatcher.cleanup()
}
}
}
impl Default for Events {
fn default() -> Self {
Self::new()
}
}
// Blanket type for events.
pub trait Event: Send + Sync + 'static + Clone {}
impl<T: Send + Sync + 'static + Clone> Event for T {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn event_broadcast() {
let mut events = Events::new();
let (tx1, rx1) = flume::unbounded::<&'static str>();
events.subscribe_custom(tx1);
let (tx2, rx2) = flume::unbounded::<&'static str>();
events.subscribe_custom(tx2);
events.send("Hello");
if let Ok(e) = rx1.try_recv() {
assert_eq!(e, "Hello")
}
if let Ok(e) = rx2.try_recv() {
assert_eq!(e, "Hello")
}
}
}
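To complement the doc example above, a short hedged sketch of the flume-based `subscribe` shorthand together with the intercept flow (`intercept` plus `intercepted_send`) of the removed `Events` type. The `Damage` event type and `demo` function are invented for this illustration, and the sketch assumes it lives inside the deleted crate.

```
use crate::core::events::Events;

#[derive(Clone, Debug, PartialEq)]
struct Damage(u32);

fn demo() {
    let mut events = Events::new();

    // Shorthand subscription: returns a flume::Receiver<Damage>.
    let rx = events.subscribe::<Damage>();

    // Route Damage events through an intercept channel; plain `send` now goes here first.
    let (itx, irx) = flume::unbounded::<Damage>();
    events.intercept(itx).expect("Damage was already intercepted");

    events.send(Damage(3));
    let intercepted = irx.try_recv().expect("event should have been intercepted");

    // Forward the (possibly inspected or modified) event to the regular subscribers.
    events.intercepted_send(intercepted);
    assert_eq!(rx.try_recv().ok(), Some(Damage(3)));
}
```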

@@ -1,2 +0,0 @@
pub mod events;
// pub mod render;

@@ -1 +0,0 @@

@@ -1,112 +0,0 @@
use app::App;
use config::ConfigModule;
use tokio::runtime::Builder;
use vulkano_util::{
context::{VulkanoConfig, VulkanoContext},
renderer::VulkanoWindowRenderer,
window::{VulkanoWindows, WindowDescriptor},
};
use winit::{
event::{Event, WindowEvent},
event_loop::{ControlFlow, EventLoopBuilder},
};
mod app;
mod config;
mod core;
mod module;
fn main() {
let event_loop = EventLoopBuilder::new().build().unwrap();
let context = VulkanoContext::new(VulkanoConfig::default());
let mut windows = VulkanoWindows::default();
let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
let (event_tx, event_rx) = flume::unbounded();
runtime.block_on(async {
runtime.spawn(async move {
loop {
let _event = event_rx.recv_async().await.unwrap();
// println!(
// "Tokio got event: {:?} on thread: {:?}",
// event,
// std::thread::current().id()
// );
std::thread::sleep(std::time::Duration::from_secs(1));
}
});
});
let _id = windows.create_window(
&event_loop,
&context,
&WindowDescriptor {
title: "ztest".into(),
present_mode: vulkano::swapchain::PresentMode::Fifo,
..Default::default()
},
|_| {},
);
let primary_window_renderer = windows.get_primary_renderer_mut().unwrap();
let _gfx_queue = context.graphics_queue();
let mut app = App::new();
app.push_module(ConfigModule::new);
event_loop
.run(move |event, elwt| {
elwt.set_control_flow(ControlFlow::Poll);
if process_event(primary_window_renderer, &event, &mut app) {
elwt.exit();
}
event_tx.send(event.clone()).unwrap();
})
.unwrap();
}
pub fn process_event(
renderer: &mut VulkanoWindowRenderer,
event: &Event<()>,
app: &mut App,
) -> bool {
match &event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
return true;
}
Event::WindowEvent {
event: WindowEvent::Resized(..) | WindowEvent::ScaleFactorChanged { .. },
..
} => renderer.resize(),
Event::WindowEvent {
event: WindowEvent::RedrawRequested,
..
} => 'redraw: {
app.run().unwrap();
// Tasks for redrawing:
// 1. Update state based on events
// 2. Compute & Render
// 3. Reset input state
// 4. Update time & title
// The rendering part goes here:
match renderer.window_size() {
[w, h] => {
// Skip this frame when minimized.
if w == 0.0 || h == 0.0 {
break 'redraw;
}
}
}
}
Event::AboutToWait => renderer.window().request_redraw(),
_ => (),
}
false
}

@@ -1,64 +0,0 @@
use std::time::Duration;
use anyhow::Result;
use flax::World;
use crate::core::events::Events;
pub trait Module {
fn on_update(&mut self, world: &mut World, events: &mut Events, frame_time: Duration) -> Result<()>;
}
pub struct ModulesStack {
modules: Vec<Box<dyn Module>>,
}
impl ModulesStack {
pub fn new() -> Self {
Self { modules: Vec::new() }
}
pub fn iter(&self) -> std::slice::Iter<Box<dyn Module>> {
self.modules.iter()
}
pub fn iter_mut(&mut self) -> std::slice::IterMut<Box<dyn Module>> {
self.modules.iter_mut()
}
pub fn push<T: 'static + Module>(&mut self, layer: T) {
let layer = Box::new(layer);
self.modules.push(layer);
}
pub fn insert<T: 'static + Module>(&mut self, index: usize, layer: T) {
let layer = Box::new(layer);
self.modules.insert(index, layer);
}
}
impl Default for ModulesStack {
fn default() -> Self {
Self::new()
}
}
impl<'a> IntoIterator for &'a ModulesStack {
type Item = &'a Box<dyn Module>;
type IntoIter = std::slice::Iter<'a, Box<dyn Module>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a> IntoIterator for &'a mut ModulesStack {
type Item = &'a mut Box<dyn Module>;
type IntoIter = std::slice::IterMut<'a, Box<dyn Module>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}

@@ -1,19 +0,0 @@
use vulkano::device::DeviceFeatures;
use vulkano_util::context::{VulkanoConfig, VulkanoContext};
pub fn make_render_config() -> VulkanoConfig {
let device_features: DeviceFeatures = DeviceFeatures {
dynamic_rendering: true,
..DeviceFeatures::empty()
};
VulkanoConfig {
device_features,
print_device_name: true,
..Default::default()
}
}
pub fn make_render_context() -> VulkanoContext {
VulkanoContext::new(make_render_config())
}

@@ -1,694 +0,0 @@
use std::{collections::HashMap, sync::Arc};
use super::components::EntityWindow;
use specs::prelude::*;
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, CommandBufferBeginInfo, CommandBufferLevel,
CommandBufferUsage, RecordingCommandBuffer, RenderingAttachmentInfo, RenderingInfo,
},
device::{
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceFeatures,
Queue, QueueCreateInfo, QueueFlags,
},
image::{view::ImageView, Image, ImageUsage},
instance::{Instance, InstanceCreateFlags, InstanceCreateInfo},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{ColorBlendAttachmentState, ColorBlendState},
input_assembly::InputAssemblyState,
multisample::MultisampleState,
rasterization::RasterizationState,
subpass::PipelineRenderingCreateInfo,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
DynamicState, GraphicsPipeline, PipelineLayout, PipelineShaderStageCreateInfo,
},
render_pass::{AttachmentLoadOp, AttachmentStoreOp},
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, Version, VulkanError, VulkanLibrary,
};
use winit::window::{Window, WindowId};
pub struct Render {
renderers: HashMap<WindowId, VkRender>,
library: Arc<VulkanLibrary>,
}
impl<'a> System<'a> for Render {
type SystemData = (Entities<'a>, ReadStorage<'a, EntityWindow>);
fn run(&mut self, data: Self::SystemData) {
let (entities, windows) = data;
(&entities, &windows).join().for_each(|(_entity, window)| {
self.renderers
.entry(window.window.id())
.or_insert_with(|| VkRender::new(self.library.clone(), window.window.clone()));
self.renderers.values_mut().for_each(|rend| rend.render());
window.window.request_redraw();
});
}
fn setup(&mut self, world: &mut World) {
Self::SystemData::setup(world);
}
}
impl Default for Render {
fn default() -> Self {
Self {
renderers: HashMap::new(),
library: VulkanLibrary::new().unwrap(),
}
}
}
struct VkRender {
window: Arc<Window>,
device: Arc<Device>,
queue: Arc<Queue>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
viewport: Viewport,
vertex_buffer: Subbuffer<[MyVertex]>,
recreate_swapchain: bool,
swapchain: Arc<Swapchain>,
previous_frame_end: Option<Box<dyn GpuFuture>>,
attachment_image_views: Vec<Arc<ImageView>>,
pipeline: Arc<GraphicsPipeline>,
}
impl VkRender {
pub fn new(library: Arc<VulkanLibrary>, window: Arc<Window>) -> Self {
println!("Created new renderer for window: {:?}", window.id());
let required_extensions = Surface::required_extensions(&window).unwrap();
// Now creating the instance.
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant Vulkan implementations.
// (e.g. MoltenVK)
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
// Choose device extensions that we're going to use. In order to present images to a surface,
// we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
let mut device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
// We then choose which physical device to use. First, we enumerate all the available physical
// devices, then apply filters to narrow them down to those that can support our needs.
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| {
// For this example, we require at least Vulkan 1.3, or a device that has the
// `khr_dynamic_rendering` extension available.
p.api_version() >= Version::V1_3 || p.supported_extensions().khr_dynamic_rendering
})
.filter(|p| {
// Some devices may not support the extensions or features that your application, or
// report properties and limits that are not sufficient for your application. These
// should be filtered out here.
p.supported_extensions().contains(&device_extensions)
})
.filter_map(|p| {
// For each physical device, we try to find a suitable queue family that will execute
// our draw commands.
//
// Devices can provide multiple queues to run commands in parallel (for example a draw
// queue and a compute queue), similar to CPU threads. This is something you have to
// have to manage manually in Vulkan. Queues of the same type belong to the same queue
// family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-world application, you may want to use a separate dedicated transfer queue to
// handle data transfers in parallel with graphics operations. You may also need a
// separate queue for compute operations, if your application uses those.
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing to
// a window surface, as we do in this example, we also need to check that
// queues in this queue family are capable of presenting images to the surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
// The code here searches for the first queue family that is suitable. If none is
// found, `None` is returned to `filter_map`, which disqualifies this physical
// device.
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the application.
// However, not every device is equal, some are preferred over others. Now, we assign each
// physical device a score, and pick the device with the lowest ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-world setting, you may want to use the best-scoring device only as a "default"
// or "recommended" device, and let the user choose the device themself.
.min_by_key(|(p, _)| {
// We assign a lower score to device types that are likely to be faster/better.
match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
}
})
.expect("no suitable physical device found");
if physical_device.api_version() < Version::V1_3 {
device_extensions.khr_dynamic_rendering = true;
}
// Now initializing the device. This is probably the most important object of Vulkan.
//
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
DeviceCreateInfo {
// The list of queues that we are going to use. Here we only use one queue, from the
// previously chosen queue family.
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
// A list of optional features and extensions that our program needs to work correctly.
// Some parts of the Vulkan specs are optional and must be enabled manually at device
// creation. In this example the only things we are going to need are the
// `khr_swapchain` extension that allows us to draw to a window, and
// `khr_dynamic_rendering` if we don't have Vulkan 1.3 available.
enabled_extensions: device_extensions,
// In order to render with Vulkan 1.3's dynamic rendering, we need to enable it here.
// Otherwise, we are only allowed to render with a render pass object, as in the
// standard triangle example. The feature is required to be supported by the device if
// it supports Vulkan 1.3 and higher, or if the `khr_dynamic_rendering` extension is
// available, so we don't need to check for support.
enabled_features: DeviceFeatures {
dynamic_rendering: true,
..DeviceFeatures::empty()
},
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating a
// swapchain allocates the color buffers that will contain the image that will ultimately be
// visible on the screen. These images are returned alongside the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only pass
// values that are allowed by the capabilities.
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
// Choosing the internal format that the images will have.
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
// Some drivers report an `min_image_count` of 1, but fullscreen mode requires at
// least 2. Therefore we must ensure the count is at least 2, otherwise the program
// would crash when entering fullscreen mode on those drivers.
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
// The size of the window, only used to initially setup the swapchain.
//
// NOTE:
// On some drivers the swapchain extent is specified by
// `surface_capabilities.current_extent` and the swapchain size must use this
// extent. This extent is always the same as the window size.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window size.
//
// Both of these cases need the swapchain to use the window size, so we just
// use that.
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
// The alpha mode indicates how the alpha value of the final image will behave. For
// example, you can choose whether the window will be opaque or transparent.
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertices = [
MyVertex {
position: [-0.5, -0.25, 0.1],
},
MyVertex {
position: [0.0, 0.5, 0.1],
},
MyVertex {
position: [0.25, -0.1, 0.1],
},
];
let vertex_buffer = Buffer::from_iter(
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
vertices,
)
.unwrap();
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: r"
#version 450
layout(location = 0) in vec3 position;
void main() {
gl_Position = vec4(position, 1.0);
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: r"
#version 450
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
",
}
}
// At this point, OpenGL initialization would be finished. However in Vulkan it is not. OpenGL
// implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
// manually.
// Before we draw, we have to create what is called a **pipeline**. A pipeline describes how
// a GPU operation is to be performed. It is similar to an OpenGL program, but it also contains
// many settings for customization, all baked into a single object. For drawing, we create
// a **graphics** pipeline, but there are also other types of pipeline.
let pipeline = {
// First, we load the shaders that the pipeline will use:
// the vertex shader and the fragment shader.
//
// A Vulkan shader can in theory contain multiple entry points, so we have to specify which
// one.
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
// Automatically generate a vertex input state from the vertex shader's input interface,
// that takes a single vertex buffer containing `Vertex` structs.
let vertex_input_state = MyVertex::per_vertex().definition(&vs).unwrap();
// Make a list of the shader stages that the pipeline will have.
let stages = [
PipelineShaderStageCreateInfo::new(vs),
PipelineShaderStageCreateInfo::new(fs),
];
// We must now create a **pipeline layout** object, which describes the locations and types of
// descriptor sets and push constants used by the shaders in the pipeline.
//
// Multiple pipelines can share a common layout object, which is more efficient.
// The shaders in a pipeline must use a subset of the resources described in its pipeline
// layout, but the pipeline layout is allowed to contain resources that are not present in the
// shaders; they can be used by shaders in other pipelines that share the same layout.
// Thus, it is a good idea to design shaders so that many pipelines have common resource
// locations, which allows them to share pipeline layouts.
let layout = PipelineLayout::new(
device.clone(),
// Since we only have one pipeline in this example, and thus one pipeline layout,
// we automatically generate the creation info for it from the resources used in the
// shaders. In a real application, you would specify this information manually so that you
// can re-use one layout in multiple pipelines.
PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
.into_pipeline_layout_create_info(device.clone())
.unwrap(),
)
.unwrap();
// We describe the formats of attachment images where the colors, depth and/or stencil
// information will be written. The pipeline will only be usable with this particular
// configuration of the attachment images.
let subpass = PipelineRenderingCreateInfo {
// We specify a single color attachment that will be rendered to. When we begin
// rendering, we will specify a swapchain image to be used as this attachment, so here
// we set its format to be the same format as the swapchain.
color_attachment_formats: vec![Some(swapchain.image_format())],
..Default::default()
};
// Finally, create the pipeline.
GraphicsPipeline::new(
device.clone(),
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),
// How vertex data is read from the vertex buffers into the vertex shader.
vertex_input_state: Some(vertex_input_state),
// How vertices are arranged into primitive shapes.
// The default primitive shape is a triangle.
input_assembly_state: Some(InputAssemblyState::default()),
// How primitives are transformed and clipped to fit the framebuffer.
// We use a resizable viewport, set to draw over the entire window.
viewport_state: Some(ViewportState::default()),
// How polygons are culled and converted into a raster of pixels.
// The default value does not perform any culling.
rasterization_state: Some(RasterizationState::default()),
// How multiple fragment shader samples are converted to a single pixel value.
// The default value does not perform any multisampling.
multisample_state: Some(MultisampleState::default()),
// How pixel values are combined with the values already present in the framebuffer.
// The default value overwrites the old value with the new one, without any blending.
color_blend_state: Some(ColorBlendState::with_attachment_states(
subpass.color_attachment_formats.len() as u32,
ColorBlendAttachmentState::default(),
)),
// Dynamic states allows us to specify parts of the pipeline settings when
// recording the command buffer, before we perform drawing.
// Here, we specify that the viewport should be dynamic.
dynamic_state: [DynamicState::Viewport].into_iter().collect(),
subpass: Some(subpass.into()),
..GraphicsPipelineCreateInfo::layout(layout)
},
)
.unwrap()
};
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let mut viewport = Viewport {
offset: [0.0, 0.0],
extent: [0.0, 0.0],
depth_range: 0.0..=1.0,
};
// When creating the swapchain, we only created plain images. To use them as an attachment for
// rendering, we must wrap then in an image view.
//
// Since we need to draw to multiple images, we are going to create a different image view for
// each image.
let mut attachment_image_views = window_size_dependent_setup(&images, &mut viewport);
// Before we can start creating and recording command buffers, we need a way of allocating
// them. Vulkano provides a command buffer allocator, which manages raw Vulkan command pools
// underneath and provides a safe interface for them.
let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
device.clone(),
Default::default(),
));
// Initialization is finally finished!
// In some situations, the swapchain will become invalid by itself. This includes for example
// when the window is resized (as the images of the swapchain will no longer match the
// window's) or, on Android, when the application went to the background and goes back to the
// foreground.
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain. Here,
// we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
// an object that implements the `GpuFuture` trait, which holds the resources for as long as
// they are in use by the GPU.
//
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
// that, we store the submission of the previous frame here.
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
Self {
window,
device,
queue,
command_buffer_allocator,
viewport,
vertex_buffer,
recreate_swapchain,
swapchain,
previous_frame_end,
attachment_image_views,
pipeline,
}
}
pub fn render(&mut self) {
// Do not draw the frame when the screen size is zero. On Windows, this can
// occur when minimizing the application.
let image_extent: [u32; 2] = self.window.inner_size().into();
if image_extent.contains(&0) {
return;
}
// It is important to call this function from time to time, otherwise resources
// will keep accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU
// has already processed, and frees the resources that are no longer needed.
self.previous_frame_end.as_mut().unwrap().cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the
// window size. In this example that includes the swapchain, the framebuffers and
// the dynamic state viewport.
if self.recreate_swapchain {
let (new_swapchain, new_images) = self
.swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..self.swapchain.create_info()
})
.expect("failed to recreate swapchain");
self.swapchain = new_swapchain;
// Now that we have new swapchain images, we must create new image views from
// them as well.
self.attachment_image_views =
window_size_dependent_setup(&new_images, &mut self.viewport);
self.recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the
// swapchain. If no image is available (which happens if you submit draw commands
// too quickly), then the function will block. This operation returns the index of
// the image that we are allowed to draw upon.
//
// This function can block if no image is available. The parameter is an optional
// timeout after which the function call will return an error.
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(self.swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
self.recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
// `acquire_next_image` can be successful, but suboptimal. This means that the
// swapchain image will still work, but it may not display correctly. With some
// drivers this can be when the window resizes, but it may not cause the swapchain
// to become out of date.
if suboptimal {
self.recreate_swapchain = true;
}
// In order to draw, we have to build a *command buffer*. The command buffer object
// holds the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to
// be optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The
// command buffer will only be executable on that given queue family.
let mut builder = RecordingCommandBuffer::new(
self.command_buffer_allocator.clone(),
self.queue.queue_family_index(),
CommandBufferLevel::Primary,
CommandBufferBeginInfo {
usage: CommandBufferUsage::OneTimeSubmit,
..Default::default()
},
)
.unwrap();
builder
// Before we can draw, we have to *enter a render pass*. We specify which
// attachments we are going to use for rendering here, which needs to match
// what was previously specified when creating the pipeline.
.begin_rendering(RenderingInfo {
// As before, we specify one color attachment, but now we specify the image
// view to use as well as how it should be used.
color_attachments: vec![Some(RenderingAttachmentInfo {
// `Clear` means that we ask the GPU to clear the content of this
// attachment at the start of rendering.
load_op: AttachmentLoadOp::Clear,
// `Store` means that we ask the GPU to store the rendered output in
// the attachment image. We could also ask it to discard the result.
store_op: AttachmentStoreOp::Store,
// The value to clear the attachment with. Here we clear it with a blue
// color.
//
// Only attachments that have `AttachmentLoadOp::Clear` are provided
// with clear values, any others should use `None` as the clear value.
clear_value: Some([0.0, 0.0, 1.0, 1.0].into()),
..RenderingAttachmentInfo::image_view(
// We specify image view corresponding to the currently acquired
// swapchain image, to use for this attachment.
self.attachment_image_views[image_index as usize].clone(),
)
})],
..Default::default()
})
.unwrap()
// We are now inside the first subpass of the render pass.
//
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [self.viewport.clone()].into_iter().collect())
.unwrap()
.bind_pipeline_graphics(self.pipeline.clone())
.unwrap()
.bind_vertex_buffers(0, self.vertex_buffer.clone())
.unwrap();
unsafe {
builder
// We add a draw command.
.draw(self.vertex_buffer.len() as u32, 1, 0, 0)
.unwrap();
}
builder
// We leave the render pass.
.end_rendering()
.unwrap();
// Finish recording the command buffer by calling `end`.
let command_buffer = builder.end().unwrap();
let future = self
.previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)
.unwrap()
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `then_swapchain_present`.
//
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
.then_swapchain_present(
self.queue.clone(),
SwapchainPresentInfo::swapchain_image_index(self.swapchain.clone(), image_index),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
self.previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
self.recreate_swapchain = true;
self.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
}
Err(e) => {
println!("failed to flush future: {e}");
self.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
}
}
}
}
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct MyVertex {
#[format(R32G32B32_SFLOAT)]
position: [f32; 3],
}
fn window_size_dependent_setup(
images: &[Arc<Image>],
viewport: &mut Viewport,
) -> Vec<Arc<ImageView>> {
let extent = images[0].extent();
viewport.extent = [extent[0] as f32, extent[1] as f32];
images
.iter()
.map(|image| ImageView::new_default(image.clone()).unwrap())
.collect::<Vec<_>>()
}