Reorganize project to workspace

Tony Klink 2024-04-01 20:51:39 -06:00
parent 92c0278ef0
commit 960e2f8a37
Signed by: klink
GPG key ID: 85175567C4D19231
39 changed files with 4420 additions and 1189 deletions

src/NOTICE Normal file
View file

@@ -0,0 +1 @@
Please use 'khors-test' as the main test bench

View file

@@ -1,320 +0,0 @@
#![warn(dead_code)]
use std::collections::HashMap;
use crate::{
core::{
events::Events,
module::{Module, ModulesStack, RenderModule as ThreadLocalModule, RenderModulesStack},
},
modules::graphics::egui::{Gui, GuiConfig},
};
use anyhow::Result;
use flax::{Schedule, World};
use vulkano::device::DeviceFeatures;
use vulkano_util::{
context::{VulkanoConfig, VulkanoContext},
window::VulkanoWindows,
};
use winit::{
event::{Event, WindowEvent},
window::WindowId,
};
#[allow(dead_code)]
pub struct App {
name: String,
modules: ModulesStack,
thread_local_modules: RenderModulesStack,
world: World,
schedule: Schedule,
events: Events,
rx: flume::Receiver<AppEvent>,
running: bool,
event_cleanup_time: std::time::Duration,
vk_context: VulkanoContext,
vk_windows: VulkanoWindows,
guis: HashMap<WindowId, Gui>,
}
impl App {
pub fn new() -> Self {
let mut events = Events::new();
let (tx, rx) = flume::unbounded();
events.subscribe_custom(tx);
let schedule = Schedule::builder().build();
let vk_config = VulkanoConfig {
device_features: DeviceFeatures {
dynamic_rendering: true,
..Default::default()
},
..Default::default()
};
let vk_context = VulkanoContext::new(vk_config);
let vk_windows = VulkanoWindows::default();
Self {
name: "Khors".into(),
modules: ModulesStack::new(),
thread_local_modules: RenderModulesStack::new(),
world: World::new(),
schedule,
events,
rx,
running: false,
event_cleanup_time: std::time::Duration::from_secs(60),
vk_context,
vk_windows,
guis: HashMap::new(),
}
}
pub fn run(&mut self) -> Result<()> {
self.running = true;
self.schedule.execute_par(&mut self.world).unwrap();
let vk_context = &mut self.vk_context;
let vk_windows = &mut self.vk_windows;
let world = &mut self.world;
let events = &mut self.events;
let frame_time = std::time::Duration::from_millis(16);
let guis = &mut self.guis;
for module in self.modules.iter_mut() {
module.on_update(world, events, frame_time)?;
}
for module in self.thread_local_modules.iter_mut() {
module.on_update(guis, vk_context, vk_windows, world, events, frame_time)?;
}
self.handle_events();
Ok(())
}
pub fn create_window<T>(&mut self, event_loop: &winit::event_loop::EventLoopWindowTarget<T>)
where
T: Clone + Send + Sync,
{
let window = self.vk_windows.create_window(
event_loop,
&self.vk_context,
&vulkano_util::window::WindowDescriptor {
title: self.name.clone(),
present_mode: vulkano::swapchain::PresentMode::Mailbox,
..Default::default()
},
|_| {},
);
let renderer = self.vk_windows.get_renderer(window).unwrap();
let gui = Gui::new(
event_loop,
renderer.surface().clone(),
renderer.graphics_queue().clone(),
renderer.swapchain_format(),
GuiConfig {
is_overlay: true,
allow_srgb_render_target: false,
..Default::default()
},
);
self.guis.insert(window, gui);
}
pub fn process_event_loop<T>(
&mut self,
event: winit::event::Event<T>,
_elwt: &winit::event_loop::EventLoopWindowTarget<T>,
) -> Result<bool>
where
T: Clone + Send + Sync + 'static,
{
match &event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => {
return Ok(true);
}
Event::WindowEvent {
event: WindowEvent::Focused(_),
..
} => self.events().send(event.clone()),
Event::WindowEvent {
event: WindowEvent::Resized(..) | WindowEvent::ScaleFactorChanged { .. },
window_id,
} => self
.vk_windows
.get_renderer_mut(*window_id)
.unwrap()
.resize(),
Event::WindowEvent {
event: WindowEvent::RedrawRequested,
window_id,
} => 'redraw: {
// Tasks for redrawing:
// 1. Update state based on events
// 2. Compute & Render
// 3. Reset input state
// 4. Update time & title
// The rendering part goes here:
match self
.vk_windows
.get_renderer(*window_id)
.unwrap()
.window_size()
{
[w, h] => {
// Skip this frame when minimized.
if w == 0.0 || h == 0.0 {
break 'redraw;
}
}
}
self.run()?;
}
Event::WindowEvent { window_id, event } => {
let window = self.vk_windows.get_window(*window_id).unwrap();
let gui = self.guis.get_mut(window_id).unwrap();
gui.update(window, event);
}
Event::AboutToWait => {
self.vk_windows.iter().for_each(|(window_id, _)| {
self.vk_windows
.get_window(*window_id)
.unwrap()
.request_redraw()
});
}
_ => (),
}
Ok(false)
}
pub fn handle_events(&mut self) {
for event in self.rx.try_iter() {
match event {
AppEvent::Exit => self.running = false,
}
}
}
#[allow(dead_code)]
pub fn set_schedule(&mut self, schedule: Schedule) {
self.schedule = schedule;
}
#[allow(dead_code)]
pub fn world(&self) -> &World {
&self.world
}
#[allow(dead_code)]
pub fn world_mut(&mut self) -> &mut World {
&mut self.world
}
pub fn events(&self) -> &Events {
&self.events
}
#[allow(dead_code)]
pub fn events_mut(&mut self) -> &mut Events {
&mut self.events
}
/// Pushes a render module from the provided init closure to the top of the render module stack.
/// The closure receives the Vulkan context, windows, schedule, world, and events.
pub fn push_render_module<F, T>(&mut self, func: F)
where
F: FnOnce(
&mut VulkanoContext,
&mut VulkanoWindows,
&mut Schedule,
&mut World,
&mut Events,
) -> T,
T: 'static + ThreadLocalModule,
{
let module = func(
&mut self.vk_context,
&mut self.vk_windows,
&mut self.schedule,
&mut self.world,
&mut self.events,
);
self.thread_local_modules.push(module);
}
/// Pushes a module from the provided init closure to the top of the module stack.
/// The closure receives the schedule, world, and events.
pub fn push_module<F, T>(&mut self, func: F)
where
F: FnOnce(&mut Schedule, &mut World, &mut Events) -> T,
T: 'static + Module,
{
let module = func(&mut self.schedule, &mut self.world, &mut self.events);
self.modules.push(module);
}
/// Pushes a module from the provided init closure to the top of the module stack.
/// The closure receives the world and events, and may return an error which is
/// propagated to the caller.
#[allow(dead_code)]
pub fn try_push_module<F, T, E>(&mut self, func: F) -> Result<(), E>
where
F: FnOnce(&mut World, &mut Events) -> Result<T, E>,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events)?;
self.modules.push(module);
Ok(())
}
/// Inserts a module from the provided init closure at the given index in the module stack.
/// The closure receives the world and events.
#[allow(dead_code)]
pub fn insert_module<F, T>(&mut self, index: usize, func: F)
where
F: FnOnce(&mut World, &mut Events) -> T,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events);
self.modules.insert(index, module);
}
/// Inserts a module from the provided init closure at the given index in the module stack.
/// The closure receives the world and events, and may return an error which is
/// propagated to the caller.
#[allow(dead_code)]
pub fn try_insert_module<F, T, E>(&mut self, index: usize, func: F) -> Result<(), E>
where
F: FnOnce(&mut World, &mut Events) -> Result<T, E>,
T: 'static + Module,
{
let module = func(&mut self.world, &mut self.events)?;
self.modules.insert(index, module);
Ok(())
}
}
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)]
pub enum AppEvent {
Exit,
}
impl Default for App {
fn default() -> Self {
Self::new()
}
}

View file

@@ -1,50 +0,0 @@
use std::collections::HashMap;
use vulkano_util::renderer::VulkanoWindowRenderer;
use winit::{event_loop::EventLoopWindowTarget, window::WindowId};
use crate::modules::graphics::egui::{Gui, GuiConfig};
#[derive(Default)]
pub struct DebugGuiStack {
guis: HashMap<WindowId, Gui>,
}
impl DebugGuiStack {
pub fn add_gui<T>(
&mut self,
window_id: WindowId,
event_loop: &EventLoopWindowTarget<T>,
renderer: &VulkanoWindowRenderer,
is_overlay: bool,
allow_srgb_render_target: bool,
) where
T: Clone + Send + Sync,
{
let gui = Gui::new(
event_loop,
renderer.surface().clone(),
renderer.graphics_queue().clone(),
renderer.swapchain_format(),
GuiConfig {
is_overlay,
allow_srgb_render_target,
..Default::default()
},
);
self.guis.insert(window_id, gui);
}
pub fn remove_gui(&mut self, window_id: WindowId) {
self.guis.remove(&window_id).unwrap();
}
pub fn get(&self, window_id: WindowId) -> Option<&Gui> {
self.guis.get(&window_id)
}
pub fn get_mut(&mut self, window_id: WindowId) -> Option<&mut Gui> {
self.guis.get_mut(&window_id)
}
}
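
For context, a rough sketch of how a render module might drive DebugGuiStack each frame; the `gui_stack`, `window_id`, `event_loop`, `renderer`, `window`, and `window_event` bindings are assumed from the caller and are illustrative only:

// When a window is created, register a debug gui for it...
gui_stack.add_gui(
    window_id,
    event_loop,
    renderer,
    /* is_overlay */ true,
    /* allow_srgb_render_target */ false,
);
// ...and fetch it again while handling that window's events or draws.
if let Some(gui) = gui_stack.get_mut(window_id) {
    gui.update(window, &window_event);
}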

View file

@@ -1,183 +0,0 @@
use std::sync::mpsc;
use downcast_rs::{impl_downcast, Downcast};
use parking_lot::Mutex;
use super::Event;
pub trait AnyEventDispatcher: 'static + Send + Sync + Downcast {
fn cleanup(&mut self);
}
impl_downcast!(AnyEventDispatcher);
pub trait AnyEventSender: 'static + Send + Sync + Downcast {}
impl_downcast!(AnyEventSender);
/// Handles event dispatching for a single type of event
pub struct EventDispatcher<T: Event> {
subscribers: Vec<Subscriber<T>>,
pub blocked: bool,
}
impl<T> Default for EventDispatcher<T>
where
T: Event + Clone,
{
fn default() -> Self {
EventDispatcher::new()
}
}
impl<T> EventDispatcher<T>
where
T: Event + Clone,
{
pub fn new() -> Self {
Self {
subscribers: Vec::new(),
blocked: false,
}
}
/// Sends an event to all subscribers whose filter accepts it. The event is cloned for each
/// registered subscriber. Does nothing while the dispatcher is blocked.
pub fn send(&self, event: T) {
if self.blocked {
return;
}
for subscriber in &self.subscribers {
if (subscriber.filter)(&event) {
subscriber.send(event.clone());
}
}
}
/// Subscribes to events using the provided sender. The subscriber is automatically cleaned
/// up when the receiving end is dropped.
pub fn subscribe<S>(&mut self, sender: S, filter: fn(&T) -> bool)
where
S: 'static + EventSender<T> + Send,
{
self.subscribers.push(Subscriber::new(sender, filter));
}
}
impl<T: Event> AnyEventDispatcher for EventDispatcher<T> {
fn cleanup(&mut self) {
self.subscribers.retain(|val| !val.sender.is_disconnected())
}
}
struct Subscriber<T> {
sender: Box<dyn EventSender<T> + Send>,
filter: fn(&T) -> bool,
}
impl<T: Event> Subscriber<T> {
pub fn new<S>(sender: S, filter: fn(&T) -> bool) -> Self
where
S: 'static + EventSender<T> + Send,
{
Self {
sender: Box::new(sender),
filter,
}
}
pub fn send(&self, event: T) {
self.sender.send(event)
}
}
/// Describes a type which can send events. Implemented for `std::sync::mpsc`, `flume`, and
/// (behind the `crossbeam-channel` feature) `crossbeam_channel` senders.
pub trait EventSender<T>: 'static + Send + Sync {
/// Send an event
fn send(&self, event: T);
/// Returns true if the sender has been disconnected
fn is_disconnected(&self) -> bool;
}
/// Wrapper for thread safe sender
pub struct MpscSender<T> {
inner: Mutex<(bool, mpsc::Sender<T>)>,
}
impl<T> From<mpsc::Sender<T>> for MpscSender<T> {
fn from(val: mpsc::Sender<T>) -> Self {
Self::new(val)
}
}
impl<T> MpscSender<T> {
pub fn new(inner: mpsc::Sender<T>) -> Self {
Self {
inner: Mutex::new((false, inner)),
}
}
}
impl<T: Event> EventSender<T> for MpscSender<T> {
fn send(&self, event: T) {
let mut inner = self.inner.lock();
match inner.1.send(event) {
Ok(_) => {}
Err(_) => inner.0 = true,
}
}
fn is_disconnected(&self) -> bool {
// TODO
self.inner.lock().0
// self.inner.is_disconnected()
}
}
#[cfg(feature = "crossbeam-channel")]
impl<T: Event> EventSender<T> for crossbeam_channel::Sender<T> {
    fn send(&self, event: T) {
        // The inherent `Sender::send` returns a Result; ignore the error if the
        // receiving end has already been dropped.
        let _ = self.send(event);
    }
    fn is_disconnected(&self) -> bool {
        // crossbeam's `Sender` does not expose a disconnection check, so report
        // the sender as still connected and rely on `send` silently failing.
        false
    }
}
impl<T: Event> EventSender<T> for flume::Sender<T> {
fn send(&self, event: T) {
let _ = self.send(event);
}
fn is_disconnected(&self) -> bool {
self.is_disconnected()
}
}
pub fn new_event_dispatcher<T: Event + Clone>() -> Box<dyn AnyEventDispatcher> {
let dispatcher: EventDispatcher<T> = EventDispatcher::new();
Box::new(dispatcher)
}
pub struct ConcreteSender<T> {
inner: Box<dyn EventSender<T>>,
}
impl<T> ConcreteSender<T> {
pub fn new<S: EventSender<T>>(sender: S) -> Self {
Self {
inner: Box::new(sender),
}
}
}
impl<T: Event> EventSender<T> for ConcreteSender<T> {
fn send(&self, event: T) {
self.inner.send(event)
}
fn is_disconnected(&self) -> bool {
self.inner.is_disconnected()
}
}
impl<T: Event> AnyEventSender for ConcreteSender<T> {}
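
As a reference point, any sink can take part in dispatch by implementing EventSender itself. A minimal sketch follows; the `CollectingSender` type is hypothetical and not part of this commit, and it reuses the `parking_lot::Mutex` already imported above:

/// Hypothetical sender that only records events, e.g. for tests.
struct CollectingSender<T> {
    log: Mutex<Vec<T>>,
}
impl<T: Event> EventSender<T> for CollectingSender<T> {
    fn send(&self, event: T) {
        // Store the event instead of forwarding it anywhere.
        self.log.lock().push(event);
    }
    fn is_disconnected(&self) -> bool {
        // Never reported as disconnected, so `cleanup` keeps this subscriber.
        false
    }
}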

View file

@@ -1,208 +0,0 @@
mod dispatcher;
pub use dispatcher::EventSender;
use std::{
any::{type_name, TypeId},
collections::HashMap,
error::Error,
fmt::Display,
};
use self::dispatcher::{
new_event_dispatcher, AnyEventDispatcher, AnyEventSender, ConcreteSender, EventDispatcher,
};
#[derive(Default, Debug, Clone, PartialEq, Eq)]
pub struct AlreadyIntercepted {
ty: &'static str,
}
impl Display for AlreadyIntercepted {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Events of type {:?} have already been intercepted",
self.ty
)
}
}
impl Error for AlreadyIntercepted {}
/// Manages event broadcasting for different types of events.
/// Sending an event will send a clone of the event to all subscribed listeners.
///
/// The event listeners can be anything implementing `EventSender`. Implemented by `std::sync::mpsc::Sender`,
/// `flume::Sender`, `crossbeam_channel::Sender`.
///
/// # Example
/// ```
/// use ivy_base::Events;
/// use std::sync::mpsc;
/// let mut events = Events::new();
///
/// let (tx1, rx1) = mpsc::channel::<&'static str>();
/// events.subscribe(tx1);
///
/// let (tx2, rx2) = mpsc::channel::<&'static str>();
/// events.subscribe(tx2);
///
/// events.send("Hello");
///
/// if let Ok(e) = rx1.try_recv() {
/// println!("1 Received: {}", e);
/// }
///
/// if let Ok(e) = rx2.try_recv() {
/// println!("2 Received: {}", e);
/// }
/// ```
pub struct Events {
dispatchers: HashMap<TypeId, Box<dyn AnyEventDispatcher>>,
// A single receiver to intercept events
intercepts: HashMap<TypeId, Box<dyn AnyEventSender>>,
}
impl Events {
pub fn new() -> Events {
Self {
dispatchers: HashMap::new(),
intercepts: HashMap::new(),
}
}
/// Returns the internal dispatcher for the specified event type.
pub fn dispatcher<T: Event>(&self) -> Option<&EventDispatcher<T>> {
self.dispatchers.get(&TypeId::of::<T>()).map(|val| {
val.downcast_ref::<EventDispatcher<T>>()
.expect("Failed to downcast")
})
}
/// Returns the internal dispatcher for the specified event type.
pub fn dispatcher_mut<T: Event + Clone>(&mut self) -> &mut EventDispatcher<T> {
self.dispatchers
.entry(TypeId::of::<T>())
.or_insert_with(new_event_dispatcher::<T>)
.downcast_mut::<EventDispatcher<T>>()
.expect("Failed to downcast")
}
/// Sends an event of type `T` to all subscribed listeners.
/// If an intercept is registered for `T`, the event is routed to the interceptor instead.
/// If no dispatcher exists for `T`, the event is dropped.
pub fn send<T: Event + Clone>(&self, event: T) {
if let Some(intercept) = self.intercepts.get(&TypeId::of::<T>()) {
intercept
.downcast_ref::<ConcreteSender<T>>()
.unwrap()
.send(event);
} else if let Some(dispatcher) = self.dispatcher() {
dispatcher.send(event)
}
}
/// Sends an event directly to the subscribers, bypassing any registered intercept.
/// Used by an interceptor to forward events, or when a message should not be intercepted.
pub fn intercepted_send<T: Event + Clone>(&self, event: T) {
if let Some(dispatcher) = self.dispatcher() {
dispatcher.send(event)
}
}
/// Intercepts events of type `T` before they are broadcast. The interceptor should use
/// `Events::intercepted_send` to forward events on to the regular subscribers.
pub fn intercept<T: Event, S: EventSender<T>>(
&mut self,
sender: S,
) -> Result<(), AlreadyIntercepted> {
match self.intercepts.entry(TypeId::of::<T>()) {
std::collections::hash_map::Entry::Occupied(_) => Err(AlreadyIntercepted {
ty: type_name::<T>(),
}),
std::collections::hash_map::Entry::Vacant(entry) => {
entry.insert(Box::new(ConcreteSender::new(sender)));
Ok(())
}
}
}
/// Shorthand to subscribe using a flume channel.
pub fn subscribe<T: Event + Clone>(&mut self) -> flume::Receiver<T> {
let (tx, rx) = flume::unbounded();
self.dispatcher_mut().subscribe(tx, |_| true);
dbg!(self.dispatchers.len());
rx
}
/// Subscribes to an event of type T by sending events to the provided
/// channel
pub fn subscribe_custom<S, T: Event>(&mut self, sender: S)
where
S: 'static + EventSender<T> + Send,
{
self.dispatcher_mut().subscribe(sender, |_| true)
}
/// Subscribes to an event of type T by sending events to the provided
/// channel
pub fn subscribe_filter<S, T: Event + Clone>(&mut self, sender: S, filter: fn(&T) -> bool)
where
S: EventSender<T>,
{
self.dispatcher_mut().subscribe(sender, filter)
}
/// Blocks or unblocks all events of a certain type. While blocked, all events sent will be
/// silently ignored.
pub fn block<T: Event + Clone>(&mut self, block: bool) {
self.dispatcher_mut::<T>().blocked = block
}
/// Return true if events of type T are blocked
pub fn is_blocked<T: Event + Clone>(&mut self) -> bool {
self.dispatcher_mut::<T>().blocked
}
/// Remove disconnected subscribers
pub fn cleanup(&mut self) {
for (_, dispatcher) in self.dispatchers.iter_mut() {
dispatcher.cleanup()
}
}
}
impl Default for Events {
fn default() -> Self {
Self::new()
}
}
// Blanket type for events.
pub trait Event: Send + Sync + 'static + Clone {}
impl<T: Send + Sync + 'static + Clone> Event for T {}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn event_broadcast() {
let mut events = Events::new();
let (tx1, rx1) = flume::unbounded::<&'static str>();
events.subscribe_custom(tx1);
let (tx2, rx2) = flume::unbounded::<&'static str>();
events.subscribe_custom(tx2);
events.send("Hello");
if let Ok(e) = rx1.try_recv() {
assert_eq!(e, "Hello")
}
if let Ok(e) = rx2.try_recv() {
assert_eq!(e, "Hello")
}
}
}
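
A short sketch of the intercept flow (`intercept` plus `intercepted_send`) documented above; the `&'static str` event type is only for illustration:

let mut events = Events::new();
// The interceptor receives every `&'static str` event before regular subscribers.
let (intercept_tx, intercept_rx) = flume::unbounded::<&'static str>();
events.intercept(intercept_tx).expect("only one intercept per event type");
// A normal subscriber.
let normal_rx = events.subscribe::<&'static str>();
// `send` is routed to the interceptor instead of the subscribers.
events.send("Hello");
let seen = intercept_rx.try_recv().unwrap();
// The interceptor forwards the event, bypassing the intercept table.
events.intercepted_send(seen);
assert_eq!(normal_rx.try_recv().unwrap(), "Hello");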

View file

@@ -1,4 +0,0 @@
pub mod events;
pub mod module;
pub mod time;
pub mod debug_gui;

View file

@@ -1,126 +0,0 @@
use std::time::Duration;
use anyhow::Result;
use flax::World;
use crate::core::events::Events;
use super::debug_gui::DebugGuiStack;
pub trait Module {
fn on_update(&mut self, world: &mut World, events: &mut Events, frame_time: Duration) -> Result<()>;
}
pub struct ModulesStack {
modules: Vec<Box<dyn Module>>,
}
impl ModulesStack {
pub fn new() -> Self {
Self { modules: Vec::new() }
}
pub fn iter(&self) -> std::slice::Iter<Box<dyn Module>> {
self.modules.iter()
}
pub fn iter_mut(&mut self) -> std::slice::IterMut<Box<dyn Module>> {
self.modules.iter_mut()
}
pub fn push<T: 'static + Module>(&mut self, layer: T) {
let layer = Box::new(layer);
self.modules.push(layer);
}
pub fn insert<T: 'static + Module>(&mut self, index: usize, layer: T) {
let layer = Box::new(layer);
self.modules.insert(index, layer);
}
}
impl Default for ModulesStack {
fn default() -> Self {
Self::new()
}
}
impl<'a> IntoIterator for &'a ModulesStack {
type Item = &'a Box<dyn Module>;
type IntoIter = std::slice::Iter<'a, Box<dyn Module>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a> IntoIterator for &'a mut ModulesStack {
type Item = &'a mut Box<dyn Module>;
type IntoIter = std::slice::IterMut<'a, Box<dyn Module>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
// THREAD LOCAL STUFF
pub trait RenderModule {
fn on_update(
    &mut self,
    gui_stack: &mut DebugGuiStack,
    vk_context: &mut vulkano_util::context::VulkanoContext,
    vk_windows: &mut vulkano_util::window::VulkanoWindows,
    world: &mut World,
    events: &mut Events,
    frame_time: Duration,
) -> Result<()>;
}
pub struct RenderModulesStack {
modules: Vec<Box<dyn RenderModule>>,
}
impl RenderModulesStack {
pub fn new() -> Self {
Self { modules: Vec::new() }
}
pub fn iter(&self) -> std::slice::Iter<Box<dyn RenderModule>> {
self.modules.iter()
}
pub fn iter_mut(&mut self) -> std::slice::IterMut<Box<dyn RenderModule>> {
self.modules.iter_mut()
}
pub fn push<T: 'static + RenderModule>(&mut self, layer: T) {
let layer = Box::new(layer);
self.modules.push(layer);
}
pub fn insert<T: 'static + RenderModule>(&mut self, index: usize, layer: T) {
let layer = Box::new(layer);
self.modules.insert(index, layer);
}
}
impl Default for RenderModulesStack {
fn default() -> Self {
Self::new()
}
}
impl<'a> IntoIterator for &'a RenderModulesStack {
type Item = &'a Box<dyn RenderModule>;
type IntoIter = std::slice::Iter<'a, Box<dyn RenderModule>>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a> IntoIterator for &'a mut RenderModulesStack {
type Item = &'a mut Box<dyn RenderModule>;
type IntoIter = std::slice::IterMut<'a, Box<dyn RenderModule>>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}

View file

@@ -1,43 +0,0 @@
//! Provides time-related functionality for clocks.
use std::time::{Duration, Instant};
use flax::component;
component! {
pub clock: Clock,
}
/// Measures high precision time
#[allow(dead_code)]
pub struct Clock {
start: Instant,
}
#[allow(dead_code)]
impl Clock {
// Creates and starts a new clock
pub fn new() -> Self {
Clock {
start: Instant::now(),
}
}
// Returns the elapsed time
pub fn elapsed(&self) -> Duration {
Instant::now() - self.start
}
// Resets the clock and returns the elapsed time
pub fn reset(&mut self) -> Duration {
let elapsed = self.elapsed();
self.start = Instant::now();
elapsed
}
}
impl Default for Clock {
fn default() -> Self {
Self::new()
}
}
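
A small usage sketch for Clock as a per-frame timer; the 16 ms threshold is arbitrary:

let mut frame_clock = Clock::new();
// ... run one frame's worth of update and render work ...
let frame_time = frame_clock.reset(); // elapsed since the previous reset
if frame_time > std::time::Duration::from_millis(16) {
    println!("slow frame: {:?}", frame_time);
}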

View file

@@ -1,52 +0,0 @@
use anyhow::Result;
use app::App;
use modules::{config::ConfigModule, graphics::RenderModule, window::WindowModule};
use tokio::runtime::Builder;
use winit::event_loop::{ControlFlow, EventLoopBuilder};
mod app;
mod core;
mod modules;
fn main() -> Result<()> {
let event_loop = EventLoopBuilder::new().build()?;
let runtime = Builder::new_multi_thread().enable_all().build()?;
// let (event_tx, event_rx) = flume::unbounded();
runtime.block_on(async {
runtime.spawn(async move {
loop {
std::thread::sleep(std::time::Duration::from_secs(1));
// let _event = event_rx.recv_async().await.unwrap();
// println!(
// "Tokio got event: {:?} on thread: {:?}",
// event,
// std::thread::current().id()
// );
}
});
});
let mut app = App::new(); // TODO: Move renderer into App
app.create_window(&event_loop);
app.push_module(ConfigModule::new);
app.push_module(WindowModule::new);
app.push_render_module(RenderModule::new);
event_loop.run(move |event, elwt| {
elwt.set_control_flow(ControlFlow::Poll);
if app
.process_event_loop(event, elwt)
.expect("Execution failed")
{
elwt.exit();
}
// event_tx.send(event.clone()).unwrap();
})?;
Ok(())
}

View file

@@ -1,9 +0,0 @@
use flax::component;
use super::Config;
component! {
pub config: Config,
pub notify_file_event: notify::Event,
pub resources,
}

View file

@@ -1,54 +0,0 @@
use flax::{Schedule, World};
use serde::{Deserialize, Serialize};
use crate::core::module::Module;
use self::systems::first_read_config_system;
pub mod components;
pub mod systems;
#[derive(Serialize, Deserialize, Debug, PartialEq, Eq)]
pub struct Config {
pub asset_path: String,
}
#[allow(dead_code)]
pub struct ConfigModule {
// watcher: INotifyWatcher,
// watcher_rx: std::sync::mpsc::Receiver<Result<notify::Event, notify::Error>>,
}
impl ConfigModule {
pub fn new(
schedule: &mut Schedule,
_world: &mut World,
_events: &mut crate::core::events::Events,
) -> Self {
let schedule_r = Schedule::builder()
// .with_system(read_config_system())
.with_system(first_read_config_system())
.build();
schedule.append(schedule_r);
Self {
// schedule,
// watcher,
// watcher_rx: rx,
}
}
}
impl Module for ConfigModule {
fn on_update(
&mut self,
_world: &mut World,
_events: &mut crate::core::events::Events,
_frame_time: std::time::Duration,
) -> anyhow::Result<()> {
// println!("ConfigModule on_update");
Ok(())
}
}

View file

@@ -1,53 +0,0 @@
use std::{fs, path::Path};
use flax::{BoxedSystem, CommandBuffer, EntityBorrow, Query, System};
use serde_lexpr::from_str;
use super::{components::{config, notify_file_event, resources}, Config};
#[allow(dead_code)]
pub fn read_config_system() -> BoxedSystem {
let query = Query::new(notify_file_event()).entity(resources());
System::builder()
.with_name("read_config")
.with_cmd_mut()
.with_query(query)
.build(|cmd: &mut CommandBuffer, mut _q: EntityBorrow<_>| {
// if let Ok(n_event) = q.get() {
// println!("here");
// if (n_event as &notify::Event).kind.is_modify() {
// println!("file modified: {:?}", (n_event as &notify::Event).paths);
cmd.set(resources(), config(), read_engine_config());
// }
// }
})
.boxed()
}
fn read_engine_config() -> Config {
let config_path = Path::new("engine_config.scm");
let config_file = fs::read_to_string(config_path).unwrap();
let config: Config = from_str::<Config>(&config_file).expect("Failed to parse config file");
config
}
pub fn first_read_config_system() -> BoxedSystem {
let query = Query::new(config().as_mut()).entity(resources());
System::builder()
.with_name("first_read_config")
.with_cmd_mut()
.with_query(query)
.build(|cmd: &mut CommandBuffer, mut q: EntityBorrow<_>| {
if let Ok(_config) = q.get() {
return;
} else {
println!("read_notify_events_system: config read");
cmd.set(resources(), config(), read_engine_config());
}
std::thread::sleep(std::time::Duration::from_secs(3));
})
.boxed()
}

View file

@@ -1,321 +0,0 @@
// Copyright (c) 2021 Okko Hakola, 2024 Klink
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use egui::{ClippedPrimitive, TexturesDelta};
use egui_winit::winit::event_loop::EventLoopWindowTarget;
use vulkano::{
command_buffer::CommandBuffer, device::Queue, format::{Format, NumericFormat}, image::{sampler::SamplerCreateInfo, view::ImageView, SampleCount}, render_pass::Subpass, swapchain::Surface, sync::GpuFuture
};
use winit::window::Window;
use super::{
renderer::{RenderResources, Renderer},
utils::{immutable_texture_from_bytes, immutable_texture_from_file},
};
pub struct GuiConfig {
/// Allows supplying sRGB ImageViews as render targets instead of just UNORM ImageViews, defaults to false.
/// **Using sRGB will cause minor discoloration of UI elements** due to blending in linear color space and not
/// sRGB as Egui expects.
///
/// If you would like to visually compare between UNORM and sRGB render targets, run the `demo_app` example of
/// this crate.
pub allow_srgb_render_target: bool,
/// Whether to render gui as overlay. Only relevant in the case of `Gui::new`, not when using
/// subpass. Determines whether the pipeline should clear the target image.
pub is_overlay: bool,
/// Multisample count. Defaults to 1. If you use more than 1, you'll have to ensure your
/// pipeline and target image matches that.
pub samples: SampleCount,
}
impl Default for GuiConfig {
fn default() -> Self {
GuiConfig {
allow_srgb_render_target: false,
is_overlay: false,
samples: SampleCount::Sample1,
}
}
}
impl GuiConfig {
pub fn validate(&self, output_format: Format) {
if output_format.numeric_format_color().unwrap() == NumericFormat::SRGB {
assert!(
self.allow_srgb_render_target,
"Using an output format with sRGB requires `GuiConfig::allow_srgb_render_target` \
to be set! Egui prefers UNORM render targets. Using sRGB will cause minor \
discoloration of UI elements due to blending in linear color space and not sRGB \
as Egui expects."
);
}
}
}
pub struct Gui {
pub egui_ctx: egui::Context,
pub egui_winit: egui_winit::State,
renderer: Renderer,
surface: Arc<Surface>,
shapes: Vec<egui::epaint::ClippedShape>,
textures_delta: egui::TexturesDelta,
}
impl Gui {
/// Creates a new Egui-to-Vulkano integration with the necessary parameters.
/// This is to be called once we have access to the winit window surface and graphics queue.
/// Created this way, the renderer owns its own render pass, which is useful e.g. for placing
/// your render pass' images onto egui windows.
pub fn new<T>(
event_loop: &EventLoopWindowTarget<T>,
surface: Arc<Surface>,
gfx_queue: Arc<Queue>,
output_format: Format,
config: GuiConfig,
) -> Gui {
config.validate(output_format);
let renderer = Renderer::new_with_render_pass(
gfx_queue,
output_format,
config.is_overlay,
config.samples,
);
Self::new_internal(event_loop, surface, renderer)
}
/// Same as `new` but instead of integration owning a render pass, egui renders on your subpass
pub fn new_with_subpass<T>(
event_loop: &EventLoopWindowTarget<T>,
surface: Arc<Surface>,
gfx_queue: Arc<Queue>,
subpass: Subpass,
output_format: Format,
config: GuiConfig,
) -> Gui {
config.validate(output_format);
let renderer = Renderer::new_with_subpass(gfx_queue, output_format, subpass);
Self::new_internal(event_loop, surface, renderer)
}
/// Shared constructor used by `new` and `new_with_subpass`.
fn new_internal<T>(
event_loop: &EventLoopWindowTarget<T>,
surface: Arc<Surface>,
renderer: Renderer,
) -> Gui {
let max_texture_side = renderer
.queue()
.device()
.physical_device()
.properties()
.max_image_dimension2_d as usize;
let egui_ctx: egui::Context = Default::default();
let egui_winit = egui_winit::State::new(
egui_ctx.clone(),
egui_ctx.viewport_id(),
event_loop,
Some(surface_window(&surface).scale_factor() as f32),
Some(max_texture_side),
);
Gui {
egui_ctx,
egui_winit,
renderer,
surface,
shapes: vec![],
textures_delta: Default::default(),
}
}
/// Returns the pixels per point of the window of this gui.
fn pixels_per_point(&self) -> f32 {
egui_winit::pixels_per_point(&self.egui_ctx, surface_window(&self.surface))
}
/// Returns a set of resources used to construct the render pipeline. These can be reused
/// to create additional pipelines and buffers to be rendered in a `PaintCallback`.
pub fn render_resources(&self) -> RenderResources {
self.renderer.render_resources()
}
/// Updates context state by winit window event.
/// Returns `true` if egui wants exclusive use of this event
/// (e.g. a mouse click on an egui window, or entering text into a text field).
/// For instance, if you use egui for a game, you want to first call this
/// and only when this returns `false` pass on the events to your game.
///
/// Note that egui uses `tab` to move focus between elements, so this will always return `true` for tabs.
pub fn update(&mut self, window: &Window, winit_event: &winit::event::WindowEvent) -> bool {
self.egui_winit
.on_window_event(window, winit_event)
.consumed
}
/// Begins Egui frame & determines what will be drawn later. This must be called before draw, and after `update` (winit event).
pub fn immediate_ui(&mut self, layout_function: impl FnOnce(&mut Self)) {
let raw_input = self
.egui_winit
.take_egui_input(surface_window(&self.surface));
self.egui_ctx.begin_frame(raw_input);
// Render Egui
layout_function(self);
}
/// If you wish to better control when to begin frame, do so by calling this function
/// (Finish by drawing)
pub fn begin_frame(&mut self) {
let raw_input = self
.egui_winit
.take_egui_input(surface_window(&self.surface));
self.egui_ctx.begin_frame(raw_input);
}
/// Renders ui on `final_image` & Updates cursor icon
/// Finishes Egui frame
/// - `before_future` = Vulkano's GpuFuture
/// - `final_image` = Vulkano's image (render target)
pub fn draw_on_image<F>(
&mut self,
before_future: F,
final_image: Arc<ImageView>,
) -> Box<dyn GpuFuture>
where
F: GpuFuture + 'static,
{
if !self.renderer.has_renderpass() {
panic!(
"Gui integration has been created with subpass, use `draw_on_subpass_image` \
instead"
)
}
let (clipped_meshes, textures_delta) = self.extract_draw_data_at_frame_end();
self.renderer.draw_on_image(
&clipped_meshes,
&textures_delta,
self.pixels_per_point(),
before_future,
final_image,
)
}
/// Creates commands for rendering ui on subpass' image and returns the command buffer for execution on your side
/// - Finishes Egui frame
/// - You must execute the secondary command buffer yourself
pub fn draw_on_subpass_image(
&mut self,
image_dimensions: [u32; 2],
) -> Arc<CommandBuffer> {
if self.renderer.has_renderpass() {
panic!(
"Gui integration has been created with its own render pass, use `draw_on_image` \
instead"
)
}
let (clipped_meshes, textures_delta) = self.extract_draw_data_at_frame_end();
self.renderer.draw_on_subpass_image(
&clipped_meshes,
&textures_delta,
self.pixels_per_point(),
image_dimensions,
)
}
fn extract_draw_data_at_frame_end(&mut self) -> (Vec<ClippedPrimitive>, TexturesDelta) {
self.end_frame();
let shapes = std::mem::take(&mut self.shapes);
let textures_delta = std::mem::take(&mut self.textures_delta);
let clipped_meshes = self.egui_ctx.tessellate(shapes, self.pixels_per_point());
(clipped_meshes, textures_delta)
}
fn end_frame(&mut self) {
let egui::FullOutput {
platform_output,
textures_delta,
shapes,
pixels_per_point: _,
viewport_output: _,
} = self.egui_ctx.end_frame();
self.egui_winit.handle_platform_output(
surface_window(&self.surface),
platform_output,
);
self.shapes = shapes;
self.textures_delta = textures_delta;
}
/// Registers a user image from Vulkano image view to be used by egui
pub fn register_user_image_view(
&mut self,
image: Arc<ImageView>,
sampler_create_info: SamplerCreateInfo,
) -> egui::TextureId {
self.renderer.register_image(image, sampler_create_info)
}
/// Registers a user image to be used by egui
/// - `image_file_bytes`: e.g. include_bytes!("./assets/tree.png")
/// - `format`: e.g. vulkano::format::Format::R8G8B8A8Unorm
pub fn register_user_image(
&mut self,
image_file_bytes: &[u8],
format: vulkano::format::Format,
sampler_create_info: SamplerCreateInfo,
) -> egui::TextureId {
let image = immutable_texture_from_file(
self.renderer.allocators(),
self.renderer.queue(),
image_file_bytes,
format,
)
.expect("Failed to create image");
self.renderer.register_image(image, sampler_create_info)
}
pub fn register_user_image_from_bytes(
&mut self,
image_byte_data: &[u8],
dimensions: [u32; 2],
format: vulkano::format::Format,
sampler_create_info: SamplerCreateInfo,
) -> egui::TextureId {
let image = immutable_texture_from_bytes(
self.renderer.allocators(),
self.renderer.queue(),
image_byte_data,
dimensions,
format,
)
.expect("Failed to create image");
self.renderer.register_image(image, sampler_create_info)
}
/// Unregisters a user image
pub fn unregister_user_image(&mut self, texture_id: egui::TextureId) {
self.renderer.unregister_image(texture_id);
}
/// Access egui's context (which can be used to e.g. set fonts, visuals etc)
pub fn context(&self) -> egui::Context {
self.egui_ctx.clone()
}
}
// Helper to retrieve Window from surface object
fn surface_window(surface: &Surface) -> &Window {
surface.object().unwrap().downcast_ref::<Window>().unwrap()
}
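
The event-forwarding pattern that `Gui::update` documents, sketched roughly; `gui`, `window`, and `event` are assumed from the surrounding winit event loop, and `game_handle_event` is a hypothetical application-side handler:

// Let egui consume the event first, and only forward it to the rest of the
// application when egui did not use it.
if let winit::event::Event::WindowEvent { event, .. } = &event {
    let consumed = gui.update(&window, event);
    if !consumed {
        game_handle_event(event); // hypothetical
    }
}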

View file

@@ -1,19 +0,0 @@
// Copyright (c) 2021 Okko Hakola, 2024 Klink
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
mod integration;
mod renderer;
mod utils;
pub use egui;
pub use integration::*;
#[allow(unused_imports)]
pub use renderer::{CallbackContext, CallbackFn, RenderResources};
#[allow(unused_imports)]
pub use utils::{immutable_texture_from_bytes, immutable_texture_from_file};

File diff suppressed because it is too large

View file

@@ -1,144 +0,0 @@
// Copyright (c) 2021 Okko Hakola, 2024 Klink
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or
// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
// at your option. All files in the project carrying such
// notice may not be copied, modified, or distributed except
// according to those terms.
use std::sync::Arc;
use image::RgbaImage;
use vulkano::{
buffer::{AllocateBufferError, Buffer, BufferCreateInfo, BufferUsage},
command_buffer::{
allocator::{StandardCommandBufferAllocator, StandardCommandBufferAllocatorCreateInfo}, CommandBufferBeginInfo, CommandBufferLevel, CommandBufferUsage, CopyBufferToImageInfo, RecordingCommandBuffer
},
descriptor_set::allocator::StandardDescriptorSetAllocator,
device::{Device, Queue},
image::{view::ImageView, AllocateImageError, Image, ImageCreateInfo, ImageType, ImageUsage},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
Validated, ValidationError, VulkanError,
};
#[derive(Debug)]
pub enum ImageCreationError {
Vulkan(Validated<VulkanError>),
AllocateImage(Validated<AllocateImageError>),
AllocateBuffer(Validated<AllocateBufferError>),
Validation(Box<ValidationError>),
}
pub fn immutable_texture_from_bytes(
allocators: &Allocators,
queue: Arc<Queue>,
byte_data: &[u8],
dimensions: [u32; 2],
format: vulkano::format::Format,
) -> Result<Arc<ImageView>, ImageCreationError> {
let mut cbb = RecordingCommandBuffer::new(
allocators.command_buffer.clone(),
queue.queue_family_index(),
CommandBufferLevel::Primary,
CommandBufferBeginInfo {
usage: CommandBufferUsage::OneTimeSubmit,
..Default::default()
}
)
.map_err(ImageCreationError::Vulkan)?;
let texture_data_buffer = Buffer::from_iter(
allocators.memory.clone(),
BufferCreateInfo {
usage: BufferUsage::TRANSFER_SRC,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_HOST
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
byte_data.iter().cloned(),
)
.map_err(ImageCreationError::AllocateBuffer)?;
let texture = Image::new(
allocators.memory.clone(),
ImageCreateInfo {
image_type: ImageType::Dim2d,
format,
extent: [dimensions[0], dimensions[1], 1],
usage: ImageUsage::TRANSFER_DST | ImageUsage::SAMPLED,
..Default::default()
},
AllocationCreateInfo::default(),
)
.map_err(ImageCreationError::AllocateImage)?;
cbb.copy_buffer_to_image(CopyBufferToImageInfo::buffer_image(
texture_data_buffer,
texture.clone(),
))
.map_err(ImageCreationError::Validation)?;
let _fut = cbb.end().unwrap().execute(queue).unwrap();
Ok(ImageView::new_default(texture).unwrap())
}
pub fn immutable_texture_from_file(
allocators: &Allocators,
queue: Arc<Queue>,
file_bytes: &[u8],
format: vulkano::format::Format,
) -> Result<Arc<ImageView>, ImageCreationError> {
use image::GenericImageView;
let img = image::load_from_memory(file_bytes).expect("Failed to load image from bytes");
let rgba = if let Some(rgba) = img.as_rgba8() {
rgba.to_owned().to_vec()
} else {
// Convert rgb to rgba
let rgb = img.as_rgb8().unwrap().to_owned();
let mut raw_data = vec![];
for val in rgb.chunks(3) {
raw_data.push(val[0]);
raw_data.push(val[1]);
raw_data.push(val[2]);
raw_data.push(255);
}
let new_rgba = RgbaImage::from_raw(rgb.width(), rgb.height(), raw_data).unwrap();
new_rgba.to_vec()
};
let dimensions = img.dimensions();
immutable_texture_from_bytes(
allocators,
queue,
&rgba,
[dimensions.0, dimensions.1],
format,
)
}
pub struct Allocators {
pub memory: Arc<StandardMemoryAllocator>,
pub descriptor_set: Arc<StandardDescriptorSetAllocator>,
pub command_buffer: Arc<StandardCommandBufferAllocator>,
}
impl Allocators {
pub fn new_default(device: &Arc<Device>) -> Self {
Self {
memory: Arc::new(StandardMemoryAllocator::new_default(device.clone())),
descriptor_set: Arc::new(StandardDescriptorSetAllocator::new(device.clone(), Default::default())),
command_buffer: Arc::new(StandardCommandBufferAllocator::new(
device.clone(),
StandardCommandBufferAllocatorCreateInfo {
secondary_buffer_count: 32,
..Default::default()
},
)),
}
}
}
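
A hedged sketch of uploading a texture with these helpers, assuming a `VulkanoContext` named `context`; the asset path and format choice are illustrative, not part of this commit:

let allocators = Allocators::new_default(context.device());
let texture_view = immutable_texture_from_file(
    &allocators,
    context.graphics_queue().clone(),
    include_bytes!("../assets/icon.png"), // hypothetical asset path
    vulkano::format::Format::R8G8B8A8_SRGB,
)
.expect("texture upload failed");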

View file

@@ -1,7 +0,0 @@
#[derive(Debug, Clone, Copy, PartialEq)]
#[allow(dead_code)]
pub enum GraphicsEvent {
/// Signifies that the swapchain was recreated. This requires images that
/// reference the old swapchain to be recreated.
SwapchainRecreation,
}

View file

@@ -1,286 +0,0 @@
use std::sync::Arc;
use flax::{entity_ids, BoxedSystem, Query, QueryBorrow, Schedule, System, World};
use vulkano::{
command_buffer::{
allocator::{CommandBufferAllocator, StandardCommandBufferAllocator},
CommandBufferBeginInfo, CommandBufferLevel, CommandBufferUsage, RecordingCommandBuffer,
RenderingAttachmentInfo, RenderingInfo,
},
image::view::ImageView,
pipeline::graphics::viewport::Viewport,
render_pass::{AttachmentLoadOp, AttachmentStoreOp},
sync::GpuFuture,
};
use vulkano_util::{
context::VulkanoContext, renderer::VulkanoWindowRenderer, window::VulkanoWindows,
};
use crate::core::{debug_gui::DebugGuiStack, module::RenderModule as ThreadLocalModule};
use self::{egui::Gui, test_pipeline::test_pipeline};
pub mod egui;
pub mod events;
mod test_pipeline;
pub struct RenderModule {
schedule: Schedule,
command_buffer_allocator: Arc<dyn CommandBufferAllocator>,
viewport: Viewport,
}
impl RenderModule {
pub fn new(
vk_context: &mut VulkanoContext,
_vk_windows: &mut VulkanoWindows,
_schedule: &mut Schedule,
_world: &mut World,
_events: &mut crate::core::events::Events,
) -> Self {
let schedule = Schedule::builder()
.with_system(add_distance_system())
.build();
let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
vk_context.device().clone(),
Default::default(),
));
let viewport = Viewport {
offset: [0.0, 0.0],
extent: [0.0, 0.0],
depth_range: 0.0..=1.0,
};
Self {
schedule,
command_buffer_allocator,
viewport,
}
}
}
impl ThreadLocalModule for RenderModule {
fn on_update(
&mut self,
gui_stack: &mut DebugGuiStack,
vk_context: &mut VulkanoContext,
vk_windows: &mut vulkano_util::window::VulkanoWindows,
world: &mut World,
_events: &mut crate::core::events::Events,
_frame_time: std::time::Duration,
) -> anyhow::Result<()> {
self.schedule.execute_seq(world).unwrap();
let viewport = &mut self.viewport;
for (window_id, renderer) in vk_windows.iter_mut() {
let gui = gui_stack.get_mut(*window_id).unwrap();
draw(
self.command_buffer_allocator.clone(),
viewport,
vk_context,
renderer,
gui,
);
}
Ok(())
}
}
pub fn add_distance_system() -> BoxedSystem {
let query = Query::new(entity_ids());
System::builder()
.with_query(query)
.build(|mut query: QueryBorrow<'_, flax::EntityIds, _>| {
for _id in &mut query {
// println!("----------: {}", _id.index());
}
})
.boxed()
}
fn draw(
command_buffer_allocator: Arc<dyn CommandBufferAllocator>,
viewport: &mut Viewport,
context: &mut VulkanoContext,
renderer: &mut VulkanoWindowRenderer,
gui: &mut Gui,
) {
let (vertex_buffer, pipeline) = test_pipeline(
context.device().clone(),
context.memory_allocator().clone(),
renderer.swapchain_format(),
);
// Do not draw the frame when the screen size is zero. On Windows, this can
// occur when minimizing the application.
let image_extent: [u32; 2] = renderer.window().inner_size().into();
if image_extent.contains(&0) {
return;
}
// Begin rendering by acquiring the gpu future from the window renderer.
let previous_frame_end = renderer
.acquire(|swapchain_images| {
// Whenever the window resizes we need to recreate everything dependent
// on the window size. In this example that
// includes the swapchain, the framebuffers
// and the dynamic state viewport.
window_size_dependent_setup(swapchain_images, viewport);
})
.unwrap();
let mut builder = RecordingCommandBuffer::new(
command_buffer_allocator.clone(),
context.graphics_queue().queue_family_index(),
CommandBufferLevel::Primary,
CommandBufferBeginInfo {
usage: CommandBufferUsage::OneTimeSubmit,
..Default::default()
},
)
.unwrap();
builder
// Before we can draw, we have to *enter a render pass*. We specify which
// attachments we are going to use for rendering here, which needs to match
// what was previously specified when creating the pipeline.
.begin_rendering(RenderingInfo {
// As before, we specify one color attachment, but now we specify the image
// view to use as well as how it should be used.
color_attachments: vec![Some(RenderingAttachmentInfo {
// `Clear` means that we ask the GPU to clear the content of this
// attachment at the start of rendering.
load_op: AttachmentLoadOp::Clear,
// `Store` means that we ask the GPU to store the rendered output in
// the attachment image. We could also ask it to discard the result.
store_op: AttachmentStoreOp::Store,
// The value to clear the attachment with. Here we clear it with a blue
// color.
//
// Only attachments that have `AttachmentLoadOp::Clear` are provided
// with clear values, any others should use `None` as the clear value.
clear_value: Some([0.0, 0.0, 1.0, 1.0].into()),
..RenderingAttachmentInfo::image_view(
// We specify image view corresponding to the currently acquired
// swapchain image, to use for this attachment.
// attachment_image_views[image_index as usize].clone(),
renderer.swapchain_image_view().clone(),
)
})],
..Default::default()
})
.unwrap()
// We are now inside the first subpass of the render pass.
//
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [viewport.clone()].into_iter().collect())
.unwrap()
.bind_pipeline_graphics(pipeline.clone())
.unwrap()
.bind_vertex_buffers(0, vertex_buffer.clone())
.unwrap();
unsafe {
builder
// We add a draw command.
.draw(vertex_buffer.len() as u32, 1, 0, 0)
.unwrap();
}
builder
// We leave the render pass.
.end_rendering()
.unwrap();
// Finish recording the command buffer by calling `end`.
let command_buffer = builder.end().unwrap();
draw_gui(gui);
let before_future = previous_frame_end
.then_execute(context.graphics_queue().clone(), command_buffer)
.unwrap()
.boxed();
let after_future = gui
.draw_on_image(before_future, renderer.swapchain_image_view())
.boxed();
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `present` on the window renderer.
//
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
renderer.present(after_future, true);
}
fn draw_gui(gui: &mut Gui) {
let mut code = CODE.to_owned();
gui.immediate_ui(|gui| {
let ctx = gui.context();
egui::egui::Window::new("Colors")
.vscroll(true)
.show(&ctx, |ui| {
ui.vertical_centered(|ui| {
ui.add(egui::egui::widgets::Label::new("Hi there!"));
sized_text(ui, "Rich Text", 32.0);
});
ui.separator();
ui.columns(2, |columns| {
egui::egui::ScrollArea::vertical().id_source("source").show(
&mut columns[0],
|ui| {
ui.add(
egui::egui::TextEdit::multiline(&mut code)
.font(egui::egui::TextStyle::Monospace),
);
},
);
egui::egui::ScrollArea::vertical()
.id_source("rendered")
.show(&mut columns[1], |ui| {
ui.add(egui::egui::widgets::Label::new("Good day!"));
});
});
});
});
}
fn sized_text(ui: &mut egui::egui::Ui, text: impl Into<String>, size: f32) {
ui.label(
egui::egui::RichText::new(text)
.size(size)
.family(::egui::FontFamily::Monospace),
);
}
const CODE: &str = r"
# Some markup
```
let mut gui = Gui::new(&event_loop, renderer.surface(), None, renderer.queue(), SampleCount::Sample1);
```
";
fn window_size_dependent_setup(
image_views: &[Arc<ImageView>],
viewport: &mut Viewport,
) -> Vec<Arc<ImageView>> {
let extent = image_views[0].image().extent();
viewport.extent = [extent[0] as f32, extent[1] as f32];
image_views
.iter()
.map(|image_view| {
let image = image_view.image().clone();
ImageView::new_default(image).unwrap()
})
.collect::<Vec<_>>()
}

View file

@@ -1,197 +0,0 @@
use std::sync::Arc;
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
device::Device,
format::Format,
memory::allocator::{AllocationCreateInfo, MemoryAllocator, MemoryTypeFilter},
pipeline::{
graphics::{
color_blend::{ColorBlendAttachmentState, ColorBlendState},
input_assembly::InputAssemblyState,
multisample::MultisampleState,
rasterization::RasterizationState,
subpass::PipelineRenderingCreateInfo,
vertex_input::{Vertex, VertexDefinition},
viewport::ViewportState,
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
DynamicState, GraphicsPipeline, PipelineLayout, PipelineShaderStageCreateInfo,
},
};
pub fn test_pipeline(
device: Arc<Device>,
memory_allocator: Arc<dyn MemoryAllocator>,
image_format: Format,
) -> (Subbuffer<[MyVertex]>, Arc<GraphicsPipeline>) {
let vertices = [
MyVertex {
position: [-0.5, -0.25],
},
MyVertex {
position: [0.0, 0.5],
},
MyVertex {
position: [0.25, -0.1],
},
];
let vertex_buffer = Buffer::from_iter(
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
vertices,
)
.unwrap();
let pipeline = {
// First, we load the shaders that the pipeline will use:
// the vertex shader and the fragment shader.
//
// A Vulkan shader can in theory contain multiple entry points, so we have to specify which
// one.
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
// Automatically generate a vertex input state from the vertex shader's input interface,
// that takes a single vertex buffer containing `Vertex` structs.
let vertex_input_state = MyVertex::per_vertex().definition(&vs).unwrap();
// Make a list of the shader stages that the pipeline will have.
let stages = [
PipelineShaderStageCreateInfo::new(vs),
PipelineShaderStageCreateInfo::new(fs),
];
// We must now create a **pipeline layout** object, which describes the locations and types
// of descriptor sets and push constants used by the shaders in the pipeline.
//
// Multiple pipelines can share a common layout object, which is more efficient.
// The shaders in a pipeline must use a subset of the resources described in its pipeline
// layout, but the pipeline layout is allowed to contain resources that are not present in
// the shaders; they can be used by shaders in other pipelines that share the same
// layout. Thus, it is a good idea to design shaders so that many pipelines have
// common resource locations, which allows them to share pipeline layouts.
let layout = PipelineLayout::new(
device.clone(),
// Since we only have one pipeline in this example, and thus one pipeline layout,
// we automatically generate the creation info for it from the resources used in the
// shaders. In a real application, you would specify this information manually so that
// you can re-use one layout in multiple pipelines.
PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
.into_pipeline_layout_create_info(device.clone())
.unwrap(),
)
.unwrap();
// We describe the formats of attachment images where the colors, depth and/or stencil
// information will be written. The pipeline will only be usable with this particular
// configuration of the attachment images.
let subpass = PipelineRenderingCreateInfo {
// We specify a single color attachment that will be rendered to. When we begin
// rendering, we will specify a swapchain image to be used as this attachment, so here
// we set its format to be the same format as the swapchain.
color_attachment_formats: vec![Some(image_format)],
..Default::default()
};
// Finally, create the pipeline.
GraphicsPipeline::new(
device.clone(),
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),
// How vertex data is read from the vertex buffers into the vertex shader.
vertex_input_state: Some(vertex_input_state),
// How vertices are arranged into primitive shapes.
// The default primitive shape is a triangle.
input_assembly_state: Some(InputAssemblyState::default()),
// How primitives are transformed and clipped to fit the framebuffer.
// We use a resizable viewport, set to draw over the entire window.
viewport_state: Some(ViewportState::default()),
// How polygons are culled and converted into a raster of pixels.
// The default value does not perform any culling.
rasterization_state: Some(RasterizationState::default()),
// How multiple fragment shader samples are converted to a single pixel value.
// The default value does not perform any multisampling.
multisample_state: Some(MultisampleState::default()),
// How pixel values are combined with the values already present in the framebuffer.
// The default value overwrites the old value with the new one, without any
// blending.
color_blend_state: Some(ColorBlendState::with_attachment_states(
subpass.color_attachment_formats.len() as u32,
ColorBlendAttachmentState::default(),
)),
// Dynamic states allows us to specify parts of the pipeline settings when
// recording the command buffer, before we perform drawing.
// Here, we specify that the viewport should be dynamic.
dynamic_state: [DynamicState::Viewport].into_iter().collect(),
subpass: Some(subpass.into()),
..GraphicsPipelineCreateInfo::layout(layout)
},
)
.unwrap()
};
(vertex_buffer, pipeline)
}
#[derive(BufferContents, Vertex)]
#[repr(C)]
pub struct MyVertex {
#[format(R32G32_SFLOAT)]
position: [f32; 2],
}
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: r"
#version 450
layout(location = 0) in vec2 position;
layout(location = 0) out vec3 fragColor;
vec3 colors[3] = vec3[](vec3(1.0, 0.0, 0.0), vec3(0.0, 1.0, 0.0), vec3(0.0, 0.0, 1.0));
void main() {
gl_Position = vec4(position, 0.0, 1.0);
fragColor = colors[gl_VertexIndex];
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: r"
#version 450
layout(location = 0) in vec3 fragColor;
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(fragColor, 1.0);
}
",
}
}

View file

@@ -1,4 +0,0 @@
pub mod config;
pub mod graphics;
pub mod window;
// pub mod steel;

View file

@@ -1,104 +0,0 @@
use std::sync::Arc;
use flax::{component, BoxedSystem, EntityBorrow, Query, QueryBorrow, Schedule, System, World};
use steel::steel_vm::engine::Engine;
use steel::steel_vm::register_fn::RegisterFn;
use steel_derive::Steel;
use crate::core::module::Module;
component! {
steel_script: String,
steel_event_tx: flume::Sender<SteelEvent>,
resources,
}
pub fn execute_script_system() -> BoxedSystem {
let tx_query = Query::new(steel_event_tx()).entity(resources());
let script_query = Query::new(steel_script());
System::builder()
.with_query(tx_query)
.with_query(script_query)
.build(|mut tx_query: EntityBorrow<'_, flax::Component<flume::Sender<SteelEvent>>>, mut script_query: QueryBorrow<flax::Component<String>>| {
if let Ok(tx) = tx_query.get() {
for script in &mut script_query {
println!("Got script and tx");
tx.send(SteelEvent::Execute(script.into())).unwrap();
}
}
})
.boxed()
}
#[derive(Debug, Steel)]
enum SteelEvent {
Execute(String),
}
#[allow(dead_code)]
#[derive(Steel, Clone)]
pub struct SteelModule {
engine: Engine,
// schedule: Schedule,
rx: flume::Receiver<SteelEvent>,
}
impl SteelModule {
pub fn new(
schedule: &mut Schedule,
world: &mut World,
_events: &mut crate::core::events::Events,
) -> Self {
let mut engine = Engine::new();
let (tx, rx) = flume::unbounded::<SteelEvent>();
let schedule_r = Schedule::builder()
.with_system(execute_script_system())
.build();
schedule.append(schedule_r);
world.set(resources(), steel_event_tx(), tx).unwrap();
// Some testing
let entity = world.spawn();
world.set(entity, steel_script(), r#"
(require-builtin steel/time)
(display "Hello ")
(time/sleep-ms 5000)
(display "World!")"#.into()).unwrap();
Self {
engine,
// schedule,
rx,
}
}
}
impl Module for SteelModule {
fn on_update(
&mut self,
world: &mut World,
_events: &mut crate::core::events::Events,
_frame_time: std::time::Duration,
) -> anyhow::Result<()> {
// self.schedule.execute_par(world).unwrap();
if let Ok(event) = self.rx.recv() {
match event {
SteelEvent::Execute(script) => {
let handle = std::thread::spawn(|| {
let mut engine = Engine::new();
let val = engine.run(script).unwrap();
println!("Steel val: {:?}", val);
});
}
}
}
Ok(())
}
}

View file

@@ -1,34 +0,0 @@
use flax::{Schedule, World};
use crate::core::module::Module;
pub struct WindowModule {
}
impl WindowModule {
pub fn new(
schedule: &mut Schedule,
_world: &mut World,
_events: &mut crate::core::events::Events,
) -> Self {
let schedule_r = Schedule::builder()
.build();
schedule.append(schedule_r);
Self {
}
}
}
impl Module for WindowModule {
fn on_update(
&mut self,
_world: &mut World,
_events: &mut crate::core::events::Events,
_frame_time: std::time::Duration,
) -> anyhow::Result<()> {
// println!("WindowModule on_update");
Ok(())
}
}

View file

@@ -1,694 +0,0 @@
use std::{collections::HashMap, sync::Arc};
use super::components::EntityWindow;
use specs::prelude::*;
use vulkano::{
buffer::{Buffer, BufferContents, BufferCreateInfo, BufferUsage, Subbuffer},
command_buffer::{
allocator::StandardCommandBufferAllocator, CommandBufferBeginInfo, CommandBufferLevel,
CommandBufferUsage, RecordingCommandBuffer, RenderingAttachmentInfo, RenderingInfo,
},
device::{
physical::PhysicalDeviceType, Device, DeviceCreateInfo, DeviceExtensions, DeviceFeatures,
Queue, QueueCreateInfo, QueueFlags,
},
image::{view::ImageView, Image, ImageUsage},
instance::{Instance, InstanceCreateFlags, InstanceCreateInfo},
memory::allocator::{AllocationCreateInfo, MemoryTypeFilter, StandardMemoryAllocator},
pipeline::{
graphics::{
color_blend::{ColorBlendAttachmentState, ColorBlendState},
input_assembly::InputAssemblyState,
multisample::MultisampleState,
rasterization::RasterizationState,
subpass::PipelineRenderingCreateInfo,
vertex_input::{Vertex, VertexDefinition},
viewport::{Viewport, ViewportState},
GraphicsPipelineCreateInfo,
},
layout::PipelineDescriptorSetLayoutCreateInfo,
DynamicState, GraphicsPipeline, PipelineLayout, PipelineShaderStageCreateInfo,
},
render_pass::{AttachmentLoadOp, AttachmentStoreOp},
swapchain::{
acquire_next_image, Surface, Swapchain, SwapchainCreateInfo, SwapchainPresentInfo,
},
sync::{self, GpuFuture},
Validated, Version, VulkanError, VulkanLibrary,
};
use winit::window::{Window, WindowId};
pub struct Render {
renderers: HashMap<WindowId, VkRender>,
library: Arc<VulkanLibrary>,
}
impl<'a> System<'a> for Render {
type SystemData = (Entities<'a>, ReadStorage<'a, EntityWindow>);
fn run(&mut self, data: Self::SystemData) {
let (entities, windows) = data;
        (&entities, &windows).join().for_each(|(_entity, window)| {
            // Lazily create one renderer per window, then render only that window. Rendering
            // every renderer inside this loop would draw each window once per window entity
            // per frame.
            let renderer = self
                .renderers
                .entry(window.window.id())
                .or_insert_with(|| VkRender::new(self.library.clone(), window.window.clone()));
            renderer.render();
            window.window.request_redraw();
        });
}
fn setup(&mut self, world: &mut World) {
Self::SystemData::setup(world);
}
}
impl Default for Render {
fn default() -> Self {
Self {
renderers: HashMap::new(),
library: VulkanLibrary::new().unwrap(),
}
}
}
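// A hypothetical way to drive this system; the engine wires it up differently, but the system
// only needs entities carrying an `EntityWindow` component. The renderer owns Vulkan resources,
// so it is registered as a thread-local system. The function name is illustrative only.
#[allow(dead_code)]
fn build_render_dispatcher<'a, 'b>(world: &mut World) -> Dispatcher<'a, 'b> {
    let mut dispatcher = DispatcherBuilder::new()
        .with_thread_local(Render::default())
        .build();
    // Calls `Render::setup`, which registers the `EntityWindow` storage via `SystemData::setup`.
    dispatcher.setup(world);
    dispatcher
    // Each frame: `dispatcher.dispatch(&world); world.maintain();`
}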
struct VkRender {
window: Arc<Window>,
device: Arc<Device>,
queue: Arc<Queue>,
command_buffer_allocator: Arc<StandardCommandBufferAllocator>,
viewport: Viewport,
vertex_buffer: Subbuffer<[MyVertex]>,
recreate_swapchain: bool,
swapchain: Arc<Swapchain>,
previous_frame_end: Option<Box<dyn GpuFuture>>,
attachment_image_views: Vec<Arc<ImageView>>,
pipeline: Arc<GraphicsPipeline>,
}
impl VkRender {
pub fn new(library: Arc<VulkanLibrary>, window: Arc<Window>) -> Self {
println!("Created new renderer for window: {:?}", window.id());
let required_extensions = Surface::required_extensions(&window).unwrap();
// Now creating the instance.
let instance = Instance::new(
library,
InstanceCreateInfo {
// Enable enumerating devices that use non-conformant Vulkan implementations.
// (e.g. MoltenVK)
flags: InstanceCreateFlags::ENUMERATE_PORTABILITY,
enabled_extensions: required_extensions,
..Default::default()
},
)
.unwrap();
let surface = Surface::from_window(instance.clone(), window.clone()).unwrap();
// Choose device extensions that we're going to use. In order to present images to a surface,
// we need a `Swapchain`, which is provided by the `khr_swapchain` extension.
let mut device_extensions = DeviceExtensions {
khr_swapchain: true,
..DeviceExtensions::empty()
};
// We then choose which physical device to use. First, we enumerate all the available physical
// devices, then apply filters to narrow them down to those that can support our needs.
let (physical_device, queue_family_index) = instance
.enumerate_physical_devices()
.unwrap()
.filter(|p| {
// For this example, we require at least Vulkan 1.3, or a device that has the
// `khr_dynamic_rendering` extension available.
p.api_version() >= Version::V1_3 || p.supported_extensions().khr_dynamic_rendering
})
.filter(|p| {
                // Some devices may not support the extensions or features that your application
                // needs, or may report properties and limits that are not sufficient for it.
                // Such devices should be filtered out here.
p.supported_extensions().contains(&device_extensions)
})
.filter_map(|p| {
// For each physical device, we try to find a suitable queue family that will execute
// our draw commands.
//
// Devices can provide multiple queues to run commands in parallel (for example a draw
                // queue and a compute queue), similar to CPU threads. This is something you
                // have to manage manually in Vulkan. Queues of the same type belong to the same queue
// family.
//
// Here, we look for a single queue family that is suitable for our purposes. In a
// real-world application, you may want to use a separate dedicated transfer queue to
// handle data transfers in parallel with graphics operations. You may also need a
// separate queue for compute operations, if your application uses those.
p.queue_family_properties()
.iter()
.enumerate()
.position(|(i, q)| {
// We select a queue family that supports graphics operations. When drawing to
// a window surface, as we do in this example, we also need to check that
// queues in this queue family are capable of presenting images to the surface.
q.queue_flags.intersects(QueueFlags::GRAPHICS)
&& p.surface_support(i as u32, &surface).unwrap_or(false)
})
// The code here searches for the first queue family that is suitable. If none is
// found, `None` is returned to `filter_map`, which disqualifies this physical
// device.
.map(|i| (p, i as u32))
})
// All the physical devices that pass the filters above are suitable for the application.
// However, not every device is equal, some are preferred over others. Now, we assign each
// physical device a score, and pick the device with the lowest ("best") score.
//
// In this example, we simply select the best-scoring device to use in the application.
// In a real-world setting, you may want to use the best-scoring device only as a "default"
// or "recommended" device, and let the user choose the device themself.
.min_by_key(|(p, _)| {
// We assign a lower score to device types that are likely to be faster/better.
match p.properties().device_type {
PhysicalDeviceType::DiscreteGpu => 0,
PhysicalDeviceType::IntegratedGpu => 1,
PhysicalDeviceType::VirtualGpu => 2,
PhysicalDeviceType::Cpu => 3,
PhysicalDeviceType::Other => 4,
_ => 5,
}
})
.expect("no suitable physical device found");
if physical_device.api_version() < Version::V1_3 {
device_extensions.khr_dynamic_rendering = true;
}
// Now initializing the device. This is probably the most important object of Vulkan.
//
// An iterator of created queues is returned by the function alongside the device.
let (device, mut queues) = Device::new(
// Which physical device to connect to.
physical_device,
DeviceCreateInfo {
// The list of queues that we are going to use. Here we only use one queue, from the
// previously chosen queue family.
queue_create_infos: vec![QueueCreateInfo {
queue_family_index,
..Default::default()
}],
// A list of optional features and extensions that our program needs to work correctly.
// Some parts of the Vulkan specs are optional and must be enabled manually at device
// creation. In this example the only things we are going to need are the
// `khr_swapchain` extension that allows us to draw to a window, and
// `khr_dynamic_rendering` if we don't have Vulkan 1.3 available.
enabled_extensions: device_extensions,
// In order to render with Vulkan 1.3's dynamic rendering, we need to enable it here.
// Otherwise, we are only allowed to render with a render pass object, as in the
// standard triangle example. The feature is required to be supported by the device if
// it supports Vulkan 1.3 and higher, or if the `khr_dynamic_rendering` extension is
// available, so we don't need to check for support.
enabled_features: DeviceFeatures {
dynamic_rendering: true,
..DeviceFeatures::empty()
},
..Default::default()
},
)
.unwrap();
let queue = queues.next().unwrap();
// Before we can draw on the surface, we have to create what is called a swapchain. Creating a
// swapchain allocates the color buffers that will contain the image that will ultimately be
// visible on the screen. These images are returned alongside the swapchain.
let (mut swapchain, images) = {
// Querying the capabilities of the surface. When we create the swapchain we can only pass
// values that are allowed by the capabilities.
let surface_capabilities = device
.physical_device()
.surface_capabilities(&surface, Default::default())
.unwrap();
// Choosing the internal format that the images will have.
let image_format = device
.physical_device()
.surface_formats(&surface, Default::default())
.unwrap()[0]
.0;
// Please take a look at the docs for the meaning of the parameters we didn't mention.
Swapchain::new(
device.clone(),
surface,
SwapchainCreateInfo {
                    // Some drivers report a `min_image_count` of 1, but fullscreen mode requires at
// least 2. Therefore we must ensure the count is at least 2, otherwise the program
// would crash when entering fullscreen mode on those drivers.
min_image_count: surface_capabilities.min_image_count.max(2),
image_format,
                    // The size of the window, only used to initially set up the swapchain.
//
// NOTE:
// On some drivers the swapchain extent is specified by
// `surface_capabilities.current_extent` and the swapchain size must use this
// extent. This extent is always the same as the window size.
//
// However, other drivers don't specify a value, i.e.
// `surface_capabilities.current_extent` is `None`. These drivers will allow
// anything, but the only sensible value is the window size.
//
// Both of these cases need the swapchain to use the window size, so we just
// use that.
image_extent: window.inner_size().into(),
image_usage: ImageUsage::COLOR_ATTACHMENT,
// The alpha mode indicates how the alpha value of the final image will behave. For
// example, you can choose whether the window will be opaque or transparent.
composite_alpha: surface_capabilities
.supported_composite_alpha
.into_iter()
.next()
.unwrap(),
..Default::default()
},
)
.unwrap()
};
let memory_allocator = Arc::new(StandardMemoryAllocator::new_default(device.clone()));
let vertices = [
MyVertex {
position: [-0.5, -0.25, 0.1],
},
MyVertex {
position: [0.0, 0.5, 0.1],
},
MyVertex {
position: [0.25, -0.1, 0.1],
},
];
let vertex_buffer = Buffer::from_iter(
memory_allocator,
BufferCreateInfo {
usage: BufferUsage::VERTEX_BUFFER,
..Default::default()
},
AllocationCreateInfo {
memory_type_filter: MemoryTypeFilter::PREFER_DEVICE
| MemoryTypeFilter::HOST_SEQUENTIAL_WRITE,
..Default::default()
},
vertices,
)
.unwrap();
mod vs {
vulkano_shaders::shader! {
ty: "vertex",
src: r"
#version 450
layout(location = 0) in vec3 position;
void main() {
gl_Position = vec4(position, 1.0);
}
",
}
}
mod fs {
vulkano_shaders::shader! {
ty: "fragment",
src: r"
#version 450
layout(location = 0) out vec4 f_color;
void main() {
f_color = vec4(1.0, 0.0, 0.0, 1.0);
}
",
}
}
        // At this point, OpenGL initialization would be finished. However, in Vulkan it is not. OpenGL
// implicitly does a lot of computation whenever you draw. In Vulkan, you have to do all this
// manually.
// Before we draw, we have to create what is called a **pipeline**. A pipeline describes how
// a GPU operation is to be performed. It is similar to an OpenGL program, but it also contains
// many settings for customization, all baked into a single object. For drawing, we create
// a **graphics** pipeline, but there are also other types of pipeline.
let pipeline = {
// First, we load the shaders that the pipeline will use:
// the vertex shader and the fragment shader.
//
// A Vulkan shader can in theory contain multiple entry points, so we have to specify which
// one.
let vs = vs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
let fs = fs::load(device.clone())
.unwrap()
.entry_point("main")
.unwrap();
// Automatically generate a vertex input state from the vertex shader's input interface,
// that takes a single vertex buffer containing `Vertex` structs.
let vertex_input_state = MyVertex::per_vertex().definition(&vs).unwrap();
// Make a list of the shader stages that the pipeline will have.
let stages = [
PipelineShaderStageCreateInfo::new(vs),
PipelineShaderStageCreateInfo::new(fs),
];
// We must now create a **pipeline layout** object, which describes the locations and types of
// descriptor sets and push constants used by the shaders in the pipeline.
//
// Multiple pipelines can share a common layout object, which is more efficient.
// The shaders in a pipeline must use a subset of the resources described in its pipeline
// layout, but the pipeline layout is allowed to contain resources that are not present in the
// shaders; they can be used by shaders in other pipelines that share the same layout.
// Thus, it is a good idea to design shaders so that many pipelines have common resource
// locations, which allows them to share pipeline layouts.
let layout = PipelineLayout::new(
device.clone(),
// Since we only have one pipeline in this example, and thus one pipeline layout,
// we automatically generate the creation info for it from the resources used in the
// shaders. In a real application, you would specify this information manually so that you
// can re-use one layout in multiple pipelines.
PipelineDescriptorSetLayoutCreateInfo::from_stages(&stages)
.into_pipeline_layout_create_info(device.clone())
.unwrap(),
)
.unwrap();
// We describe the formats of attachment images where the colors, depth and/or stencil
// information will be written. The pipeline will only be usable with this particular
// configuration of the attachment images.
let subpass = PipelineRenderingCreateInfo {
// We specify a single color attachment that will be rendered to. When we begin
// rendering, we will specify a swapchain image to be used as this attachment, so here
// we set its format to be the same format as the swapchain.
color_attachment_formats: vec![Some(swapchain.image_format())],
..Default::default()
};
// Finally, create the pipeline.
GraphicsPipeline::new(
device.clone(),
None,
GraphicsPipelineCreateInfo {
stages: stages.into_iter().collect(),
// How vertex data is read from the vertex buffers into the vertex shader.
vertex_input_state: Some(vertex_input_state),
// How vertices are arranged into primitive shapes.
// The default primitive shape is a triangle.
input_assembly_state: Some(InputAssemblyState::default()),
// How primitives are transformed and clipped to fit the framebuffer.
// We use a resizable viewport, set to draw over the entire window.
viewport_state: Some(ViewportState::default()),
// How polygons are culled and converted into a raster of pixels.
// The default value does not perform any culling.
rasterization_state: Some(RasterizationState::default()),
// How multiple fragment shader samples are converted to a single pixel value.
// The default value does not perform any multisampling.
multisample_state: Some(MultisampleState::default()),
// How pixel values are combined with the values already present in the framebuffer.
// The default value overwrites the old value with the new one, without any blending.
color_blend_state: Some(ColorBlendState::with_attachment_states(
subpass.color_attachment_formats.len() as u32,
ColorBlendAttachmentState::default(),
)),
                    // Dynamic states allow us to specify parts of the pipeline settings when
// recording the command buffer, before we perform drawing.
// Here, we specify that the viewport should be dynamic.
dynamic_state: [DynamicState::Viewport].into_iter().collect(),
subpass: Some(subpass.into()),
..GraphicsPipelineCreateInfo::layout(layout)
},
)
.unwrap()
};
// Dynamic viewports allow us to recreate just the viewport when the window is resized.
// Otherwise we would have to recreate the whole pipeline.
let mut viewport = Viewport {
offset: [0.0, 0.0],
extent: [0.0, 0.0],
depth_range: 0.0..=1.0,
};
// When creating the swapchain, we only created plain images. To use them as an attachment for
        // rendering, we must wrap them in an image view.
//
// Since we need to draw to multiple images, we are going to create a different image view for
// each image.
let mut attachment_image_views = window_size_dependent_setup(&images, &mut viewport);
// Before we can start creating and recording command buffers, we need a way of allocating
// them. Vulkano provides a command buffer allocator, which manages raw Vulkan command pools
// underneath and provides a safe interface for them.
let command_buffer_allocator = Arc::new(StandardCommandBufferAllocator::new(
device.clone(),
Default::default(),
));
// Initialization is finally finished!
// In some situations, the swapchain will become invalid by itself. This includes for example
// when the window is resized (as the images of the swapchain will no longer match the
// window's) or, on Android, when the application went to the background and goes back to the
// foreground.
//
// In this situation, acquiring a swapchain image or presenting it will return an error.
// Rendering to an image of that swapchain will not produce any error, but may or may not work.
// To continue rendering, we need to recreate the swapchain by creating a new swapchain. Here,
// we remember that we need to do this for the next loop iteration.
let mut recreate_swapchain = false;
// In the loop below we are going to submit commands to the GPU. Submitting a command produces
// an object that implements the `GpuFuture` trait, which holds the resources for as long as
// they are in use by the GPU.
//
// Destroying the `GpuFuture` blocks until the GPU is finished executing it. In order to avoid
// that, we store the submission of the previous frame here.
let mut previous_frame_end = Some(sync::now(device.clone()).boxed());
Self {
window,
device,
queue,
command_buffer_allocator,
viewport,
vertex_buffer,
recreate_swapchain,
swapchain,
previous_frame_end,
attachment_image_views,
pipeline,
}
}
pub fn render(&mut self) {
// Do not draw the frame when the screen size is zero. On Windows, this can
// occur when minimizing the application.
let image_extent: [u32; 2] = self.window.inner_size().into();
if image_extent.contains(&0) {
return;
}
// It is important to call this function from time to time, otherwise resources
// will keep accumulating and you will eventually reach an out of memory error.
// Calling this function polls various fences in order to determine what the GPU
// has already processed, and frees the resources that are no longer needed.
self.previous_frame_end.as_mut().unwrap().cleanup_finished();
// Whenever the window resizes we need to recreate everything dependent on the
// window size. In this example that includes the swapchain, the framebuffers and
// the dynamic state viewport.
if self.recreate_swapchain {
let (new_swapchain, new_images) = self
.swapchain
.recreate(SwapchainCreateInfo {
image_extent,
..self.swapchain.create_info()
})
.expect("failed to recreate swapchain");
self.swapchain = new_swapchain;
// Now that we have new swapchain images, we must create new image views from
// them as well.
self.attachment_image_views =
window_size_dependent_setup(&new_images, &mut self.viewport);
self.recreate_swapchain = false;
}
// Before we can draw on the output, we have to *acquire* an image from the
// swapchain. If no image is available (which happens if you submit draw commands
// too quickly), then the function will block. This operation returns the index of
// the image that we are allowed to draw upon.
//
// This function can block if no image is available. The parameter is an optional
// timeout after which the function call will return an error.
let (image_index, suboptimal, acquire_future) =
match acquire_next_image(self.swapchain.clone(), None).map_err(Validated::unwrap) {
Ok(r) => r,
Err(VulkanError::OutOfDate) => {
self.recreate_swapchain = true;
return;
}
Err(e) => panic!("failed to acquire next image: {e}"),
};
// `acquire_next_image` can be successful, but suboptimal. This means that the
// swapchain image will still work, but it may not display correctly. With some
        // drivers this can happen when the window resizes, but it may not cause the swapchain
// to become out of date.
if suboptimal {
self.recreate_swapchain = true;
}
// In order to draw, we have to build a *command buffer*. The command buffer object
// holds the list of commands that are going to be executed.
//
// Building a command buffer is an expensive operation (usually a few hundred
// microseconds), but it is known to be a hot path in the driver and is expected to
// be optimized.
//
// Note that we have to pass a queue family when we create the command buffer. The
// command buffer will only be executable on that given queue family.
let mut builder = RecordingCommandBuffer::new(
self.command_buffer_allocator.clone(),
self.queue.queue_family_index(),
CommandBufferLevel::Primary,
CommandBufferBeginInfo {
usage: CommandBufferUsage::OneTimeSubmit,
..Default::default()
},
)
.unwrap();
builder
// Before we can draw, we have to *enter a render pass*. We specify which
// attachments we are going to use for rendering here, which needs to match
// what was previously specified when creating the pipeline.
.begin_rendering(RenderingInfo {
// As before, we specify one color attachment, but now we specify the image
// view to use as well as how it should be used.
color_attachments: vec![Some(RenderingAttachmentInfo {
// `Clear` means that we ask the GPU to clear the content of this
// attachment at the start of rendering.
load_op: AttachmentLoadOp::Clear,
// `Store` means that we ask the GPU to store the rendered output in
// the attachment image. We could also ask it to discard the result.
store_op: AttachmentStoreOp::Store,
// The value to clear the attachment with. Here we clear it with a blue
// color.
//
// Only attachments that have `AttachmentLoadOp::Clear` are provided
// with clear values, any others should use `None` as the clear value.
clear_value: Some([0.0, 0.0, 1.0, 1.0].into()),
..RenderingAttachmentInfo::image_view(
                        // We specify the image view corresponding to the currently acquired
// swapchain image, to use for this attachment.
self.attachment_image_views[image_index as usize].clone(),
)
})],
..Default::default()
})
.unwrap()
// We are now inside the first subpass of the render pass.
//
// TODO: Document state setting and how it affects subsequent draw commands.
.set_viewport(0, [self.viewport.clone()].into_iter().collect())
.unwrap()
.bind_pipeline_graphics(self.pipeline.clone())
.unwrap()
.bind_vertex_buffers(0, self.vertex_buffer.clone())
.unwrap();
unsafe {
builder
// We add a draw command.
.draw(self.vertex_buffer.len() as u32, 1, 0, 0)
.unwrap();
}
builder
// We leave the render pass.
.end_rendering()
.unwrap();
// Finish recording the command buffer by calling `end`.
let command_buffer = builder.end().unwrap();
let future = self
.previous_frame_end
.take()
.unwrap()
.join(acquire_future)
.then_execute(self.queue.clone(), command_buffer)
.unwrap()
// The color output is now expected to contain our triangle. But in order to
// show it on the screen, we have to *present* the image by calling
// `then_swapchain_present`.
//
// This function does not actually present the image immediately. Instead it
// submits a present command at the end of the queue. This means that it will
// only be presented once the GPU has finished executing the command buffer
// that draws the triangle.
.then_swapchain_present(
self.queue.clone(),
SwapchainPresentInfo::swapchain_image_index(self.swapchain.clone(), image_index),
)
.then_signal_fence_and_flush();
match future.map_err(Validated::unwrap) {
Ok(future) => {
self.previous_frame_end = Some(future.boxed());
}
Err(VulkanError::OutOfDate) => {
self.recreate_swapchain = true;
self.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
}
Err(e) => {
println!("failed to flush future: {e}");
self.previous_frame_end = Some(sync::now(self.device.clone()).boxed());
}
}
}
}
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct MyVertex {
#[format(R32G32B32_SFLOAT)]
position: [f32; 3],
}
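// A hypothetical extension of `MyVertex`, to illustrate how the derive is typically used: each
// field gets a `#[format]` describing its memory layout, and `per_vertex().definition(&vs)`
// matches the fields to the vertex shader's inputs by name, so the shader would also need e.g.
// `layout(location = 1) in vec4 color;`. Not used by the pipeline above.
#[allow(dead_code)]
#[derive(BufferContents, Vertex)]
#[repr(C)]
struct ColoredVertex {
    #[format(R32G32B32_SFLOAT)]
    position: [f32; 3],
    #[format(R32G32B32A32_SFLOAT)]
    color: [f32; 4],
}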
fn window_size_dependent_setup(
images: &[Arc<Image>],
viewport: &mut Viewport,
) -> Vec<Arc<ImageView>> {
let extent = images[0].extent();
viewport.extent = [extent[0] as f32, extent[1] as f32];
images
.iter()
.map(|image| ImageView::new_default(image.clone()).unwrap())
.collect::<Vec<_>>()
}