From 506ed7c7b2b573afa18ab403442f561c54126fae Mon Sep 17 00:00:00 2001
From: Joe O'Connor
Date: Fri, 28 Oct 2022 13:29:23 +0100
Subject: [PATCH 1/2] Add async future for Fence Signal

---
 vulkano/src/sync/future/fence_signal.rs | 34 +++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/vulkano/src/sync/future/fence_signal.rs b/vulkano/src/sync/future/fence_signal.rs
index 983f5810d1..2bf05c9e23 100644
--- a/vulkano/src/sync/future/fence_signal.rs
+++ b/vulkano/src/sync/future/fence_signal.rs
@@ -18,6 +18,9 @@ use crate::{
 };
 use parking_lot::{Mutex, MutexGuard};
 use std::{mem::replace, ops::Range, sync::Arc, time::Duration};
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
 
 /// Builds a new fence signal future.
 pub fn then_signal_fence<F>(future: F, behavior: FenceSignalFutureBehavior) -> FenceSignalFuture<F>
@@ -54,6 +57,11 @@ pub enum FenceSignalFutureBehavior {
 /// Contrary to most other future types, it is possible to block the current thread until the event
 /// happens. This is done by calling the `wait()` function.
 ///
+/// This can also be done through Rust's async system by simply `.await`ing this object. Note,
+/// though, that (due to the design of Vulkan fences) this will spin while polling the fence
+/// status rather than blocking inside the driver, so for long-running work a blocking `wait()`
+/// may be less CPU intensive (depending on the driver's implementation).
+///
 /// Also note that the `GpuFuture` trait is implemented on `Arc<FenceSignalFuture<_>>`.
 /// This means that you can put this future in an `Arc` and keep a copy of it somewhere in order
 /// to know when the execution reached that point.
@@ -359,6 +367,32 @@ where
     }
 }
 
+impl<F> Future for FenceSignalFuture<F>
+where
+    F: GpuFuture,
+{
+    type Output = ();
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Vulkan only allows polling of the fence status, so we have to use a spin future.
+        // This is still better than blocking in async applications, since a smart-enough async engine
+        // can choose to run some other tasks between polls of this one.
+
+        // Check if we are done without blocking (much)
+        // A minimal non-zero wait time indicates to the driver that it can do some non-zero
+        // amount of work, hence potentially reducing the CPU load of a spin loop as well as reducing
+        // the time taken for the future to resolve.
+        // TODO: Test this hypothesis
+        if let Ok(()) = self.wait(Some(Duration::from_micros(1))) {
+            return Poll::Ready(())
+        };
+
+        // Otherwise spin
+        cx.waker().wake_by_ref();
+        return Poll::Pending;
+    }
+}
+
 impl<F> FenceSignalFutureState<F> {
     fn get_prev(&self) -> Option<&F> {
         match self {

From 3e14ea064f4cd2da2a80be838f1f3edb16ceca98 Mon Sep 17 00:00:00 2001
From: Joe O'Connor
Date: Fri, 28 Oct 2022 18:09:10 +0100
Subject: [PATCH 2/2] Add async future for Fence

---
 vulkano/src/sync/fence.rs               | 43 +++++++++++++++++++++----
 vulkano/src/sync/future/fence_signal.rs | 28 +++++++---------
 2 files changed, 48 insertions(+), 23 deletions(-)

diff --git a/vulkano/src/sync/fence.rs b/vulkano/src/sync/fence.rs
index 173d65bcef..9ba3b3f454 100644
--- a/vulkano/src/sync/fence.rs
+++ b/vulkano/src/sync/fence.rs
@@ -24,6 +24,9 @@ use std::{
     sync::{Arc, Weak},
     time::Duration,
 };
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
 
 /// A two-state synchronization primitive that is signalled by the device and waited on by the host.
 ///
@@ -32,17 +35,17 @@
 /// The primary use of a fence is to know when execution of a queue has reached a particular point.
 /// When adding a command to a queue, a fence can be provided with the command, to be signaled
 /// when the operation finishes. You can check for a fence's current status by calling
-/// `is_signaled` or `wait` on it. If the fence is found to be signaled, that means that the queue
-/// has completed the operation that is associated with the fence, and all operations that were
-/// submitted before it have been completed as well.
+/// `is_signaled`, `wait`, or `.await` on it. If the fence is found to be signaled, that means that
+/// the queue has completed the operation that is associated with the fence, and all operations that
+/// were submitted before it have been completed as well.
 ///
 /// When a queue command accesses a resource, it must be kept alive until the queue command has
 /// finished executing, and you may not be allowed to perform certain other operations (or even any)
-/// while the resource is in use. By calling `is_signaled` or `wait`, the queue will be notified
-/// when the fence is signaled, so that all resources of the associated queue operation and
+/// while the resource is in use. By calling `is_signaled`, `wait`, or `.await`, the queue will be
+/// notified when the fence is signaled, so that all resources of the associated queue operation and
 /// preceding operations can be released.
 ///
-/// Because of this, it is highly recommended to call `is_signaled` or `wait` on your fences.
+/// Because of this, it is highly recommended to call `is_signaled`, `wait`, or `.await` on your fences.
 /// Otherwise, the queue will hold onto resources indefinitely (using up memory)
 /// and resource locks will not be released, which may cause errors when submitting future
 /// queue operations. It is not strictly necessary to wait for *every* fence, as a fence
@@ -1056,6 +1059,25 @@ impl Fence {
     pub(crate) fn state(&self) -> MutexGuard<'_, FenceState> {
         self.state.lock()
     }
+
+    // Shared by Fence and FenceSignalFuture
+    pub(crate) fn poll_impl(&self, cx: &mut Context<'_>) -> Poll<Result<(), OomError>> {
+        // Vulkan only allows polling of the fence status, so we have to use a spin future.
+        // This is still better than blocking in async applications, since a smart-enough async engine
+        // can choose to run some other tasks between polls of this one.
+
+        // Check if we are done without blocking
+        match self.is_signaled() {
+            Err(e) => return Poll::Ready(Err(e)),
+            Ok(signalled) => if signalled {
+                return Poll::Ready(Ok(()))
+            }
+        }
+
+        // Otherwise spin
+        cx.waker().wake_by_ref();
+        return Poll::Pending;
+    }
 }
 
 impl Drop for Fence {
@@ -1073,6 +1095,15 @@ impl Drop for Fence {
     }
 }
 
+impl Future for Fence
+{
+    type Output = Result<(), OomError>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.poll_impl(cx)
+    }
+}
+
 unsafe impl VulkanObject for Fence {
     type Handle = ash::vk::Fence;
 
diff --git a/vulkano/src/sync/future/fence_signal.rs b/vulkano/src/sync/future/fence_signal.rs
index 2bf05c9e23..136279295e 100644
--- a/vulkano/src/sync/future/fence_signal.rs
+++ b/vulkano/src/sync/future/fence_signal.rs
@@ -371,25 +371,19 @@ impl<F> Future for FenceSignalFuture<F>
 where
     F: GpuFuture,
 {
-    type Output = ();
+    type Output = Result<(), OomError>;
 
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        // Vulkan only allows polling of the fence status, so we have to use a spin future.
-        // This is still better than blocking in async applications, since a smart-enough async engine
-        // can choose to run some other tasks between polls of this one.
-
-        // Check if we are done without blocking (much)
-        // A minimal non-zero wait time indicates to the driver that it can do some non-zero
-        // amount of work, hence potentially reducing the CPU load of a spin loop as well as reducing
-        // the time taken for the future to resolve.
-        // TODO: Test this hypothesis
-        if let Ok(()) = self.wait(Some(Duration::from_micros(1))) {
-            return Poll::Ready(())
-        };
-
-        // Otherwise spin
-        cx.waker().wake_by_ref();
-        return Poll::Pending;
+        // Implement by delegating to the wrapped fence.
+        let state = self.state.lock();
+
+        match &*state {
+            FenceSignalFutureState::Pending(_, fence)
+            | FenceSignalFutureState::PartiallyFlushed(_, fence)
+            | FenceSignalFutureState::Flushed(_, fence) => fence.poll_impl(cx),
+            FenceSignalFutureState::Cleaned => Poll::Ready(Ok(())),
+            FenceSignalFutureState::Poisoned => unreachable!(),
+        }
     }
 }
 
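
Usage sketch (not part of the patch series above): with both patches applied, the future returned by
`then_signal_fence_and_flush()` can simply be `.await`ed from async code instead of being waited on
with `wait()`. Everything below is an illustrative assumption rather than code from the series: the
`submit_and_await` helper, the pre-built `queue` and `command_buffer`, the boxed error type, and the
exact vulkano import paths depend on the vulkano version this series targets.

    use std::{error::Error, sync::Arc};
    use vulkano::{
        command_buffer::PrimaryAutoCommandBuffer,
        device::Queue,
        sync::{self, GpuFuture},
    };

    // Hypothetical helper: submit one command buffer and wait for it asynchronously.
    async fn submit_and_await(
        queue: Arc<Queue>,
        command_buffer: PrimaryAutoCommandBuffer,
    ) -> Result<(), Box<dyn Error>> {
        let future = sync::now(queue.device().clone())
            .then_execute(queue.clone(), command_buffer)?
            .then_signal_fence_and_flush()?;

        // With these patches the fence signal future implements `std::future::Future`,
        // so awaiting it replaces a blocking `wait()`; the executor is free to run
        // other tasks while the future spins on the fence status.
        future.await?;

        Ok(())
    }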
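
A bare `Fence` can be awaited in the same way through the new `impl Future for Fence`. Another
minimal sketch with assumed names and import paths; the fence is created pre-signaled purely so the
example resolves immediately, and `device` is assumed to be an existing `Arc<Device>`:

    use std::sync::Arc;
    use vulkano::{
        device::Device,
        sync::{Fence, FenceCreateInfo},
    };

    // Hypothetical example: create a fence that is already signaled and await it.
    async fn await_fence(device: Arc<Device>) {
        let fence = Fence::new(
            device,
            FenceCreateInfo {
                signaled: true,
                ..Default::default()
            },
        )
        .expect("failed to create fence");

        // `Fence` itself now implements `std::future::Future`; the output is
        // `Result<(), OomError>`, so errors reported by `is_signaled` surface here.
        fence.await.expect("polling the fence failed");
    }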