diff --git a/vulkano/src/sync/fence.rs b/vulkano/src/sync/fence.rs
index 26749022a9..908e1fd6b9 100644
--- a/vulkano/src/sync/fence.rs
+++ b/vulkano/src/sync/fence.rs
@@ -24,6 +24,9 @@ use std::{
     sync::{Arc, Weak},
     time::Duration,
 };
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
 
 /// A two-state synchronization primitive that is signalled by the device and waited on by the host.
 ///
@@ -32,17 +35,17 @@ use std::{
 /// The primary use of a fence is to know when execution of a queue has reached a particular point.
 /// When adding a command to a queue, a fence can be provided with the command, to be signaled
 /// when the operation finishes. You can check for a fence's current status by calling
-/// `is_signaled` or `wait` on it. If the fence is found to be signaled, that means that the queue
-/// has completed the operation that is associated with the fence, and all operations that were
-/// submitted before it have been completed as well.
+/// `is_signaled`, `wait` or `await` on it. If the fence is found to be signaled, that means that
+/// the queue has completed the operation that is associated with the fence, and all operations that
+/// were submitted before it have been completed as well.
 ///
 /// When a queue command accesses a resource, it must be kept alive until the queue command has
 /// finished executing, and you may not be allowed to perform certain other operations (or even any)
-/// while the resource is in use. By calling `is_signaled` or `wait`, the queue will be notified
-/// when the fence is signaled, so that all resources of the associated queue operation and
+/// while the resource is in use. By calling `is_signaled`, `wait` or `await`, the queue will be
+/// notified when the fence is signaled, so that all resources of the associated queue operation and
 /// preceding operations can be released.
 ///
-/// Because of this, it is highly recommended to call `is_signaled` or `wait` on your fences.
+/// Because of this, it is highly recommended to call `is_signaled`, `wait` or `await` on your fences.
 /// Otherwise, the queue will hold onto resources indefinitely (using up memory)
 /// and resource locks will not be released, which may cause errors when submitting future
 /// queue operations. It is not strictly necessary to wait for *every* fence, as a fence
@@ -1054,6 +1057,25 @@ impl Fence {
     pub(crate) fn state(&self) -> MutexGuard<'_, FenceState> {
         self.state.lock()
     }
+
+    // Polling logic shared by `Fence` and `FenceSignalFuture`.
+    pub(crate) fn poll_impl(&self, cx: &mut Context<'_>) -> Poll<Result<(), OomError>> {
+        // Vulkan only allows polling of the fence status, so we have to use a spin future.
+        // This is still better than blocking in async applications, since a smart enough async
+        // engine can choose to run other tasks between probes of this one.
+
+        // Check whether we are done, without blocking.
+        match self.is_signaled() {
+            Err(e) => return Poll::Ready(Err(e)),
+            Ok(true) => return Poll::Ready(Ok(())),
+            // Not yet signaled; fall through and spin.
+            Ok(false) => {}
+        }
+
+        // Otherwise spin: ask to be polled again as soon as possible.
+        cx.waker().wake_by_ref();
+        Poll::Pending
+    }
 }
 
 impl Drop for Fence {
@@ -1071,6 +1093,15 @@ impl Drop for Fence {
     }
 }
 
+impl Future for Fence
+{
+    type Output = Result<(), OomError>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        self.poll_impl(cx)
+    }
+}
+
 unsafe impl VulkanObject for Fence {
     type Handle = ash::vk::Fence;
 
diff --git a/vulkano/src/sync/future/fence_signal.rs b/vulkano/src/sync/future/fence_signal.rs
index 686d9cb6ff..2624275a50 100644
--- a/vulkano/src/sync/future/fence_signal.rs
+++ b/vulkano/src/sync/future/fence_signal.rs
@@ -19,6 +19,9 @@ use crate::{
 };
 use parking_lot::{Mutex, MutexGuard};
 use std::{mem::replace, ops::Range, sync::Arc, time::Duration};
+use std::future::Future;
+use std::pin::Pin;
+use std::task::{Context, Poll};
 
 /// Builds a new fence signal future.
 pub fn then_signal_fence<F>(future: F, behavior: FenceSignalFutureBehavior) -> FenceSignalFuture<F>
@@ -55,6 +58,11 @@ pub enum FenceSignalFutureBehavior {
 /// Contrary to most other future types, it is possible to block the current thread until the event
 /// happens. This is done by calling the `wait()` function.
 ///
+/// Waiting can also be done through Rust's async system, by simply `.await`ing this object. Note,
+/// however, that the Vulkan API only allows a fence's status to be polled, so awaiting spins on
+/// the fence rather than blocking inside the driver. For a long-running task, calling `wait()` may
+/// therefore be less CPU intensive, depending on the driver's implementation.
+///
 /// Also note that the `GpuFuture` trait is implemented on `Arc<FenceSignalFuture<F>>`.
 /// This means that you can put this future in an `Arc` and keep a copy of it somewhere in order
 /// to know when the execution reached that point.
@@ -357,6 +365,26 @@ where
     }
 }
 
+impl<F> Future for FenceSignalFuture<F>
+where
+    F: GpuFuture,
+{
+    type Output = Result<(), OomError>;
+
+    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+        // Delegate to the shared polling logic on the underlying fence.
+        let state = self.state.lock();
+
+        match &*state {
+            FenceSignalFutureState::Pending(_, fence)
+            | FenceSignalFutureState::PartiallyFlushed(_, fence)
+            | FenceSignalFutureState::Flushed(_, fence) => fence.poll_impl(cx),
+            FenceSignalFutureState::Cleaned => Poll::Ready(Ok(())),
+            FenceSignalFutureState::Poisoned => unreachable!(),
+        }
+    }
+}
+
 impl<F> FenceSignalFutureState<F> {
     fn get_prev(&self) -> Option<&F> {
         match self {
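A minimal usage sketch of what this patch enables, for illustration only: it assumes the `futures` crate's `block_on` as the executor and a `FenceSignalFuture` obtained elsewhere (e.g. from `then_signal_fence_and_flush()`); neither is part of this diff.

```rust
use futures::executor::block_on;
use vulkano::sync::{future::FenceSignalFuture, GpuFuture};
use vulkano::OomError;

// With the new `Future` impl, a FenceSignalFuture can be awaited instead of
// calling `wait()`. The await path spin-checks the fence via the waker.
fn wait_async<F: GpuFuture>(fence_future: FenceSignalFuture<F>) -> Result<(), OomError> {
    // In real code this would be awaited inside a larger async task;
    // block_on is just the simplest way to drive it to completion here.
    block_on(async move { fence_future.await })
}
```

Because the poll implementation spins, the async path pays off mainly when the executor has other tasks to run between polls; for a single long wait on an otherwise idle thread, the blocking `wait()` remains the cheaper option.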