diff --git a/vulkano/src/buffer/subbuffer.rs b/vulkano/src/buffer/subbuffer.rs
index 7d4c5fb60c..b09784a09a 100644
--- a/vulkano/src/buffer/subbuffer.rs
+++ b/vulkano/src/buffer/subbuffer.rs
@@ -33,6 +33,7 @@ use std::{
     ops::{Deref, DerefMut, Range, RangeBounds},
     ptr::{self, NonNull},
     sync::Arc,
+    thread,
 };
 
 pub use vulkano_macros::BufferContents;
@@ -637,7 +638,7 @@ impl Drop for BufferWriteGuard<'_, T> {
             BufferMemory::Sparse => unreachable!(),
         };
 
-        if allocation.atom_size().is_some() {
+        if allocation.atom_size().is_some() && !thread::panicking() {
             unsafe { allocation.flush_range(self.range.clone()).unwrap() };
         }
 
diff --git a/vulkano/src/command_buffer/allocator.rs b/vulkano/src/command_buffer/allocator.rs
index 8a49bf102c..a2ca514690 100644
--- a/vulkano/src/command_buffer/allocator.rs
+++ b/vulkano/src/command_buffer/allocator.rs
@@ -33,6 +33,7 @@ use std::{
     marker::PhantomData,
     mem::ManuallyDrop,
     sync::Arc,
+    thread,
 };
 
 use thread_local::ThreadLocal;
@@ -498,6 +499,11 @@ impl Pool {
 impl Drop for Pool {
     fn drop(&mut self) {
         let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+
+        if thread::panicking() {
+            return;
+        }
+
         unsafe { inner.inner.reset(false) }.unwrap();
         inner.primary_allocations.set(0);
         inner.secondary_allocations.set(0);
@@ -623,6 +629,8 @@ impl Drop for StandardCommandBufferAlloc {
             CommandBufferLevel::Primary => &self.pool.inner.primary_pool,
             CommandBufferLevel::Secondary => &self.pool.inner.secondary_pool,
         };
+        // This can't panic, because if an allocation from a particular kind of pool was made, then
+        // the pool must exist.
         let _ = pool.as_ref().unwrap().push(inner);
     }
 }
diff --git a/vulkano/src/command_buffer/traits.rs b/vulkano/src/command_buffer/traits.rs
index d585f3331f..80469d8b9b 100644
--- a/vulkano/src/command_buffer/traits.rs
+++ b/vulkano/src/command_buffer/traits.rs
@@ -34,6 +34,7 @@ use std::{
         atomic::{AtomicBool, Ordering},
         Arc,
     },
+    thread,
 };
 
 pub unsafe trait PrimaryCommandBufferAbstract:
@@ -441,14 +442,12 @@ where
     F: GpuFuture,
 {
     fn drop(&mut self) {
-        unsafe {
-            if !*self.finished.get_mut() {
-                // TODO: handle errors?
-                self.flush().unwrap();
-                // Block until the queue finished.
-                self.queue.with(|mut q| q.wait_idle()).unwrap();
-                self.previous.signal_finished();
-            }
+        if !*self.finished.get_mut() && !thread::panicking() {
+            // TODO: handle errors?
+            self.flush().unwrap();
+            // Block until the queue finished.
+            self.queue.with(|mut q| q.wait_idle()).unwrap();
+            unsafe { self.previous.signal_finished() };
         }
     }
 }
diff --git a/vulkano/src/descriptor_set/allocator.rs b/vulkano/src/descriptor_set/allocator.rs
index 18ac003339..954db3fbed 100644
--- a/vulkano/src/descriptor_set/allocator.rs
+++ b/vulkano/src/descriptor_set/allocator.rs
@@ -30,7 +30,7 @@ use crate::{
     OomError,
 };
 use crossbeam_queue::ArrayQueue;
-use std::{cell::UnsafeCell, mem::ManuallyDrop, num::NonZeroU64, sync::Arc};
+use std::{cell::UnsafeCell, mem::ManuallyDrop, num::NonZeroU64, sync::Arc, thread};
 use thread_local::ThreadLocal;
 
 const MAX_POOLS: usize = 32;
@@ -417,7 +417,11 @@ impl VariablePool {
 impl Drop for VariablePool {
     fn drop(&mut self) {
         let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
-        // TODO: This should not return `Result`, resetting a pool can't fail.
+
+        if thread::panicking() {
+            return;
+        }
+
         unsafe { inner.reset() }.unwrap();
 
         // If there is not enough space in the reserve, we destroy the pool. The only way this can
diff --git a/vulkano/src/swapchain/swapchain.rs b/vulkano/src/swapchain/swapchain.rs
index ed67718b69..400b2b9d8f 100644
--- a/vulkano/src/swapchain/swapchain.rs
+++ b/vulkano/src/swapchain/swapchain.rs
@@ -42,6 +42,7 @@ use std::{
         atomic::{AtomicBool, AtomicU64, Ordering},
         Arc,
     },
+    thread,
     time::Duration,
 };
 
@@ -1743,7 +1744,11 @@ unsafe impl GpuFuture for SwapchainAcquireFuture {
 
 impl Drop for SwapchainAcquireFuture {
     fn drop(&mut self) {
-        if let Some(ref fence) = self.fence {
+        if thread::panicking() {
+            return;
+        }
+
+        if let Some(fence) = &self.fence {
             fence.wait(None).unwrap(); // TODO: handle error?
             self.semaphore = None;
         }
@@ -2184,6 +2189,10 @@ where
     P: GpuFuture,
 {
     fn drop(&mut self) {
+        if thread::panicking() {
+            return;
+        }
+
         unsafe {
             if !*self.flushed.get_mut() {
                 // Flushing may fail, that's okay. We will still wait for the queue later, so any
diff --git a/vulkano/src/sync/future/fence_signal.rs b/vulkano/src/sync/future/fence_signal.rs
index 9e78be78b7..7f71d884b2 100644
--- a/vulkano/src/sync/future/fence_signal.rs
+++ b/vulkano/src/sync/future/fence_signal.rs
@@ -29,6 +29,7 @@ use std::{
     pin::Pin,
     sync::Arc,
     task::{Context, Poll},
+    thread,
     time::Duration,
 };
 
@@ -523,6 +524,10 @@ where
     F: GpuFuture,
 {
     fn drop(&mut self) {
+        if thread::panicking() {
+            return;
+        }
+
         let mut state = self.state.lock();
 
         // We ignore any possible error while submitting for now. Problems are handled below.
diff --git a/vulkano/src/sync/future/semaphore_signal.rs b/vulkano/src/sync/future/semaphore_signal.rs
index 5293b37236..f31d37ac5a 100644
--- a/vulkano/src/sync/future/semaphore_signal.rs
+++ b/vulkano/src/sync/future/semaphore_signal.rs
@@ -25,6 +25,7 @@ use std::{
         atomic::{AtomicBool, Ordering},
         Arc,
     },
+    thread,
 };
 
 /// Builds a new semaphore signal future.
@@ -250,14 +251,12 @@ where
     F: GpuFuture,
 {
     fn drop(&mut self) {
-        unsafe {
-            if !*self.finished.get_mut() {
-                // TODO: handle errors?
-                self.flush().unwrap();
-                // Block until the queue finished.
-                self.queue().unwrap().with(|mut q| q.wait_idle()).unwrap();
-                self.previous.signal_finished();
-            }
+        if !*self.finished.get_mut() && !thread::panicking() {
+            // TODO: handle errors?
+            self.flush().unwrap();
+            // Block until the queue finished.
+            self.queue().unwrap().with(|mut q| q.wait_idle()).unwrap();
+            unsafe { self.previous.signal_finished() };
         }
     }
 }