From d4bc64c2d4b1550830094d7ab0e7ae5152c14183 Mon Sep 17 00:00:00 2001
From: marc0246 <40955683+marc0246@users.noreply.github.com>
Date: Mon, 31 Oct 2022 06:37:52 +0100
Subject: [PATCH] Merge single layout descriptor pools and allocator

---
 vulkano/src/descriptor_set/allocator.rs       | 408 ++++++++++++--
 vulkano/src/descriptor_set/layout.rs          |   4 +
 vulkano/src/descriptor_set/mod.rs             |   2 -
 .../src/descriptor_set/single_layout_pool.rs  | 512 ------------------
 4 files changed, 356 insertions(+), 570 deletions(-)
 delete mode 100644 vulkano/src/descriptor_set/single_layout_pool.rs

diff --git a/vulkano/src/descriptor_set/allocator.rs b/vulkano/src/descriptor_set/allocator.rs
index 366dc9ed59..1b921126ff 100644
--- a/vulkano/src/descriptor_set/allocator.rs
+++ b/vulkano/src/descriptor_set/allocator.rs
@@ -16,11 +16,12 @@
 //! [`DescriptorSetAllocator`] trait, which you can implement yourself or use the vulkano-provided
 //! [`StandardDescriptorSetAllocator`].
 
+use self::sorted_map::SortedMap;
 use super::{
     layout::DescriptorSetLayout,
-    single_layout_pool::{
-        SingleLayoutDescriptorSetPool, SingleLayoutPoolAlloc,
-        SingleLayoutVariableDescriptorSetPool, SingleLayoutVariablePoolAlloc,
+    pool::{
+        DescriptorPool, DescriptorPoolAllocError, DescriptorPoolCreateInfo,
+        DescriptorSetAllocateInfo,
     },
     sys::UnsafeDescriptorSet,
 };
@@ -28,10 +29,14 @@ use crate::{
     device::{Device, DeviceOwned},
     OomError,
 };
-use ahash::HashMap;
-use std::{cell::UnsafeCell, sync::Arc};
+use crossbeam_queue::ArrayQueue;
+use std::{cell::UnsafeCell, mem::ManuallyDrop, num::NonZeroU64, sync::Arc};
 use thread_local::ThreadLocal;
 
+const MAX_POOLS: usize = 32;
+
+const MAX_SETS: usize = 256;
+
 /// Types that manage the memory of descriptor sets.
 ///
 /// # Safety
@@ -74,15 +79,14 @@
 /// the program, in order to avoid creating and destroying [`DescriptorPool`]s, as that is
 /// expensive.
 ///
-/// Internally, this allocator uses one [`SingleLayoutDescriptorSetPool`] /
-/// [`SingleLayoutVariableDescriptorSetPool`] per descriptor set layout per thread, using
-/// Thread-Local Storage. When a thread first allocates, an entry is reserved for it in the TLS.
-/// After a thread exits and the allocator wasn't dropped yet, its entry is freed, but the pools
-/// it used are not dropped. The next time a new thread allocates for the first time, the entry is
-/// reused along with the pools. If all threads drop their reference to the allocator, all entries
-/// along with the allocator are dropped, even if the threads didn't exit yet, which is why you
-/// should keep the allocator alive for as long as you need to allocate so that the pools can keep
-/// being reused.
+/// Internally, this allocator uses one or more `DescriptorPool`s per descriptor set layout per
+/// thread, using Thread-Local Storage. When a thread first allocates, an entry is reserved for the
+/// thread and descriptor set layout combination. After a thread exits and the allocator wasn't
+/// dropped yet, its entries are freed, but the pools it used are not dropped. The next time a new
+/// thread allocates for the first time, the entries are reused along with the pools. If all
+/// threads drop their reference to the allocator, all entries along with the allocator are
+/// dropped, even if the threads didn't exit yet, which is why you should keep the allocator alive
+/// for as long as you need to allocate so that the pools can keep being reused.
 ///
 /// This allocator only needs to lock when a thread first allocates or when a thread that
 /// previously allocated exits. In all other cases, allocation is lock-free.
@@ -91,15 +95,20 @@ pub trait DescriptorSetAlloc: Send + Sync {
 #[derive(Debug)]
 pub struct StandardDescriptorSetAllocator {
     device: Arc<Device>,
-    pools: ThreadLocal<UnsafeCell<HashMap<Arc<DescriptorSetLayout>, Pool>>>,
+    pools: ThreadLocal<UnsafeCell<SortedMap<NonZeroU64, Entry>>>,
 }
 
 #[derive(Debug)]
-enum Pool {
-    Fixed(SingleLayoutDescriptorSetPool),
-    Variable(SingleLayoutVariableDescriptorSetPool),
+enum Entry {
+    Fixed(FixedEntry),
+    Variable(VariableEntry),
 }
 
+// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
+// `Send + Sync`. `FixedPool` and `VariablePool` are `Send + !Sync` because `DescriptorPool` is
+// `!Sync`. That's fine however because we never access the `DescriptorPool` concurrently.
+unsafe impl Send for Entry {}
+
 impl StandardDescriptorSetAllocator {
     /// Creates a new `StandardDescriptorSetAllocator`.
     #[inline]
@@ -109,6 +118,25 @@ impl StandardDescriptorSetAllocator {
             pools: ThreadLocal::new(),
         }
     }
+
+    /// Clears the entry for the given descriptor set layout and the current thread. This does not
+    /// mean that the pools are dropped immediately. A pool is kept alive for as long as descriptor
+    /// sets allocated from it exist.
+    ///
+    /// This has no effect if the entry was not initialized yet.
+    #[inline]
+    pub fn clear(&self, layout: &Arc<DescriptorSetLayout>) {
+        unsafe { &mut *self.pools.get_or(Default::default).get() }.remove(layout.id())
+    }
+
+    /// Clears all entries for the current thread. This does not mean that the pools are dropped
+    /// immediately. A pool is kept alive for as long as descriptor sets allocated from it exist.
+    ///
+    /// This has no effect if no entries were initialized yet.
+    #[inline]
+    pub fn clear_all(&self) {
+        unsafe { *self.pools.get_or(Default::default).get() = SortedMap::default() };
+    }
 }
 
 unsafe impl DescriptorSetAllocator for StandardDescriptorSetAllocator {
@@ -144,66 +172,333 @@ unsafe impl DescriptorSetAllocator for StandardDescriptorSetAllocator {
             max_count,
         );
 
-        let pools = self.pools.get_or(|| UnsafeCell::new(HashMap::default()));
-        let pools = unsafe { &mut *pools.get() };
+        let pools = self.pools.get_or(Default::default);
+        let entry = unsafe { &mut *pools.get() }.get_or_try_insert(layout.id(), || {
+            if max_count == 0 {
+                FixedEntry::new(layout.clone()).map(Entry::Fixed)
+            } else {
+                VariableEntry::new(layout.clone()).map(Entry::Variable)
+            }
+        })?;
+
+        match entry {
+            Entry::Fixed(entry) => entry.allocate(),
+            Entry::Variable(entry) => entry.allocate(variable_descriptor_count),
+        }
+    }
+}
+
+unsafe impl DeviceOwned for StandardDescriptorSetAllocator {
+    #[inline]
+    fn device(&self) -> &Arc<Device> {
+        &self.device
+    }
+}
+
+#[derive(Debug)]
+struct FixedEntry {
+    // The `FixedPool` struct contains an actual Vulkan pool. Every time it is full we create
+    // a new pool and replace the current one with the new one.
+    pool: Arc<FixedPool>,
+    // The amount of sets available to use when we create a new Vulkan pool.
+    set_count: usize,
+    // The descriptor set layout that this pool is for.
+    layout: Arc<DescriptorSetLayout>,
+}
+
+impl FixedEntry {
+    fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, OomError> {
+        Ok(FixedEntry {
+            pool: FixedPool::new(&layout, MAX_SETS)?,
+            set_count: MAX_SETS,
+            layout,
+        })
+    }
 
-        // We do this instead of using `HashMap::entry` directly because that would involve cloning
-        // an `Arc` every time. `hash_raw_entry` is still not stabilized >:(
-        let pool = if let Some(pool) = pools.get_mut(layout) {
-            pool
+    fn allocate(&mut self) -> Result<StandardDescriptorSetAlloc, OomError> {
+        let inner = if let Some(inner) = self.pool.reserve.pop() {
+            inner
         } else {
-            pools.entry(layout.clone()).or_insert(if max_count == 0 {
-                Pool::Fixed(SingleLayoutDescriptorSetPool::new(layout.clone())?)
+            self.set_count *= 2;
+            self.pool = FixedPool::new(&self.layout, self.set_count)?;
+
+            self.pool.reserve.pop().unwrap()
+        };
+
+        Ok(StandardDescriptorSetAlloc {
+            inner: ManuallyDrop::new(inner),
+            parent: AllocParent::Fixed(self.pool.clone()),
+        })
+    }
+}
+
+#[derive(Debug)]
+struct FixedPool {
+    // The actual Vulkan descriptor pool. This field isn't actually used anywhere, but we need to
+    // keep the pool alive in order to keep the descriptor sets valid.
+    _inner: DescriptorPool,
+    // List of descriptor sets. When `alloc` is called, a descriptor will be extracted from this
+    // list. When a `SingleLayoutPoolAlloc` is dropped, its descriptor set is put back in this list.
+    reserve: ArrayQueue<UnsafeDescriptorSet>,
+}
+
+impl FixedPool {
+    fn new(layout: &Arc<DescriptorSetLayout>, set_count: usize) -> Result<Arc<Self>, OomError> {
+        let inner = DescriptorPool::new(
+            layout.device().clone(),
+            DescriptorPoolCreateInfo {
+                max_sets: set_count as u32,
+                pool_sizes: layout
+                    .descriptor_counts()
+                    .iter()
+                    .map(|(&ty, &count)| (ty, count * set_count as u32))
+                    .collect(),
+                ..Default::default()
+            },
+        )?;
+
+        let allocate_infos = (0..set_count).map(|_| DescriptorSetAllocateInfo {
+            layout,
+            variable_descriptor_count: 0,
+        });
+
+        let reserve = match unsafe { inner.allocate_descriptor_sets(allocate_infos) } {
+            Ok(allocs) => {
+                let reserve = ArrayQueue::new(set_count);
+                for alloc in allocs {
+                    let _ = reserve.push(alloc);
+                }
+
+                reserve
+            }
+            Err(DescriptorPoolAllocError::OutOfHostMemory) => {
+                return Err(OomError::OutOfHostMemory);
+            }
+            Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
+                return Err(OomError::OutOfDeviceMemory);
+            }
+            Err(DescriptorPoolAllocError::FragmentedPool) => {
+                // This can't happen as we don't free individual sets.
+                unreachable!();
+            }
+            Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
+                // We created the pool with an exact size.
+                unreachable!();
+            }
+        };
+
+        Ok(Arc::new(FixedPool {
+            _inner: inner,
+            reserve,
+        }))
+    }
+}
+
+#[derive(Debug)]
+struct VariableEntry {
+    // The `VariablePool` struct contains an actual Vulkan pool. Every time it is full
+    // we grab one from the reserve, or create a new pool if there are none.
+    pool: Arc<VariablePool>,
+    // When a `VariablePool` is dropped, it returns its Vulkan pool here for reuse.
+    reserve: Arc<ArrayQueue<DescriptorPool>>,
+    // The descriptor set layout that this pool is for.
+    layout: Arc<DescriptorSetLayout>,
+    // The number of sets currently allocated from the Vulkan pool.
+    allocations: usize,
+}
+
+impl VariableEntry {
+    fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, OomError> {
+        let reserve = Arc::new(ArrayQueue::new(MAX_POOLS));
+
+        Ok(VariableEntry {
+            pool: VariablePool::new(&layout, reserve.clone())?,
+            reserve,
+            layout,
+            allocations: 0,
+        })
+    }
+
+    fn allocate(
+        &mut self,
+        variable_descriptor_count: u32,
+    ) -> Result<StandardDescriptorSetAlloc, OomError> {
+        if self.allocations >= MAX_SETS {
+            self.pool = if let Some(inner) = self.reserve.pop() {
+                Arc::new(VariablePool {
+                    inner: ManuallyDrop::new(inner),
+                    reserve: self.reserve.clone(),
+                })
             } else {
-                Pool::Variable(SingleLayoutVariableDescriptorSetPool::new(layout.clone())?)
-            })
+                VariablePool::new(&self.layout, self.reserve.clone())?
+            };
+            self.allocations = 0;
+        }
+
+        let allocate_info = DescriptorSetAllocateInfo {
+            layout: &self.layout,
+            variable_descriptor_count,
         };
 
-        let inner = match pool {
-            Pool::Fixed(pool) => PoolAlloc::Fixed(pool.next_alloc()?),
-            Pool::Variable(pool) => {
-                PoolAlloc::Variable(pool.next_alloc(variable_descriptor_count)?)
+        let inner = match unsafe { self.pool.inner.allocate_descriptor_sets([allocate_info]) } {
+            Ok(mut sets) => sets.next().unwrap(),
+            Err(DescriptorPoolAllocError::OutOfHostMemory) => {
+                return Err(OomError::OutOfHostMemory);
+            }
+            Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
+                return Err(OomError::OutOfDeviceMemory);
+            }
+            Err(DescriptorPoolAllocError::FragmentedPool) => {
+                // This can't happen as we don't free individual sets.
+                unreachable!();
+            }
+            Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
+                // We created the pool to fit the maximum variable descriptor count.
+                unreachable!();
             }
         };
 
-        Ok(StandardDescriptorSetAlloc { inner })
+        self.allocations += 1;
+
+        Ok(StandardDescriptorSetAlloc {
+            inner: ManuallyDrop::new(inner),
+            parent: AllocParent::Variable(self.pool.clone()),
+        })
     }
 }
 
-unsafe impl DeviceOwned for StandardDescriptorSetAllocator {
-    #[inline]
-    fn device(&self) -> &Arc<Device> {
-        &self.device
+#[derive(Debug)]
+struct VariablePool {
+    // The actual Vulkan descriptor pool.
+    inner: ManuallyDrop<DescriptorPool>,
+    // Where we return the Vulkan descriptor pool in our `Drop` impl.
+    reserve: Arc<ArrayQueue<DescriptorPool>>,
+}
+
+impl VariablePool {
+    fn new(
+        layout: &Arc<DescriptorSetLayout>,
+        reserve: Arc<ArrayQueue<DescriptorPool>>,
+    ) -> Result<Arc<Self>, OomError> {
+        DescriptorPool::new(
+            layout.device().clone(),
+            DescriptorPoolCreateInfo {
+                max_sets: MAX_SETS as u32,
+                pool_sizes: layout
+                    .descriptor_counts()
+                    .iter()
+                    .map(|(&ty, &count)| (ty, count * MAX_SETS as u32))
+                    .collect(),
+                ..Default::default()
+            },
+        )
+        .map(|inner| {
+            Arc::new(Self {
+                inner: ManuallyDrop::new(inner),
+                reserve,
+            })
+        })
+    }
+}
+
+impl Drop for VariablePool {
+    fn drop(&mut self) {
+        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+        // TODO: This should not return `Result`, resetting a pool can't fail.
+        unsafe { inner.reset() }.unwrap();
+
+        // If there is not enough space in the reserve, we destroy the pool. The only way this can
+        // happen is if something is resource hogging, forcing new pools to be created such that
+        // the number exceeds `MAX_POOLS`, and then drops them all at once.
+        let _ = self.reserve.push(inner);
     }
 }
 
 /// A descriptor set allocated from a [`StandardDescriptorSetAllocator`].
 #[derive(Debug)]
 pub struct StandardDescriptorSetAlloc {
-    // The actual descriptor alloc.
-    inner: PoolAlloc,
+    // The actual descriptor set.
+    inner: ManuallyDrop<UnsafeDescriptorSet>,
+    // The pool where we allocated from. Needed for our `Drop` impl.
+    parent: AllocParent,
 }
 
 #[derive(Debug)]
-enum PoolAlloc {
-    Fixed(SingleLayoutPoolAlloc),
-    Variable(SingleLayoutVariablePoolAlloc),
+enum AllocParent {
+    Fixed(Arc<FixedPool>),
+    Variable(Arc<VariablePool>),
 }
 
+// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
+// `Send + Sync`. `FixedPool` and `VariablePool` are `Send + !Sync` because `DescriptorPool` is
+// `!Sync`. That's fine however because we never access the `DescriptorPool` concurrently.
+unsafe impl Send for StandardDescriptorSetAlloc {}
+unsafe impl Sync for StandardDescriptorSetAlloc {}
+
 impl DescriptorSetAlloc for StandardDescriptorSetAlloc {
     #[inline]
     fn inner(&self) -> &UnsafeDescriptorSet {
-        match &self.inner {
-            PoolAlloc::Fixed(alloc) => alloc.inner(),
-            PoolAlloc::Variable(alloc) => alloc.inner(),
-        }
+        &self.inner
     }
 
     #[inline]
     fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
-        match &mut self.inner {
-            PoolAlloc::Fixed(alloc) => alloc.inner_mut(),
-            PoolAlloc::Variable(alloc) => alloc.inner_mut(),
-        }
+        &mut self.inner
+    }
+}
+
+impl Drop for StandardDescriptorSetAlloc {
+    #[inline]
+    fn drop(&mut self) {
+        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+
+        match &self.parent {
+            AllocParent::Fixed(pool) => {
+                let _ = pool.reserve.push(inner);
+            }
+            AllocParent::Variable(_) => {}
+        }
+    }
+}
+
+mod sorted_map {
+    use smallvec::SmallVec;
+
+    /// Minimal implementation of a `SortedMap`. This outperforms both a [`BTreeMap`] and
+    /// [`HashMap`] for small numbers of elements. In Vulkan, having too many descriptor set
+    /// layouts is highly discouraged, which is why this optimization makes sense.
+    #[derive(Debug)]
+    pub(super) struct SortedMap<K, V> {
+        inner: SmallVec<[(K, V); 8]>,
+    }
+
+    impl<K, V> Default for SortedMap<K, V> {
+        fn default() -> Self {
+            Self {
+                inner: SmallVec::default(),
+            }
+        }
+    }
+
+    impl<K: Ord + Copy, V> SortedMap<K, V> {
+        pub fn get_or_try_insert<E>(
+            &mut self,
+            key: K,
+            f: impl FnOnce() -> Result<V, E>,
+        ) -> Result<&mut V, E> {
+            match self.inner.binary_search_by_key(&key, |&(k, _)| k) {
+                Ok(index) => Ok(&mut self.inner[index].1),
+                Err(index) => {
+                    self.inner.insert(index, (key, f()?));
+                    Ok(&mut self.inner[index].1)
+                }
+            }
+        }
+
+        pub fn remove(&mut self, key: K) {
+            if let Ok(index) = self.inner.binary_search_by_key(&key, |&(k, _)| k) {
+                self.inner.remove(index);
+            }
+        }
+    }
+}
@@ -242,16 +537,17 @@ mod tests {
 
         let allocator = StandardDescriptorSetAllocator::new(device);
 
-        let pool1 = if let PoolAlloc::Fixed(alloc) = allocator.allocate(&layout, 0).unwrap().inner {
-            alloc.pool().handle()
-        } else {
-            unreachable!()
-        };
+        let pool1 =
+            if let AllocParent::Fixed(pool) = &allocator.allocate(&layout, 0).unwrap().parent {
+                pool._inner.handle()
+            } else {
+                unreachable!()
+            };
 
         thread::spawn(move || {
             let pool2 =
-                if let PoolAlloc::Fixed(alloc) = allocator.allocate(&layout, 0).unwrap().inner {
-                    alloc.pool().handle()
+                if let AllocParent::Fixed(pool) = &allocator.allocate(&layout, 0).unwrap().parent {
+                    pool._inner.handle()
                 } else {
                     unreachable!()
                 };
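For reviewers, a minimal usage sketch of the reworked allocator surface follows. It is not part of the diff, and it assumes an already-created `device: Arc<Device>` and a non-push-descriptor `layout: Arc<DescriptorSetLayout>`; only `new`, `allocate` (via the `DescriptorSetAllocator` trait, which must be in scope), and `clear` come from this patch.

    // Sketch: exercising the new public surface of `StandardDescriptorSetAllocator`.
    let allocator = StandardDescriptorSetAllocator::new(device.clone());

    // The second argument is the variable descriptor count; 0 here, so the
    // fixed-size `FixedEntry` path is taken (matching the layout's `max_count`).
    let alloc = allocator.allocate(&layout, 0)?;

    // Dropping the alloc pushes the descriptor set back into the pool's reserve,
    // so the next `allocate` on this thread reuses it without touching Vulkan.
    drop(alloc);

    // Drops this thread's entry for `layout`; the Vulkan pool itself stays alive
    // until every set allocated from it has been dropped.
    allocator.clear(&layout);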
diff --git a/vulkano/src/descriptor_set/layout.rs b/vulkano/src/descriptor_set/layout.rs
index 8447d547b6..64693b417f 100644
--- a/vulkano/src/descriptor_set/layout.rs
+++ b/vulkano/src/descriptor_set/layout.rs
@@ -372,6 +372,10 @@ impl DescriptorSetLayout {
         Ok(handle)
     }
 
+    pub(crate) fn id(&self) -> NonZeroU64 {
+        self.id
+    }
+
     /// Returns the bindings of the descriptor set layout.
     #[inline]
     pub fn bindings(&self) -> &BTreeMap<u32, DescriptorSetLayoutBinding> {

diff --git a/vulkano/src/descriptor_set/mod.rs b/vulkano/src/descriptor_set/mod.rs
index 061d8c9b49..e468c02b49 100644
--- a/vulkano/src/descriptor_set/mod.rs
+++ b/vulkano/src/descriptor_set/mod.rs
@@ -82,7 +82,6 @@ pub(crate) use self::update::{check_descriptor_write, DescriptorWriteInfo};
 pub use self::{
     collection::DescriptorSetsCollection,
     persistent::PersistentDescriptorSet,
-    single_layout_pool::{SingleLayoutDescriptorSetPool, SingleLayoutVariableDescriptorSetPool},
     update::{DescriptorSetUpdateError, WriteDescriptorSet, WriteDescriptorSetElements},
 };
 use self::{layout::DescriptorSetLayout, sys::UnsafeDescriptorSet};
@@ -109,7 +108,6 @@ mod collection;
 pub mod layout;
 pub mod persistent;
 pub mod pool;
-pub mod single_layout_pool;
 pub mod sys;
 mod update;
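Because the patch deletes the public single-layout pool types (file below), code that previously built sets with `SingleLayoutDescriptorSetPool::next` would migrate to an allocator-backed path. A hedged migration sketch, assuming the contemporaneous `PersistentDescriptorSet::new(allocator, layout, writes)` signature (not shown in this diff) and an existing `buffer` to bind:

    // Before (type removed below):
    // let pool = SingleLayoutDescriptorSetPool::new(layout.clone())?;
    // let set = pool.next([WriteDescriptorSet::buffer(0, buffer.clone())])?;

    // After: one allocator covers every layout, caching pools per layout per thread.
    let allocator = StandardDescriptorSetAllocator::new(device.clone());
    let set = PersistentDescriptorSet::new(
        &allocator,
        layout.clone(),
        [WriteDescriptorSet::buffer(0, buffer.clone())],
    )?;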
diff --git a/vulkano/src/descriptor_set/single_layout_pool.rs b/vulkano/src/descriptor_set/single_layout_pool.rs
deleted file mode 100644
index 0a421fd36c..0000000000
--- a/vulkano/src/descriptor_set/single_layout_pool.rs
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright (c) 2017 The vulkano developers
-// Licensed under the Apache License, Version 2.0
-// <LICENSE-APACHE or
-// https://www.apache.org/licenses/LICENSE-2.0> or the MIT
-// license <LICENSE-MIT or https://opensource.org/licenses/MIT>,
-// at your option. All files in the project carrying such
-// notice may not be copied, modified, or distributed except
-// according to those terms.
-
-use super::{
-    allocator::DescriptorSetAlloc,
-    layout::DescriptorSetLayout,
-    pool::{
-        DescriptorPool, DescriptorPoolAllocError, DescriptorPoolCreateInfo,
-        DescriptorSetAllocateInfo,
-    },
-    sys::UnsafeDescriptorSet,
-    DescriptorSet, DescriptorSetCreationError, DescriptorSetInner, DescriptorSetResources,
-    WriteDescriptorSet,
-};
-use crate::{
-    device::{Device, DeviceOwned},
-    OomError, VulkanObject,
-};
-use crossbeam_queue::ArrayQueue;
-use std::{
-    cell::{Cell, UnsafeCell},
-    hash::{Hash, Hasher},
-    mem::ManuallyDrop,
-    sync::Arc,
-};
-
-const MAX_SETS: usize = 32;
-
-const MAX_POOLS: usize = 32;
-
-/// `SingleLayoutDescriptorSetPool` is a convenience wrapper provided by Vulkano not to be confused
-/// with `VkDescriptorPool`. Its function is to provide access to pool(s) to allocate descriptor
-/// sets from and optimizes for a specific layout which must not have a variable descriptor count.
-/// If you need a variable descriptor count see [`SingleLayoutVariableDescriptorSetPool`]. For a
-/// general-purpose descriptor set allocator see [`StandardDescriptorSetAllocator`].
-///
-/// [`StandardDescriptorSetAllocator`]: super::allocator::StandardDescriptorSetAllocator
-#[derive(Debug)]
-pub struct SingleLayoutDescriptorSetPool {
-    // The `SingleLayoutPool` struct contains an actual Vulkan pool. Every time it is full we create
-    // a new pool and replace the current one with the new one.
-    inner: UnsafeCell<Arc<SingleLayoutPool>>,
-    // The amount of sets available to use when we create a new Vulkan pool.
-    set_count: Cell<usize>,
-    // The descriptor set layout that this pool is for.
-    layout: Arc<DescriptorSetLayout>,
-}
-
-// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
-// `Send + Sync`. `SingleLayoutPool` is `Send + !Sync` because `DescriptorPool` is `!Sync`. That's
-// fine however because we never access the `DescriptorPool`.
-unsafe impl Send for SingleLayoutDescriptorSetPool {}
-
-impl SingleLayoutDescriptorSetPool {
-    /// Initializes a new pool. The pool is configured to allocate sets that corresponds to the
-    /// parameters passed to this function.
-    ///
-    /// # Panics
-    ///
-    /// - Panics if the provided `layout` is for push descriptors rather than regular descriptor
-    ///   sets.
-    /// - Panics if the provided `layout` has a binding with a variable descriptor count.
-    #[inline]
-    pub fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, OomError> {
-        assert!(
-            !layout.push_descriptor(),
-            "the provided descriptor set layout is for push descriptors, and cannot be used to \
-            build a descriptor set object",
-        );
-        assert!(
-            layout.variable_descriptor_count() == 0,
-            "the provided descriptor set layout has a binding with a variable descriptor count, \
-            which cannot be used with SingleLayoutDescriptorSetPool",
-        );
-
-        Ok(Self {
-            inner: UnsafeCell::new(SingleLayoutPool::new(&layout, MAX_SETS)?),
-            set_count: Cell::new(MAX_SETS),
-            layout,
-        })
-    }
-
-    /// Returns a new descriptor set, either by creating a new one or returning an existing one
-    /// from the internal reserve.
-    pub fn next(
-        &self,
-        descriptor_writes: impl IntoIterator<Item = WriteDescriptorSet>,
-    ) -> Result<Arc<SingleLayoutDescSet>, DescriptorSetCreationError> {
-        let alloc = self.next_alloc()?;
-        let inner = DescriptorSetInner::new(
-            alloc.inner().handle(),
-            self.layout.clone(),
-            0,
-            descriptor_writes,
-        )?;
-
-        Ok(Arc::new(SingleLayoutDescSet { alloc, inner }))
-    }
-
-    pub(crate) fn next_alloc(&self) -> Result<SingleLayoutPoolAlloc, OomError> {
-        let inner = unsafe { &mut *self.inner.get() };
-        loop {
-            if let Some(existing) = inner.reserve.pop() {
-                return Ok(SingleLayoutPoolAlloc {
-                    pool: inner.clone(),
-                    inner: ManuallyDrop::new(existing),
-                });
-            }
-
-            self.set_count.set(self.set_count.get() * 2);
-
-            *inner = SingleLayoutPool::new(&self.layout, self.set_count.get())?;
-        }
-    }
-}
-
-#[derive(Debug)]
-struct SingleLayoutPool {
-    // The actual Vulkan descriptor pool. This field isn't actually used anywhere, but we need to
-    // keep the pool alive in order to keep the descriptor sets valid.
-    _inner: DescriptorPool,
-    // List of descriptor sets. When `alloc` is called, a descriptor will be extracted from this
-    // list. When a `SingleLayoutPoolAlloc` is dropped, its descriptor set is put back in this list.
-    reserve: ArrayQueue<UnsafeDescriptorSet>,
-}
-
-impl SingleLayoutPool {
-    fn new(layout: &Arc<DescriptorSetLayout>, set_count: usize) -> Result<Arc<Self>, OomError> {
-        let inner = DescriptorPool::new(
-            layout.device().clone(),
-            DescriptorPoolCreateInfo {
-                max_sets: set_count as u32,
-                pool_sizes: layout
-                    .descriptor_counts()
-                    .iter()
-                    .map(|(&ty, &count)| (ty, count * set_count as u32))
-                    .collect(),
-                ..Default::default()
-            },
-        )?;
-
-        let allocate_infos = (0..set_count).map(|_| DescriptorSetAllocateInfo {
-            layout,
-            variable_descriptor_count: 0,
-        });
-
-        let reserve = match unsafe { inner.allocate_descriptor_sets(allocate_infos) } {
-            Ok(alloc_iter) => {
-                let reserve = ArrayQueue::new(set_count);
-
-                for alloc in alloc_iter {
-                    reserve.push(alloc).unwrap();
-                }
-
-                reserve
-            }
-            Err(DescriptorPoolAllocError::OutOfHostMemory) => {
-                return Err(OomError::OutOfHostMemory);
-            }
-            Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
-                return Err(OomError::OutOfDeviceMemory);
-            }
-            Err(DescriptorPoolAllocError::FragmentedPool) => {
-                // This can't happen as we don't free individual sets.
-                unreachable!();
-            }
-            Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
-                // We created the pool with an exact size.
-                unreachable!();
-            }
-        };
-
-        Ok(Arc::new(Self {
-            _inner: inner,
-            reserve,
-        }))
-    }
-}
-
-#[derive(Debug)]
-pub(crate) struct SingleLayoutPoolAlloc {
-    // The actual descriptor set.
-    inner: ManuallyDrop<UnsafeDescriptorSet>,
-    // The `SingleLayoutPool` where we allocated from. We need to keep a copy of it in each
-    // allocation so that we can put back the allocation in the list in our `Drop` impl.
-    pool: Arc<SingleLayoutPool>,
-}
-
-impl SingleLayoutPoolAlloc {
-    #[cfg(test)]
-    pub(crate) fn pool(&self) -> &DescriptorPool {
-        &self.pool._inner
-    }
-}
-
-// This is required for the same reason as for `SingleLayoutDescriptorSetPool`.
-unsafe impl Send for SingleLayoutPoolAlloc {}
-// `DescriptorPool` is `!Sync`, but we never access it, only keep it alive.
-unsafe impl Sync for SingleLayoutPoolAlloc {}
-
-impl DescriptorSetAlloc for SingleLayoutPoolAlloc {
-    fn inner(&self) -> &UnsafeDescriptorSet {
-        &self.inner
-    }
-
-    fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
-        &mut self.inner
-    }
-}
-
-impl Drop for SingleLayoutPoolAlloc {
-    fn drop(&mut self) {
-        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
-        self.pool.reserve.push(inner).unwrap();
-    }
-}
-
-/// A descriptor set created from a [`SingleLayoutDescriptorSetPool`].
-pub struct SingleLayoutDescSet {
-    alloc: SingleLayoutPoolAlloc,
-    inner: DescriptorSetInner,
-}
-
-unsafe impl DescriptorSet for SingleLayoutDescSet {
-    #[inline]
-    fn inner(&self) -> &UnsafeDescriptorSet {
-        self.alloc.inner()
-    }
-
-    #[inline]
-    fn layout(&self) -> &Arc<DescriptorSetLayout> {
-        self.inner.layout()
-    }
-
-    #[inline]
-    fn resources(&self) -> &DescriptorSetResources {
-        self.inner.resources()
-    }
-}
-
-unsafe impl DeviceOwned for SingleLayoutDescSet {
-    #[inline]
-    fn device(&self) -> &Arc<Device> {
-        self.inner.layout().device()
-    }
-}
-
-impl PartialEq for SingleLayoutDescSet {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool {
-        self.inner() == other.inner()
-    }
-}
-
-impl Eq for SingleLayoutDescSet {}
-
-impl Hash for SingleLayoutDescSet {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.inner().hash(state);
-    }
-}
-
-/// Much like [`SingleLayoutDescriptorSetPool`], except that it allows you to allocate descriptor
-/// sets with a variable descriptor count. As this has more overhead, you should only use this pool
-/// if you need the functionality and prefer [`SingleLayoutDescriptorSetPool`] otherwise. For a
-/// more general purpose descriptor set allocator see [`StandardDescriptorSetAllocator`].
-///
-/// [`StandardDescriptorSetAllocator`]: super::allocator::StandardDescriptorSetAllocator
-#[derive(Debug)]
-pub struct SingleLayoutVariableDescriptorSetPool {
-    // The `SingleLayoutVariablePool` struct contains an actual Vulkan pool. Every time it is full
-    // we grab one from the reserve, or create a new pool if there are none.
-    inner: UnsafeCell<Arc<SingleLayoutVariablePool>>,
-    // When a `SingleLayoutVariablePool` is dropped, it returns its Vulkan pool here for reuse.
-    reserve: Arc<ArrayQueue<DescriptorPool>>,
-    // The descriptor set layout that this pool is for.
-    layout: Arc<DescriptorSetLayout>,
-    // The number of sets currently allocated from the Vulkan pool.
-    allocated_sets: Cell<usize>,
-}
-
-// This is needed because of the blanket impl of `Send` on `Arc<T>`, which requires that `T` is
-// `Send + Sync`. `SingleLayoutVariablePool` is `Send + !Sync` because `DescriptorPool` is `!Sync`.
-// That's fine however because we never access the `DescriptorPool` concurrently, only drop it once
-// the `Arc` containing it is dropped.
-unsafe impl Send for SingleLayoutVariableDescriptorSetPool {}
-
-impl SingleLayoutVariableDescriptorSetPool {
-    /// Initializes a new pool. The pool is configured to allocate sets that corresponds to the
-    /// parameters passed to this function.
-    ///
-    /// # Panics
-    ///
-    /// - Panics if the provided `layout` is for push descriptors rather than regular descriptor
-    ///   sets.
-    #[inline]
-    pub fn new(layout: Arc<DescriptorSetLayout>) -> Result<Self, OomError> {
-        assert!(
-            !layout.push_descriptor(),
-            "the provided descriptor set layout is for push descriptors, and cannot be used to \
-            build a descriptor set object",
-        );
-
-        let reserve = Arc::new(ArrayQueue::new(MAX_POOLS));
-
-        Ok(Self {
-            inner: UnsafeCell::new(SingleLayoutVariablePool::new(&layout, reserve.clone())?),
-            reserve,
-            layout,
-            allocated_sets: Cell::new(0),
-        })
-    }
-
-    /// Allocates a new descriptor set.
-    ///
-    /// # Panics
-    ///
-    /// - Panics if the provided `variable_descriptor_count` exceeds the maximum for the layout.
-    pub fn next(
-        &self,
-        variable_descriptor_count: u32,
-        descriptor_writes: impl IntoIterator<Item = WriteDescriptorSet>,
-    ) -> Result<SingleLayoutVariableDescSet, DescriptorSetCreationError> {
-        let max_count = self.layout.variable_descriptor_count();
-
-        assert!(
-            variable_descriptor_count <= max_count,
-            "the provided variable_descriptor_count ({}) is greater than the maximum number of \
-            variable count descriptors in the set ({})",
-            variable_descriptor_count,
-            max_count,
-        );
-
-        let alloc = self.next_alloc(variable_descriptor_count)?;
-        let inner = DescriptorSetInner::new(
-            alloc.inner().handle(),
-            self.layout.clone(),
-            0,
-            descriptor_writes,
-        )?;
-
-        Ok(SingleLayoutVariableDescSet { inner, alloc })
-    }
-
-    pub(crate) fn next_alloc(
-        &self,
-        variable_descriptor_count: u32,
-    ) -> Result<SingleLayoutVariablePoolAlloc, OomError> {
-        if self.allocated_sets.get() >= MAX_SETS {
-            *unsafe { &mut *self.inner.get() } = if let Some(unsafe_pool) = self.reserve.pop() {
-                Arc::new(SingleLayoutVariablePool {
-                    inner: ManuallyDrop::new(unsafe_pool),
-                    reserve: self.reserve.clone(),
-                })
-            } else {
-                SingleLayoutVariablePool::new(&self.layout, self.reserve.clone())?
-            };
-            self.allocated_sets.set(0);
-        }
-
-        let allocate_info = DescriptorSetAllocateInfo {
-            layout: &self.layout,
-            variable_descriptor_count,
-        };
-
-        let pool = unsafe { &*self.inner.get() }.clone();
-
-        let inner = match unsafe { pool.inner.allocate_descriptor_sets([allocate_info]) } {
-            Ok(mut sets) => sets.next().unwrap(),
-            Err(DescriptorPoolAllocError::OutOfHostMemory) => {
-                return Err(OomError::OutOfHostMemory);
-            }
-            Err(DescriptorPoolAllocError::OutOfDeviceMemory) => {
-                return Err(OomError::OutOfDeviceMemory);
-            }
-            Err(DescriptorPoolAllocError::FragmentedPool) => {
-                // This can't happen as we don't free individual sets.
-                unreachable!();
-            }
-            Err(DescriptorPoolAllocError::OutOfPoolMemory) => {
-                // We created the pool to fit the maximum variable descriptor count.
-                unreachable!();
-            }
-        };
-
-        self.allocated_sets.set(self.allocated_sets.get() + 1);
-
-        Ok(SingleLayoutVariablePoolAlloc { inner, _pool: pool })
-    }
-}
-
-#[derive(Debug)]
-struct SingleLayoutVariablePool {
-    // The actual Vulkan descriptor pool.
-    inner: ManuallyDrop<DescriptorPool>,
-    // Where we return the Vulkan descriptor pool in our `Drop` impl.
-    reserve: Arc<ArrayQueue<DescriptorPool>>,
-}
-
-impl SingleLayoutVariablePool {
-    fn new(
-        layout: &Arc<DescriptorSetLayout>,
-        reserve: Arc<ArrayQueue<DescriptorPool>>,
-    ) -> Result<Arc<Self>, OomError> {
-        let unsafe_pool = DescriptorPool::new(
-            layout.device().clone(),
-            DescriptorPoolCreateInfo {
-                max_sets: MAX_SETS as u32,
-                pool_sizes: layout
-                    .descriptor_counts()
-                    .iter()
-                    .map(|(&ty, &count)| (ty, count * MAX_SETS as u32))
-                    .collect(),
-                ..Default::default()
-            },
-        )?;
-
-        Ok(Arc::new(Self {
-            inner: ManuallyDrop::new(unsafe_pool),
-            reserve,
-        }))
-    }
-}
-
-impl Drop for SingleLayoutVariablePool {
-    fn drop(&mut self) {
-        let inner = unsafe { ManuallyDrop::take(&mut self.inner) };
-        // TODO: This should not return `Result`, resetting a pool can't fail.
-        unsafe { inner.reset() }.unwrap();
-
-        // If there is not enough space in the reserve, we destroy the pool. The only way this can
-        // happen is if something is resource hogging, forcing new pools to be created such that
-        // the number exceeds `MAX_POOLS`, and then drops them all at once.
-        let _ = self.reserve.push(inner);
-    }
-}
-
-#[derive(Debug)]
-pub(crate) struct SingleLayoutVariablePoolAlloc {
-    // The actual descriptor set.
-    inner: UnsafeDescriptorSet,
-    // The `SingleLayoutVariablePool` where we allocated from. We need to keep a copy of it in each
-    // allocation so that we can put back the pool in the reserve once all allocations have been
-    // dropped.
-    _pool: Arc<SingleLayoutVariablePool>,
-}
-
-// This is required for the same reason as for `SingleLayoutVariableDescriptorSetPool`.
-unsafe impl Send for SingleLayoutVariablePoolAlloc {}
-// `DescriptorPool` is `!Sync`, but we never access it, only keep it alive.
-unsafe impl Sync for SingleLayoutVariablePoolAlloc {}
-
-impl DescriptorSetAlloc for SingleLayoutVariablePoolAlloc {
-    fn inner(&self) -> &UnsafeDescriptorSet {
-        &self.inner
-    }
-
-    fn inner_mut(&mut self) -> &mut UnsafeDescriptorSet {
-        &mut self.inner
-    }
-}
-
-/// A descriptor set created from a [`SingleLayoutVariableDescriptorSetPool`].
-pub struct SingleLayoutVariableDescSet {
-    alloc: SingleLayoutVariablePoolAlloc,
-    inner: DescriptorSetInner,
-}
-
-unsafe impl DescriptorSet for SingleLayoutVariableDescSet {
-    #[inline]
-    fn inner(&self) -> &UnsafeDescriptorSet {
-        self.alloc.inner()
-    }
-
-    #[inline]
-    fn layout(&self) -> &Arc<DescriptorSetLayout> {
-        self.inner.layout()
-    }
-
-    #[inline]
-    fn resources(&self) -> &DescriptorSetResources {
-        self.inner.resources()
-    }
-}
-
-unsafe impl DeviceOwned for SingleLayoutVariableDescSet {
-    #[inline]
-    fn device(&self) -> &Arc<Device> {
-        self.inner.layout().device()
-    }
-}
-
-impl PartialEq for SingleLayoutVariableDescSet {
-    #[inline]
-    fn eq(&self, other: &Self) -> bool {
-        self.inner() == other.inner()
-    }
-}
-
-impl Eq for SingleLayoutVariableDescSet {}
-
-impl Hash for SingleLayoutVariableDescSet {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.inner().hash(state);
-    }
-}
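The recycling discipline shared by the fixed-path allocs and the variable-path pools is worth isolating: a bounded, lock-free `crossbeam_queue::ArrayQueue` serves as the reserve, and a failed `push` silently destroys the value instead of blocking or growing. A self-contained sketch of that pattern (the `Reserve` type here is hypothetical, not from the patch):

    use crossbeam_queue::ArrayQueue;

    // Hypothetical stand-in for the patch's `reserve` fields.
    struct Reserve<T> {
        queue: ArrayQueue<T>,
    }

    impl<T> Reserve<T> {
        fn recycle(&self, value: T) {
            // Mirrors `let _ = self.reserve.push(inner);` in the patch: when the
            // queue is full, `push` returns `Err(value)` and the value is simply
            // dropped, which is what caps retained pools at `MAX_POOLS`.
            let _ = self.queue.push(value);
        }
    }

    fn main() {
        let reserve = Reserve { queue: ArrayQueue::new(2) };
        reserve.recycle(1);
        reserve.recycle(2);
        reserve.recycle(3); // queue full: 3 is dropped rather than queued
        assert_eq!(reserve.queue.len(), 2);
    }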