vk_mem_alloc: Update to upstream + Adapt approach to small objects pooling

This updates VMA and, instead of using the custom small-pool approach from 4e6c9d3ae9, lazily creates pools for the relevant memory type indices, which doesn't require patching VMA.

Patches that were already merged upstream or are no longer needed are also removed.
Pedro J. Estébanez 2022-02-24 12:47:34 +01:00
parent 6a35864a6d
commit 801741e787
7 changed files with 3020 additions and 4541 deletions
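
In short: instead of one pre-created small-objects pool backed by a patched VMA (where a single pool could span memory types), the renderer now asks VMA which memory type index a small buffer or image would use and lazily creates, then caches, one stock VmaPool per index. A rough, self-contained sketch of that pattern against the unpatched VMA 3.0 API follows; the std::unordered_map, the helper names and the 4096-byte threshold are illustrative stand-ins, not Godot's actual declarations.

// Sketch only: lazy per-memory-type pools for small allocations.
// Assumes a valid VmaAllocator; names and threshold below are illustrative.
#include <cstdint>
#include <unordered_map>
#include "vk_mem_alloc.h"

static const uint32_t SMALL_ALLOCATION_MAX_SIZE = 4096; // Illustrative threshold.
static std::unordered_map<uint32_t, VmaPool> small_allocs_pools;

static VmaPool find_or_create_small_allocs_pool(VmaAllocator allocator, uint32_t mem_type_index) {
	auto it = small_allocs_pools.find(mem_type_index);
	if (it != small_allocs_pools.end()) {
		return it->second; // Reuse the pool already created for this memory type.
	}

	VmaPoolCreateInfo pci = {};
	pci.memoryTypeIndex = mem_type_index; // The only per-pool field that has to differ.
	pci.blockSize = 0; // 0 lets VMA pick a block size.
	pci.minBlockCount = 0;
	pci.maxBlockCount = SIZE_MAX;
	pci.priority = 0.5f;

	VmaPool pool = VK_NULL_HANDLE;
	vmaCreatePool(allocator, &pci, &pool); // On failure, pool stays VK_NULL_HANDLE.
	small_allocs_pools[mem_type_index] = pool; // Cache even on failure so it isn't retried every call.
	return pool;
}

// At buffer-creation time, small requests get routed into the matching pool.
static void route_small_buffer(VmaAllocator allocator, const VkBufferCreateInfo &buffer_info,
		VmaAllocationCreateInfo &alloc_info, uint32_t size) {
	alloc_info.pool = nullptr;
	if (size <= SMALL_ALLOCATION_MAX_SIZE) {
		uint32_t mem_type_index = 0;
		// Ask VMA which memory type it would choose for this buffer + allocation info.
		if (vmaFindMemoryTypeIndexForBufferInfo(allocator, &buffer_info, &alloc_info, &mem_type_index) == VK_SUCCESS) {
			alloc_info.pool = find_or_create_small_allocs_pool(allocator, mem_type_index);
		}
	}
}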


@@ -1335,8 +1335,13 @@ Error RenderingDeviceVulkan::_buffer_allocate(Buffer *p_buffer, uint32_t p_size,
allocInfo.requiredFlags = 0;
allocInfo.preferredFlags = 0;
allocInfo.memoryTypeBits = 0;
-allocInfo.pool = p_size <= SMALL_ALLOCATION_MAX_SIZE ? small_allocs_pool : nullptr;
+allocInfo.pool = nullptr;
allocInfo.pUserData = nullptr;
+if (p_size <= SMALL_ALLOCATION_MAX_SIZE) {
+uint32_t mem_type_index = 0;
+vmaFindMemoryTypeIndexForBufferInfo(allocator, &bufferInfo, &allocInfo, &mem_type_index);
+allocInfo.pool = _find_or_create_small_allocs_pool(mem_type_index);
+}
VkResult err = vmaCreateBuffer(allocator, &bufferInfo, &allocInfo, &p_buffer->buffer, &p_buffer->allocation, nullptr);
ERR_FAIL_COND_V_MSG(err, ERR_CANT_CREATE, "Can't create buffer of size: " + itos(p_size) + ", error " + itos(err) + ".");
@@ -1843,12 +1848,17 @@ RID RenderingDeviceVulkan::texture_create(const TextureFormat &p_format, const T
VmaAllocationCreateInfo allocInfo;
allocInfo.flags = 0;
-allocInfo.pool = image_size <= SMALL_ALLOCATION_MAX_SIZE ? small_allocs_pool : nullptr;
+allocInfo.pool = nullptr;
allocInfo.usage = p_format.usage_bits & TEXTURE_USAGE_CPU_READ_BIT ? VMA_MEMORY_USAGE_CPU_ONLY : VMA_MEMORY_USAGE_GPU_ONLY;
allocInfo.requiredFlags = 0;
allocInfo.preferredFlags = 0;
allocInfo.memoryTypeBits = 0;
allocInfo.pUserData = nullptr;
+if (image_size <= SMALL_ALLOCATION_MAX_SIZE) {
+uint32_t mem_type_index = 0;
+vmaFindMemoryTypeIndexForImageInfo(allocator, &image_create_info, &allocInfo, &mem_type_index);
+allocInfo.pool = _find_or_create_small_allocs_pool(mem_type_index);
+}
Texture texture;
@@ -8714,6 +8724,30 @@ void RenderingDeviceVulkan::sync() {
local_device_processing = false;
}
+VmaPool RenderingDeviceVulkan::_find_or_create_small_allocs_pool(uint32_t p_mem_type_index) {
+if (small_allocs_pools.has(p_mem_type_index)) {
+return small_allocs_pools[p_mem_type_index];
+}
+print_verbose("Creating VMA small objects pool for memory type index " + itos(p_mem_type_index));
+VmaPoolCreateInfo pci;
+pci.memoryTypeIndex = p_mem_type_index;
+pci.flags = 0;
+pci.blockSize = 0;
+pci.minBlockCount = 0;
+pci.maxBlockCount = SIZE_MAX;
+pci.priority = 0.5f;
+pci.minAllocationAlignment = 0;
+pci.pMemoryAllocateNext = nullptr;
+VmaPool pool = VK_NULL_HANDLE;
+VkResult res = vmaCreatePool(allocator, &pci, &pool);
+small_allocs_pools[p_mem_type_index] = pool; // Don't try to create it again if failed the first time
+ERR_FAIL_COND_V_MSG(res, pool, "vmaCreatePool failed with error " + itos(res) + ".");
+return pool;
+}
void RenderingDeviceVulkan::_free_pending_resources(int p_frame) {
//free in dependency usage order, so nothing weird happens
//pipelines
@@ -8834,9 +8868,9 @@ uint64_t RenderingDeviceVulkan::get_memory_usage(MemoryType p_type) const {
} else if (p_type == MEMORY_TEXTURES) {
return image_memory;
} else {
-VmaStats stats;
-vmaCalculateStats(allocator, &stats);
-return stats.total.usedBytes;
+VmaTotalStatistics stats;
+vmaCalculateStatistics(allocator, &stats);
+return stats.total.statistics.allocationBytes;
}
}
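
The memory-usage query above also tracks an upstream API rename: VmaStats/vmaCalculateStats became VmaTotalStatistics/vmaCalculateStatistics in VMA 3.0, with the totals nested one level deeper. A minimal usage sketch, assuming a valid allocator (the helper name is illustrative):

#include <cstdint>
#include "vk_mem_alloc.h"

static uint64_t total_bytes_in_use(VmaAllocator allocator) {
	VmaTotalStatistics stats;
	vmaCalculateStatistics(allocator, &stats);
	// allocationBytes is the memory occupied by live allocations (roughly the old usedBytes);
	// stats.total.statistics.blockBytes would instead report the VkDeviceMemory actually reserved.
	return stats.total.statistics.allocationBytes;
}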
@@ -8935,18 +8969,6 @@ void RenderingDeviceVulkan::initialize(VulkanContext *p_context, bool p_local_de
vmaCreateAllocator(&allocatorInfo, &allocator);
}
-{ //create pool for small objects
-VmaPoolCreateInfo pci;
-pci.flags = VMA_POOL_CREATE_LINEAR_ALGORITHM_BIT;
-pci.blockSize = 0;
-pci.minBlockCount = 0;
-pci.maxBlockCount = SIZE_MAX;
-pci.priority = 0.5f;
-pci.minAllocationAlignment = 0;
-pci.pMemoryAllocateNext = nullptr;
-vmaCreatePool(allocator, &pci, &small_allocs_pool);
-}
frames = memnew_arr(Frame, frame_count);
frame = 0;
//create setup and frame buffers
@@ -9415,7 +9437,11 @@ void RenderingDeviceVulkan::finalize() {
for (int i = 0; i < staging_buffer_blocks.size(); i++) {
vmaDestroyBuffer(allocator, staging_buffer_blocks[i].buffer, staging_buffer_blocks[i].allocation);
}
-vmaDestroyPool(allocator, small_allocs_pool);
+while (small_allocs_pools.size()) {
+Map<uint32_t, VmaPool>::Element *E = small_allocs_pools.front();
+vmaDestroyPool(allocator, E->get());
+small_allocs_pools.erase(E);
+}
vmaDestroyAllocator(allocator);
while (vertex_formats.size()) {


@@ -1016,7 +1016,8 @@ class RenderingDeviceVulkan : public RenderingDevice {
void _free_pending_resources(int p_frame);
VmaAllocator allocator = nullptr;
-VmaPool small_allocs_pool = nullptr;
+Map<uint32_t, VmaPool> small_allocs_pools;
+VmaPool _find_or_create_small_allocs_pool(uint32_t p_mem_type_index);
VulkanContext *context = nullptr;


@@ -690,10 +690,10 @@ Files extracted from upstream source:
SDK release: https://github.com/KhronosGroup/Vulkan-ValidationLayers/blob/master/layers/generated/vk_enum_string_helper.h
`vk_mem_alloc.h` is taken from https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator
-Version: 3.0.0-development (2022-02-08), commit `a1895bc76547370564d604faa27e0b73de747df1`
+Version: 3.0.0-development (2022-02-24), commit `dc3f6bb9159df22ceed69c7765ddfb4fbb1b6ed0`
`vk_mem_alloc.cpp` is a Godot file and should be preserved on updates.
-Patches in the `patches` directory should be re-applied after updates (order must be followed among the number-prefixed ones).
+Patches in the `patches` directory should be re-applied after updates.
## wslay


@@ -1,80 +0,0 @@
diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h
index 52b403bede..d88c305a7c 100644
--- a/thirdparty/vulkan/vk_mem_alloc.h
+++ b/thirdparty/vulkan/vk_mem_alloc.h
@@ -2366,7 +2366,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(
*/
VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(
VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- VmaVirtualAllocation allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo);
/** \brief Allocates new virtual allocation inside given #VmaVirtualBlock.
diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h
index d1138a7bc8..74c66b9789 100644
--- a/thirdparty/vulkan/vk_mem_alloc.h
+++ b/thirdparty/vulkan/vk_mem_alloc.h
@@ -2386,7 +2386,7 @@ If the allocation fails due to not enough free space available, `VK_ERROR_OUT_OF
VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(
VmaVirtualBlock VMA_NOT_NULL virtualBlock,
const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo,
- VmaVirtualAllocation* VMA_NOT_NULL pAllocation,
+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
VkDeviceSize* VMA_NULLABLE pOffset);
/** \brief Frees virtual allocation inside given #VmaVirtualBlock.
@@ -2391,7 +2391,7 @@ It is correct to call this function with `allocation == VK_NULL_HANDLE` - it doe
*/
VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(
VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- VmaVirtualAllocation allocation);
+ VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation);
/** \brief Frees all virtual allocations inside given #VmaVirtualBlock.
@@ -2408,7 +2408,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(
*/
VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(
VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- VmaVirtualAllocation allocation,
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation,
void* VMA_NULLABLE pUserData);
/** \brief Calculates and returns statistics about virtual allocations and memory usage in given #VmaVirtualBlock.
@@ -17835,7 +17835,7 @@ VMA_CALL_PRE VkBool32 VMA_CALL_POST vmaIsVirtualBlockEmpty(VmaVirtualBlock VMA_N
}
VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- VmaVirtualAllocation allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, VmaVirtualAllocationInfo* VMA_NOT_NULL pVirtualAllocInfo)
{
VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pVirtualAllocInfo != VMA_NULL);
VMA_DEBUG_LOG("vmaGetVirtualAllocationInfo");
@@ -17853,7 +17853,7 @@ VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_N
return virtualBlock->Allocate(*pCreateInfo, *pAllocation, pOffset);
}
-VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation allocation)
+VMA_CALL_PRE void VMA_CALL_POST vmaVirtualFree(VmaVirtualBlock VMA_NOT_NULL virtualBlock, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE allocation)
{
if(allocation != VK_NULL_HANDLE)
{
@@ -17873,7 +17873,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaClearVirtualBlock(VmaVirtualBlock VMA_NOT_NUL
}
VMA_CALL_PRE void VMA_CALL_POST vmaSetVirtualAllocationUserData(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- VmaVirtualAllocation allocation, void* VMA_NULLABLE pUserData)
+ VmaVirtualAllocation VMA_NOT_NULL_NON_DISPATCHABLE allocation, void* VMA_NULLABLE pUserData)
{
VMA_ASSERT(virtualBlock != VK_NULL_HANDLE);
VMA_DEBUG_LOG("vmaSetVirtualAllocationUserData");
@@ -17848,7 +17848,7 @@ VMA_CALL_PRE void VMA_CALL_POST vmaGetVirtualAllocationInfo(VmaVirtualBlock VMA_
}
VMA_CALL_PRE VkResult VMA_CALL_POST vmaVirtualAllocate(VmaVirtualBlock VMA_NOT_NULL virtualBlock,
- const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation* VMA_NOT_NULL pAllocation,
+ const VmaVirtualAllocationCreateInfo* VMA_NOT_NULL pCreateInfo, VmaVirtualAllocation VMA_NULLABLE_NON_DISPATCHABLE* VMA_NOT_NULL pAllocation,
VkDeviceSize* VMA_NULLABLE pOffset)
{
VMA_ASSERT(virtualBlock != VK_NULL_HANDLE && pCreateInfo != VMA_NULL && pAllocation != VMA_NULL);


@@ -1,567 +0,0 @@
diff --git a/thirdparty/vulkan/vk_mem_alloc.h b/thirdparty/vulkan/vk_mem_alloc.h
index 74c66b9789..89e00e6326 100644
--- a/thirdparty/vulkan/vk_mem_alloc.h
+++ b/thirdparty/vulkan/vk_mem_alloc.h
@@ -1127,31 +1127,26 @@ typedef struct VmaAllocationCreateInfo
/** \brief Intended usage of memory.
You can leave #VMA_MEMORY_USAGE_UNKNOWN if you specify memory requirements in other way. \n
- If `pool` is not null, this member is ignored.
*/
VmaMemoryUsage usage;
/** \brief Flags that must be set in a Memory Type chosen for an allocation.
- Leave 0 if you specify memory requirements in other way. \n
- If `pool` is not null, this member is ignored.*/
+ Leave 0 if you specify memory requirements in other way.*/
VkMemoryPropertyFlags requiredFlags;
/** \brief Flags that preferably should be set in a memory type chosen for an allocation.
- Set to 0 if no additional flags are preferred. \n
- If `pool` is not null, this member is ignored. */
+ Set to 0 if no additional flags are preferred.*/
VkMemoryPropertyFlags preferredFlags;
/** \brief Bitmask containing one bit set for every memory type acceptable for this allocation.
Value 0 is equivalent to `UINT32_MAX` - it means any memory type is accepted if
it meets other requirements specified by this structure, with no further
restrictions on memory type index. \n
- If `pool` is not null, this member is ignored.
*/
uint32_t memoryTypeBits;
/** \brief Pool that this allocation should be created in.
- Leave `VK_NULL_HANDLE` to allocate from default pool. If not null, members:
- `usage`, `requiredFlags`, `preferredFlags`, `memoryTypeBits` are ignored.
+ Leave `VK_NULL_HANDLE` to allocate from default pool.
*/
VmaPool VMA_NULLABLE pool;
/** \brief Custom general-purpose pointer that will be stored in #VmaAllocation, can be read as VmaAllocationInfo::pUserData and changed using vmaSetAllocationUserData().
@@ -1173,9 +1168,6 @@ typedef struct VmaAllocationCreateInfo
/// Describes parameter of created #VmaPool.
typedef struct VmaPoolCreateInfo
{
- /** \brief Vulkan memory type index to allocate this pool from.
- */
- uint32_t memoryTypeIndex;
/** \brief Use combination of #VmaPoolCreateFlagBits.
*/
VmaPoolCreateFlags flags;
@@ -10904,13 +10896,12 @@ struct VmaPool_T
friend struct VmaPoolListItemTraits;
VMA_CLASS_NO_COPY(VmaPool_T)
public:
- VmaBlockVector m_BlockVector;
- VmaDedicatedAllocationList m_DedicatedAllocations;
+ VmaBlockVector* m_pBlockVectors[VK_MAX_MEMORY_TYPES];
+ VmaDedicatedAllocationList m_DedicatedAllocations[VK_MAX_MEMORY_TYPES];
VmaPool_T(
VmaAllocator hAllocator,
- const VmaPoolCreateInfo& createInfo,
- VkDeviceSize preferredBlockSize);
+ const VmaPoolCreateInfo& createInfo);
~VmaPool_T();
uint32_t GetId() const { return m_Id; }
@@ -10924,6 +10915,7 @@ public:
#endif
private:
+ const VmaAllocator m_hAllocator;
uint32_t m_Id;
char* m_Name;
VmaPool_T* m_PrevPool = VMA_NULL;
@@ -11405,8 +11397,10 @@ private:
void ValidateVulkanFunctions();
+public: // I'm sorry
VkDeviceSize CalcPreferredBlockSize(uint32_t memTypeIndex);
+private:
VkResult AllocateMemoryOfType(
VmaPool pool,
VkDeviceSize size,
@@ -14176,30 +14170,36 @@ void VmaDefragmentationContext_T::AddPools(uint32_t poolCount, const VmaPool* pP
{
VmaPool pool = pPools[poolIndex];
VMA_ASSERT(pool);
- // Pools with algorithm other than default are not defragmented.
- if (pool->m_BlockVector.GetAlgorithm() == 0)
+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex)
{
- VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
-
- for (size_t i = m_CustomPoolContexts.size(); i--; )
+ if(pool->m_pBlockVectors[memTypeIndex])
{
- if (m_CustomPoolContexts[i]->GetCustomPool() == pool)
+ // Pools with algorithm other than default are not defragmented.
+ if (pool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0)
{
- pBlockVectorDefragCtx = m_CustomPoolContexts[i];
- break;
- }
- }
+ VmaBlockVectorDefragmentationContext* pBlockVectorDefragCtx = VMA_NULL;
- if (!pBlockVectorDefragCtx)
- {
- pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
- m_hAllocator,
- pool,
- &pool->m_BlockVector);
- m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
- }
+ for (size_t i = m_CustomPoolContexts.size(); i--; )
+ {
+ if (m_CustomPoolContexts[i]->GetCustomPool() == pool)
+ {
+ pBlockVectorDefragCtx = m_CustomPoolContexts[i];
+ break;
+ }
+ }
+
+ if (!pBlockVectorDefragCtx)
+ {
+ pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
+ m_hAllocator,
+ pool,
+ pool->m_pBlockVectors[memTypeIndex]);
+ m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
+ }
- pBlockVectorDefragCtx->AddAll();
+ pBlockVectorDefragCtx->AddAll();
+ }
+ }
}
}
}
@@ -14214,6 +14214,7 @@ void VmaDefragmentationContext_T::AddAllocations(
{
const VmaAllocation hAlloc = pAllocations[allocIndex];
VMA_ASSERT(hAlloc);
+ const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
// DedicatedAlloc cannot be defragmented.
if (hAlloc->GetType() == VmaAllocation_T::ALLOCATION_TYPE_BLOCK)
{
@@ -14224,7 +14225,7 @@ void VmaDefragmentationContext_T::AddAllocations(
if (hAllocPool != VK_NULL_HANDLE)
{
// Pools with algorithm other than default are not defragmented.
- if (hAllocPool->m_BlockVector.GetAlgorithm() == 0)
+ if (hAllocPool->m_pBlockVectors[memTypeIndex]->GetAlgorithm() == 0)
{
for (size_t i = m_CustomPoolContexts.size(); i--; )
{
@@ -14239,7 +14240,7 @@ void VmaDefragmentationContext_T::AddAllocations(
pBlockVectorDefragCtx = vma_new(m_hAllocator, VmaBlockVectorDefragmentationContext)(
m_hAllocator,
hAllocPool,
- &hAllocPool->m_BlockVector);
+ hAllocPool->m_pBlockVectors[memTypeIndex]);
m_CustomPoolContexts.push_back(pBlockVectorDefragCtx);
}
}
@@ -14247,7 +14248,6 @@ void VmaDefragmentationContext_T::AddAllocations(
// This allocation belongs to default pool.
else
{
- const uint32_t memTypeIndex = hAlloc->GetMemoryTypeIndex();
pBlockVectorDefragCtx = m_DefaultPoolContexts[memTypeIndex];
if (!pBlockVectorDefragCtx)
{
@@ -14481,41 +14481,61 @@ VkResult VmaDefragmentationContext_T::DefragmentPassEnd()
#ifndef _VMA_POOL_T_FUNCTIONS
VmaPool_T::VmaPool_T(
VmaAllocator hAllocator,
- const VmaPoolCreateInfo& createInfo,
- VkDeviceSize preferredBlockSize)
- : m_BlockVector(
- hAllocator,
- this, // hParentPool
- createInfo.memoryTypeIndex,
- createInfo.blockSize != 0 ? createInfo.blockSize : preferredBlockSize,
- createInfo.minBlockCount,
- createInfo.maxBlockCount,
- (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
- createInfo.blockSize != 0, // explicitBlockSize
- createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
- createInfo.priority,
- VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(createInfo.memoryTypeIndex), createInfo.minAllocationAlignment),
- createInfo.pMemoryAllocateNext),
+ const VmaPoolCreateInfo& createInfo) :
+ m_hAllocator(hAllocator),
+ m_pBlockVectors{},
m_Id(0),
- m_Name(VMA_NULL) {}
+ m_Name(VMA_NULL)
+{
+ for(uint32_t memTypeIndex = 0; memTypeIndex < hAllocator->GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ // Create only supported types
+ if((hAllocator->GetGlobalMemoryTypeBits() & (1u << memTypeIndex)) != 0)
+ {
+ m_pBlockVectors[memTypeIndex] = vma_new(hAllocator, VmaBlockVector)(
+ hAllocator,
+ this, // hParentPool
+ memTypeIndex,
+ createInfo.blockSize != 0 ? createInfo.blockSize : hAllocator->CalcPreferredBlockSize(memTypeIndex),
+ createInfo.minBlockCount,
+ createInfo.maxBlockCount,
+ (createInfo.flags& VMA_POOL_CREATE_IGNORE_BUFFER_IMAGE_GRANULARITY_BIT) != 0 ? 1 : hAllocator->GetBufferImageGranularity(),
+ false, // explicitBlockSize
+ createInfo.flags & VMA_POOL_CREATE_ALGORITHM_MASK, // algorithm
+ createInfo.priority,
+ VMA_MAX(hAllocator->GetMemoryTypeMinAlignment(memTypeIndex), createInfo.minAllocationAlignment),
+ createInfo.pMemoryAllocateNext);
+ }
+ }
+}
VmaPool_T::~VmaPool_T()
{
VMA_ASSERT(m_PrevPool == VMA_NULL && m_NextPool == VMA_NULL);
+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ vma_delete(m_hAllocator, m_pBlockVectors[memTypeIndex]);
+ }
}
void VmaPool_T::SetName(const char* pName)
{
- const VkAllocationCallbacks* allocs = m_BlockVector.GetAllocator()->GetAllocationCallbacks();
- VmaFreeString(allocs, m_Name);
-
- if (pName != VMA_NULL)
- {
- m_Name = VmaCreateStringCopy(allocs, pName);
- }
- else
+ for(uint32_t memTypeIndex = 0; memTypeIndex < m_hAllocator->GetMemoryTypeCount(); ++memTypeIndex)
{
- m_Name = VMA_NULL;
+ if(m_pBlockVectors[memTypeIndex])
+ {
+ const VkAllocationCallbacks* allocs = m_pBlockVectors[memTypeIndex]->GetAllocator()->GetAllocationCallbacks();
+ VmaFreeString(allocs, m_Name);
+
+ if (pName != VMA_NULL)
+ {
+ m_Name = VmaCreateStringCopy(allocs, pName);
+ }
+ else
+ {
+ m_Name = VMA_NULL;
+ }
+ }
}
}
#endif // _VMA_POOL_T_FUNCTIONS
@@ -15377,15 +15397,22 @@ VkResult VmaAllocator_T::CalcAllocationParams(
inoutCreateInfo.flags |= VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
}
- if(inoutCreateInfo.pool != VK_NULL_HANDLE)
+ if(inoutCreateInfo.pool != VK_NULL_HANDLE && (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
{
- if(inoutCreateInfo.pool->m_BlockVector.HasExplicitBlockSize() &&
- (inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0)
+ // Assuming here every block has the same block size and priority.
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
- VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
- return VK_ERROR_FEATURE_NOT_PRESENT;
+ if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex])
+ {
+ if(inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->HasExplicitBlockSize())
+ {
+ VMA_ASSERT(0 && "Specifying VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT while current custom pool doesn't support dedicated allocations.");
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+ inoutCreateInfo.priority = inoutCreateInfo.pool->m_pBlockVectors[memTypeIndex]->GetPriority();
+ break;
+ }
}
- inoutCreateInfo.priority = inoutCreateInfo.pool->m_BlockVector.GetPriority();
}
if((inoutCreateInfo.flags & VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT) != 0 &&
@@ -15429,67 +15456,46 @@ VkResult VmaAllocator_T::AllocateMemory(
if(res != VK_SUCCESS)
return res;
- if(createInfoFinal.pool != VK_NULL_HANDLE)
+ // Bit mask of memory Vulkan types acceptable for this allocation.
+ uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
+ uint32_t memTypeIndex = UINT32_MAX;
+ res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex);
+ // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
+ if(res != VK_SUCCESS)
+ return res;
+ do
{
- VmaBlockVector& blockVector = createInfoFinal.pool->m_BlockVector;
- return AllocateMemoryOfType(
+ VmaBlockVector* blockVector = createInfoFinal.pool == VK_NULL_HANDLE ? m_pBlockVectors[memTypeIndex] : createInfoFinal.pool->m_pBlockVectors[memTypeIndex];
+ VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
+ VmaDedicatedAllocationList& dedicatedAllocations = createInfoFinal.pool == VK_NULL_HANDLE ? m_DedicatedAllocations[memTypeIndex] : createInfoFinal.pool->m_DedicatedAllocations[memTypeIndex];
+ res = AllocateMemoryOfType(
createInfoFinal.pool,
vkMemReq.size,
vkMemReq.alignment,
- prefersDedicatedAllocation,
+ requiresDedicatedAllocation || prefersDedicatedAllocation,
dedicatedBuffer,
dedicatedBufferUsage,
dedicatedImage,
createInfoFinal,
- blockVector.GetMemoryTypeIndex(),
+ memTypeIndex,
suballocType,
- createInfoFinal.pool->m_DedicatedAllocations,
- blockVector,
+ dedicatedAllocations,
+ *blockVector,
allocationCount,
pAllocations);
- }
- else
- {
- // Bit mask of memory Vulkan types acceptable for this allocation.
- uint32_t memoryTypeBits = vkMemReq.memoryTypeBits;
- uint32_t memTypeIndex = UINT32_MAX;
- res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex);
- // Can't find any single memory type matching requirements. res is VK_ERROR_FEATURE_NOT_PRESENT.
- if(res != VK_SUCCESS)
- return res;
- do
- {
- VmaBlockVector* blockVector = m_pBlockVectors[memTypeIndex];
- VMA_ASSERT(blockVector && "Trying to use unsupported memory type!");
- res = AllocateMemoryOfType(
- VK_NULL_HANDLE,
- vkMemReq.size,
- vkMemReq.alignment,
- requiresDedicatedAllocation || prefersDedicatedAllocation,
- dedicatedBuffer,
- dedicatedBufferUsage,
- dedicatedImage,
- createInfoFinal,
- memTypeIndex,
- suballocType,
- m_DedicatedAllocations[memTypeIndex],
- *blockVector,
- allocationCount,
- pAllocations);
- // Allocation succeeded
- if(res == VK_SUCCESS)
- return VK_SUCCESS;
+ // Allocation succeeded
+ if(res == VK_SUCCESS)
+ return VK_SUCCESS;
- // Remove old memTypeIndex from list of possibilities.
- memoryTypeBits &= ~(1u << memTypeIndex);
- // Find alternative memTypeIndex.
- res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex);
- } while(res == VK_SUCCESS);
+ // Remove old memTypeIndex from list of possibilities.
+ memoryTypeBits &= ~(1u << memTypeIndex);
+ // Find alternative memTypeIndex.
+ res = vmaFindMemoryTypeIndex(this, memoryTypeBits, &createInfoFinal, &memTypeIndex);
+ } while(res == VK_SUCCESS);
- // No other matching memory type index could be found.
- // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
- }
+ // No other matching memory type index could be found.
+ // Not returning res, which is VK_ERROR_FEATURE_NOT_PRESENT, because we already failed to allocate once.
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
void VmaAllocator_T::FreeMemory(
@@ -15515,16 +15521,16 @@ void VmaAllocator_T::FreeMemory(
{
VmaBlockVector* pBlockVector = VMA_NULL;
VmaPool hPool = allocation->GetParentPool();
+ const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
if(hPool != VK_NULL_HANDLE)
{
- pBlockVector = &hPool->m_BlockVector;
+ pBlockVector = hPool->m_pBlockVectors[memTypeIndex];
}
else
{
- const uint32_t memTypeIndex = allocation->GetMemoryTypeIndex();
pBlockVector = m_pBlockVectors[memTypeIndex];
- VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
}
+ VMA_ASSERT(pBlockVector && "Trying to free memory of unsupported type!");
pBlockVector->Free(allocation);
}
break;
@@ -15564,11 +15570,17 @@ void VmaAllocator_T::CalculateStats(VmaStats* pStats)
VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
{
- VmaBlockVector& blockVector = pool->m_BlockVector;
- blockVector.AddStats(pStats);
- const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
- const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
- pool->m_DedicatedAllocations.AddStats(pStats, memTypeIndex, memHeapIndex);
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if (pool->m_pBlockVectors[memTypeIndex])
+ {
+ VmaBlockVector& blockVector = *pool->m_pBlockVectors[memTypeIndex];
+ blockVector.AddStats(pStats);
+ const uint32_t memTypeIndex = blockVector.GetMemoryTypeIndex();
+ const uint32_t memHeapIndex = MemoryTypeIndexToHeapIndex(memTypeIndex);
+ pool->m_DedicatedAllocations[memTypeIndex].AddStats(pStats, memTypeIndex, memHeapIndex);
+ }
+ }
}
}
@@ -15720,27 +15732,26 @@ VkResult VmaAllocator_T::CreatePool(const VmaPoolCreateInfo* pCreateInfo, VmaPoo
{
return VK_ERROR_INITIALIZATION_FAILED;
}
- // Memory type index out of range or forbidden.
- if(pCreateInfo->memoryTypeIndex >= GetMemoryTypeCount() ||
- ((1u << pCreateInfo->memoryTypeIndex) & m_GlobalMemoryTypeBits) == 0)
- {
- return VK_ERROR_FEATURE_NOT_PRESENT;
- }
if(newCreateInfo.minAllocationAlignment > 0)
{
VMA_ASSERT(VmaIsPow2(newCreateInfo.minAllocationAlignment));
}
- const VkDeviceSize preferredBlockSize = CalcPreferredBlockSize(newCreateInfo.memoryTypeIndex);
-
- *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo, preferredBlockSize);
+ *pPool = vma_new(this, VmaPool_T)(this, newCreateInfo);
- VkResult res = (*pPool)->m_BlockVector.CreateMinBlocks();
- if(res != VK_SUCCESS)
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
- vma_delete(this, *pPool);
- *pPool = VMA_NULL;
- return res;
+ // Create only supported types
+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
+ {
+ VkResult res = (*pPool)->m_pBlockVectors[memTypeIndex]->CreateMinBlocks();
+ if(res != VK_SUCCESS)
+ {
+ vma_delete(this, *pPool);
+ *pPool = VMA_NULL;
+ return res;
+ }
+ }
}
// Add to m_Pools.
@@ -15772,8 +15783,14 @@ void VmaAllocator_T::GetPoolStats(VmaPool pool, VmaPoolStats* pPoolStats)
pPoolStats->unusedRangeCount = 0;
pPoolStats->blockCount = 0;
- pool->m_BlockVector.AddPoolStats(pPoolStats);
- pool->m_DedicatedAllocations.AddPoolStats(pPoolStats);
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
+ {
+ pool->m_pBlockVectors[memTypeIndex]->AddPoolStats(pPoolStats);
+ pool->m_DedicatedAllocations[memTypeIndex].AddPoolStats(pPoolStats);
+ }
+ }
}
void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
@@ -15790,7 +15807,13 @@ void VmaAllocator_T::SetCurrentFrameIndex(uint32_t frameIndex)
VkResult VmaAllocator_T::CheckPoolCorruption(VmaPool hPool)
{
- return hPool->m_BlockVector.CheckCorruption();
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
+ {
+ if((m_GlobalMemoryTypeBits & (1u << memTypeIndex)) != 0)
+ {
+ return hPool->m_pBlockVectors[memTypeIndex]->CheckCorruption();
+ }
+ }
}
VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
@@ -15822,18 +15845,21 @@ VkResult VmaAllocator_T::CheckCorruption(uint32_t memoryTypeBits)
VmaMutexLockRead lock(m_PoolsMutex, m_UseMutex);
for(VmaPool pool = m_Pools.Front(); pool != VMA_NULL; pool = m_Pools.GetNext(pool))
{
- if(((1u << pool->m_BlockVector.GetMemoryTypeIndex()) & memoryTypeBits) != 0)
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
- VkResult localRes = pool->m_BlockVector.CheckCorruption();
- switch(localRes)
+ if(pool->m_pBlockVectors[memTypeIndex] && ((1u << memTypeIndex) & memoryTypeBits) != 0)
{
- case VK_ERROR_FEATURE_NOT_PRESENT:
- break;
- case VK_SUCCESS:
- finalRes = VK_SUCCESS;
- break;
- default:
- return localRes;
+ VkResult localRes = pool->m_pBlockVectors[memTypeIndex]->CheckCorruption();
+ switch(localRes)
+ {
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ break;
+ case VK_SUCCESS:
+ finalRes = VK_SUCCESS;
+ break;
+ default:
+ return localRes;
+ }
}
}
}
@@ -16155,7 +16181,7 @@ void VmaAllocator_T::FreeDedicatedMemory(const VmaAllocation allocation)
else
{
// Custom pool
- parentPool->m_DedicatedAllocations.Unregister(allocation);
+ parentPool->m_DedicatedAllocations[memTypeIndex].Unregister(allocation);
}
VkDeviceMemory hMemory = allocation->GetMemory();
@@ -16430,12 +16456,18 @@ void VmaAllocator_T::PrintDetailedMap(VmaJsonWriter& json)
json.EndString();
json.BeginObject();
- pool->m_BlockVector.PrintDetailedMap(json);
-
- if (!pool->m_DedicatedAllocations.IsEmpty())
+ for(uint32_t memTypeIndex = 0; memTypeIndex < GetMemoryTypeCount(); ++memTypeIndex)
{
- json.WriteString("DedicatedAllocations");
- pool->m_DedicatedAllocations.BuildStatsString(json);
+ if (pool->m_pBlockVectors[memTypeIndex])
+ {
+ pool->m_pBlockVectors[memTypeIndex]->PrintDetailedMap(json);
+ }
+
+ if (!pool->m_DedicatedAllocations[memTypeIndex].IsEmpty())
+ {
+ json.WriteString("DedicatedAllocations");
+ pool->m_DedicatedAllocations->BuildStatsString(json);
+ }
}
json.EndObject();
}

File diff suppressed because it is too large.