diff --git a/src/client/refresh/vk/header/qvk.h b/src/client/refresh/vk/header/qvk.h
index f0352e35..aa3ccdeb 100644
--- a/src/client/refresh/vk/header/qvk.h
+++ b/src/client/refresh/vk/header/qvk.h
@@ -30,7 +30,7 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 #endif
 
 #include <vulkan/vulkan.h>
-#include "vk_mem_alloc.h"
+#include "vk_util.h"
 #include "vk_shaders.h"
 
 // Vulkan device
@@ -77,10 +77,8 @@ typedef enum
 // texture object
 typedef struct
 {
-	VkImage image;
-	VmaAllocation allocation;
-	VmaAllocationInfo allocInfo;
-	VmaAllocationCreateFlags vmaFlags;
+	ImageResource_t resource;
+
 	VkImageView imageView;
 	VkSharingMode sharingMode;
 	VkSampleCountFlagBits sampleCount;
@@ -90,13 +88,11 @@ typedef struct
 } qvktexture_t;
 
 #define QVVKTEXTURE_INIT { \
-	.image = VK_NULL_HANDLE, \
-	.allocation = VK_NULL_HANDLE, \
-	.allocInfo = { \
-		.pMappedData = VK_NULL_HANDLE, \
-		.pUserData = VK_NULL_HANDLE, \
+	.resource = { \
+		.image = VK_NULL_HANDLE, \
+		.memory = VK_NULL_HANDLE, \
+		.size = 0, \
 	}, \
-	.vmaFlags = 0, \
 	.imageView = VK_NULL_HANDLE, \
 	.sharingMode = VK_SHARING_MODE_MAX_ENUM, \
 	.sampleCount = VK_SAMPLE_COUNT_1_BIT, \
@@ -106,9 +102,9 @@ typedef struct
 }
 
 #define QVVKTEXTURE_CLEAR(i) { \
-	(i).image = VK_NULL_HANDLE; \
-	(i).allocation = VK_NULL_HANDLE; \
-	(i).vmaFlags = 0; \
+	(i).resource.image = VK_NULL_HANDLE; \
+	(i).resource.memory = VK_NULL_HANDLE; \
+	(i).resource.size = 0; \
 	(i).imageView = VK_NULL_HANDLE; \
 	(i).sharingMode = VK_SHARING_MODE_MAX_ENUM; \
 	(i).sampleCount = VK_SAMPLE_COUNT_1_BIT; \
@@ -127,19 +123,22 @@ typedef struct
 // Vulkan buffer
 typedef struct
 {
-	VkBuffer buffer;
-	VmaAllocation allocation;
-	VmaAllocationInfo allocInfo;
 	VkDeviceSize currentOffset;
+
+	BufferResource_t resource;
+	void *pMappedData;
 } qvkbuffer_t;
 
 // Vulkan staging buffer
 typedef struct
 {
-	qvkbuffer_t buffer;
+	VkDeviceSize currentOffset;
 	VkCommandBuffer cmdBuffer;
 	VkFence fence;
 	qboolean submitted;
+
+	BufferResource_t resource;
+	void *pMappedData;
 } qvkstagingbuffer_t;
 
 // Vulkan buffer options
@@ -148,8 +147,6 @@ typedef struct
 	VkBufferUsageFlags usage;
 	VkMemoryPropertyFlags reqMemFlags;
 	VkMemoryPropertyFlags prefMemFlags;
-	VmaMemoryUsage vmaUsage;
-	VmaAllocationCreateFlags vmaFlags;
 } qvkbufferopts_t;
 
 // Vulkan pipeline
@@ -209,8 +206,6 @@ extern VkInstance vk_instance;
 extern VkSurfaceKHR vk_surface;
 // Vulkan device
 extern qvkdevice_t vk_device;
-// Vulkan memory allocator
-extern VmaAllocator vk_malloc;
 // Vulkan swapchain
 extern qvkswapchain_t vk_swapchain;
 // Vulkan command buffer currently in use
@@ -276,7 +271,7 @@ VkResult QVk_CreateSwapchain(void);
 VkFormat QVk_FindDepthFormat(void);
 VkResult QVk_CreateCommandPool(VkCommandPool *commandPool, uint32_t queueFamilyIndex);
 VkResult QVk_CreateImageView(const VkImage *image, VkImageAspectFlags aspectFlags, VkImageView *imageView, VkFormat format, uint32_t mipLevels);
-VkResult QVk_CreateImage(uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VmaMemoryUsage memUsage, qvktexture_t *texture);
+VkResult QVk_CreateImage(uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, qvktexture_t *texture);
 void QVk_CreateDepthBuffer(VkSampleCountFlagBits sampleCount, qvktexture_t *depthBuffer);
 void QVk_CreateColorBuffer(VkSampleCountFlagBits sampleCount, qvktexture_t *colorBuffer, int extraFlags);
 void QVk_CreateTexture(qvktexture_t *texture, const unsigned char *data, uint32_t width, uint32_t height, qvksampler_t samplerType);
@@ -290,9 +285,10 @@ const char* QVk_GetError(VkResult errorCode);
 VkResult QVk_BeginFrame(void);
 VkResult QVk_EndFrame(qboolean force);
 void QVk_BeginRenderpass(qvkrenderpasstype_t rpType);
+void QVk_FreeStagingBuffer(qvkstagingbuffer_t *buffer);
 VkResult QVk_CreateBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, const qvkbufferopts_t options);
 void QVk_FreeBuffer(qvkbuffer_t *buffer);
-VkResult QVk_CreateStagingBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags);
+VkResult QVk_CreateStagingBuffer(VkDeviceSize size, qvkstagingbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags);
 VkResult QVk_CreateUniformBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags);
 void QVk_CreateVertexBuffer(const void *data, VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags);
 void QVk_CreateIndexBuffer(const void *data, VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags);
diff --git a/src/client/refresh/vk/vk_buffer.c b/src/client/refresh/vk/vk_buffer.c
index 0f02aeef..ede775c4 100644
--- a/src/client/refresh/vk/vk_buffer.c
+++ b/src/client/refresh/vk/vk_buffer.c
@@ -40,24 +40,23 @@ static void copyBuffer(const VkBuffer *src, VkBuffer *dst, VkDeviceSize size)
 // internal helper
 static void createStagedBuffer(const void *data, VkDeviceSize size, qvkbuffer_t *dstBuffer, qvkbufferopts_t bufferOpts)
 {
-	qvkbuffer_t *stgBuffer;
-	// create/release internal staging buffer
-	stgBuffer = (qvkbuffer_t *)malloc(sizeof(qvkbuffer_t));
+	qvkstagingbuffer_t *stgBuffer;
+	stgBuffer = (qvkstagingbuffer_t *)malloc(sizeof(qvkstagingbuffer_t));
 	VK_VERIFY(QVk_CreateStagingBuffer(size, stgBuffer, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
 
 	if (data)
 	{
 		void *dst;
 		// staging buffers in vkQuake2 are required to be host coherent, so no flushing/invalidation is involved
-		VK_VERIFY(vmaMapMemory(vk_malloc, stgBuffer->allocation, &dst));
+		dst = buffer_map(&stgBuffer->resource);
 		memcpy(dst, data, (size_t)size);
-		vmaUnmapMemory(vk_malloc, stgBuffer->allocation);
+		buffer_unmap(&stgBuffer->resource);
 	}
 
 	VK_VERIFY(QVk_CreateBuffer(size, dstBuffer, bufferOpts));
-	copyBuffer(&stgBuffer->buffer, &dstBuffer->buffer, size);
+	copyBuffer(&stgBuffer->resource.buffer, &dstBuffer->resource.buffer, size);
 
-	QVk_FreeBuffer(stgBuffer);
+	QVk_FreeStagingBuffer(stgBuffer);
 	free(stgBuffer);
 }
 
@@ -76,46 +75,46 @@ VkResult QVk_CreateBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, const qvkbu
 	// separate transfer queue makes sense only if the buffer is targetted for being transfered to GPU, so ignore it if it's CPU-only
 	uint32_t queueFamilies[] = { (uint32_t)vk_device.gfxFamilyIndex, (uint32_t)vk_device.transferFamilyIndex };
 
-	if (options.vmaUsage != VMA_MEMORY_USAGE_CPU_ONLY && vk_device.gfxFamilyIndex != vk_device.transferFamilyIndex)
+	if (vk_device.gfxFamilyIndex != vk_device.transferFamilyIndex)
 	{
 		bcInfo.sharingMode = VK_SHARING_MODE_CONCURRENT;
 		bcInfo.queueFamilyIndexCount = 2;
 		bcInfo.pQueueFamilyIndices = queueFamilies;
 	}
 
-	VmaAllocationCreateInfo vmallocInfo = {
-		.flags = options.vmaFlags,
-		.usage = options.vmaUsage,
-		.requiredFlags = options.reqMemFlags,
-		.preferredFlags = options.prefMemFlags,
-		.memoryTypeBits = 0,
-		.pool = VK_NULL_HANDLE,
-		.pUserData = NULL
-	};
-
 	dstBuffer->currentOffset = 0;
-	return vmaCreateBuffer(vk_malloc, &bcInfo, &vmallocInfo, &dstBuffer->buffer, &dstBuffer->allocation, &dstBuffer->allocInfo);
+	return buffer_create(&dstBuffer->resource, size, bcInfo, options.reqMemFlags, options.prefMemFlags);
 }
 
 void QVk_FreeBuffer(qvkbuffer_t *buffer)
 {
-	vmaDestroyBuffer(vk_malloc, buffer->buffer, buffer->allocation);
-	buffer->buffer = VK_NULL_HANDLE;
-	buffer->allocation = VK_NULL_HANDLE;
+	buffer_destroy(&buffer->resource);
+	buffer->resource.buffer = VK_NULL_HANDLE;
 	buffer->currentOffset = 0;
 }
 
-VkResult QVk_CreateStagingBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags)
+void QVk_FreeStagingBuffer(qvkstagingbuffer_t *buffer)
 {
-	qvkbufferopts_t stagingOpts = {
+	buffer_destroy(&buffer->resource);
+	buffer->resource.buffer = VK_NULL_HANDLE;
+	buffer->currentOffset = 0;
+}
+
+VkResult QVk_CreateStagingBuffer(VkDeviceSize size, qvkstagingbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags)
+{
+	VkBufferCreateInfo bcInfo = {
+		.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		.pNext = NULL,
+		.flags = 0,
+		.size = size,
 		.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT,
-		.reqMemFlags = reqMemFlags,
-		.prefMemFlags = prefMemFlags,
-		.vmaUsage = VMA_MEMORY_USAGE_CPU_ONLY,
-		.vmaFlags = 0
+		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+		.queueFamilyIndexCount = 0,
+		.pQueueFamilyIndices = NULL,
 	};
 
-	return QVk_CreateBuffer(size, dstBuffer, stagingOpts);
+	dstBuffer->currentOffset = 0;
+	return buffer_create(&dstBuffer->resource, size, bcInfo, reqMemFlags, prefMemFlags);
 }
 
 VkResult QVk_CreateUniformBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMemoryPropertyFlags reqMemFlags, VkMemoryPropertyFlags prefMemFlags)
@@ -124,13 +123,6 @@ VkResult QVk_CreateUniformBuffer(VkDeviceSize size, qvkbuffer_t *dstBuffer, VkMe
 		.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
 		.reqMemFlags = reqMemFlags,
 		.prefMemFlags = prefMemFlags,
-		.vmaUsage = VMA_MEMORY_USAGE_CPU_TO_GPU,
-		// When resizing dynamic uniform buffers on Intel, the Linux driver may throw a warning:
-		// "Mapping an image with layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used."
-		// Minor annoyance but we don't want any validation warnings, so we create dedicated allocation for uniform buffer.
-		// more details: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/issues/34
-		// Note that this is a false positive which in other cases could be ignored: https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/general_considerations.html#general_considerations_validation_layer_warnings
-		.vmaFlags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
 	};
 
 	return QVk_CreateBuffer(size, dstBuffer, dstOpts);
@@ -142,8 +134,6 @@ void QVk_CreateVertexBuffer(const void *data, VkDeviceSize size, qvkbuffer_t *ds
 		.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT,
 		.reqMemFlags = reqMemFlags,
 		.prefMemFlags = prefMemFlags,
-		.vmaUsage = VMA_MEMORY_USAGE_GPU_ONLY,
-		.vmaFlags = 0
 	};
 
 	createStagedBuffer(data, size, dstBuffer, dstOpts);
@@ -155,8 +145,6 @@ void QVk_CreateIndexBuffer(const void *data, VkDeviceSize size, qvkbuffer_t *dst
 		.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT,
 		.reqMemFlags = reqMemFlags,
 		.prefMemFlags = prefMemFlags,
-		.vmaUsage = VMA_MEMORY_USAGE_GPU_ONLY,
-		.vmaFlags = 0
 	};
 
 	createStagedBuffer(data, size, dstBuffer, dstOpts);
diff --git a/src/client/refresh/vk/vk_common.c b/src/client/refresh/vk/vk_common.c
index 2e397932..25c72d9b 100644
--- a/src/client/refresh/vk/vk_common.c
+++ b/src/client/refresh/vk/vk_common.c
@@ -34,7 +34,6 @@ Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 // Vulkan instance, surface and memory allocator
 VkInstance vk_instance = VK_NULL_HANDLE;
 VkSurfaceKHR vk_surface = VK_NULL_HANDLE;
-VmaAllocator vk_malloc = VK_NULL_HANDLE;
 
 // Vulkan device
 qvkdevice_t vk_device = {
@@ -726,11 +725,11 @@ static void CreateDrawBuffers()
 // internal helper
 static void DestroyDrawBuffer(qvktexture_t *drawBuffer)
 {
-	if (drawBuffer->image != VK_NULL_HANDLE)
+	if (drawBuffer->resource.image != VK_NULL_HANDLE)
 	{
-		vmaDestroyImage(vk_malloc, drawBuffer->image, drawBuffer->allocation);
+		image_destroy(&drawBuffer->resource);
 		vkDestroyImageView(vk_device.logical, drawBuffer->imageView, NULL);
-		drawBuffer->image = VK_NULL_HANDLE;
+		drawBuffer->resource.image = VK_NULL_HANDLE;
 		drawBuffer->imageView = VK_NULL_HANDLE;
 	}
 }
@@ -933,11 +932,11 @@ static void CreateDynamicBuffers()
 		QVk_CreateIndexBuffer(NULL, vk_config.index_buffer_size, &vk_dynIndexBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
 		VK_VERIFY(QVk_CreateUniformBuffer(vk_config.uniform_buffer_size, &vk_dynUniformBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
 		// keep dynamic buffers persistently mapped
-		VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynVertexBuffers[i].allocation, &vk_dynVertexBuffers[i].allocInfo.pMappedData));
-		VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynIndexBuffers[i].allocation, &vk_dynIndexBuffers[i].allocInfo.pMappedData));
-		VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynUniformBuffers[i].allocation, &vk_dynUniformBuffers[i].allocInfo.pMappedData));
+		vk_dynVertexBuffers[i].pMappedData = buffer_map(&vk_dynVertexBuffers[i].resource);
+		vk_dynIndexBuffers[i].pMappedData = buffer_map(&vk_dynIndexBuffers[i].resource);
+		vk_dynUniformBuffers[i].pMappedData = buffer_map(&vk_dynUniformBuffers[i].resource);
 		// create descriptor set for the uniform buffer
-		CreateUboDescriptorSet(&vk_uboDescriptorSets[i], vk_dynUniformBuffers[i].buffer);
+		CreateUboDescriptorSet(&vk_uboDescriptorSets[i], vk_dynUniformBuffers[i].resource.buffer);
 		QVk_DebugSetObjectName((uint64_t)vk_uboDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET, va("Dynamic UBO Descriptor Set #%d", i));
 		QVk_DebugSetObjectName((uint64_t)vk_dynVertexBuffers[i].buffer, VK_OBJECT_TYPE_BUFFER, va("Dynamic Vertex Buffer #%d", i));
@@ -1009,15 +1008,15 @@ static void RebuildTriangleFanIndexBuffer()
 	for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 	{
 		vk_activeDynBufferIdx = (vk_activeDynBufferIdx + 1) % NUM_DYNBUFFERS;
-		vmaInvalidateAllocation(vk_malloc, vk_dynIndexBuffers[i].allocation, 0, VK_WHOLE_SIZE);
+		VK_VERIFY(buffer_invalidate(&vk_dynIndexBuffers[i].resource));
 
 		iboData = (uint16_t *)QVk_GetIndexBuffer(bufferSize, &dstOffset);
 		memcpy(iboData, fanData, bufferSize);
 
-		vmaFlushAllocation(vk_malloc, vk_dynIndexBuffers[i].allocation, 0, VK_WHOLE_SIZE);
+		VK_VERIFY(buffer_flush(&vk_dynIndexBuffers[i].resource));
 	}
 
-	vk_triangleFanIbo = &vk_dynIndexBuffers[vk_activeDynBufferIdx].buffer;
+	vk_triangleFanIbo = &vk_dynIndexBuffers[vk_activeDynBufferIdx].resource.buffer;
 	vk_triangleFanIboUsage = ((bufferSize % 4) == 0) ? bufferSize : (bufferSize + 4 - (bufferSize % 4));
 	free(fanData);
 }
@@ -1035,8 +1034,8 @@ static void CreateStagingBuffers()
 
 	for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 	{
-		VK_VERIFY(QVk_CreateStagingBuffer(STAGING_BUFFER_MAXSIZE, &vk_stagingBuffers[i].buffer, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
-		VK_VERIFY(vmaMapMemory(vk_malloc, vk_stagingBuffers[i].buffer.allocation, &vk_stagingBuffers[i].buffer.allocInfo.pMappedData));
+		VK_VERIFY(QVk_CreateStagingBuffer(STAGING_BUFFER_MAXSIZE, &vk_stagingBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
+		vk_stagingBuffers[i].pMappedData = buffer_map(&vk_stagingBuffers[i].resource);
 
 		vk_stagingBuffers[i].submitted = false;
 		VK_VERIFY(vkCreateFence(vk_device.logical, &fCreateInfo, NULL, &vk_stagingBuffers[i].fence));
@@ -1399,25 +1398,25 @@ void QVk_Shutdown( void )
 	QVk_FreeBuffer(&vk_rectIbo);
 	for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 	{
-		if (vk_dynUniformBuffers[i].buffer != VK_NULL_HANDLE)
+		if (vk_dynUniformBuffers[i].resource.buffer != VK_NULL_HANDLE)
 		{
-			vmaUnmapMemory(vk_malloc, vk_dynUniformBuffers[i].allocation);
+			buffer_unmap(&vk_dynUniformBuffers[i].resource);
 			QVk_FreeBuffer(&vk_dynUniformBuffers[i]);
 		}
-		if (vk_dynIndexBuffers[i].buffer != VK_NULL_HANDLE)
+		if (vk_dynIndexBuffers[i].resource.buffer != VK_NULL_HANDLE)
 		{
-			vmaUnmapMemory(vk_malloc, vk_dynIndexBuffers[i].allocation);
+			buffer_unmap(&vk_dynIndexBuffers[i].resource);
 			QVk_FreeBuffer(&vk_dynIndexBuffers[i]);
 		}
-		if (vk_dynVertexBuffers[i].buffer != VK_NULL_HANDLE)
+		if (vk_dynVertexBuffers[i].resource.buffer != VK_NULL_HANDLE)
 		{
-			vmaUnmapMemory(vk_malloc, vk_dynVertexBuffers[i].allocation);
+			buffer_unmap(&vk_dynVertexBuffers[i].resource);
 			QVk_FreeBuffer(&vk_dynVertexBuffers[i]);
 		}
-		if (vk_stagingBuffers[i].buffer.buffer != VK_NULL_HANDLE)
+		if (vk_stagingBuffers[i].resource.buffer != VK_NULL_HANDLE)
 		{
-			vmaUnmapMemory(vk_malloc, vk_stagingBuffers[i].buffer.allocation);
-			QVk_FreeBuffer(&vk_stagingBuffers[i].buffer);
+			buffer_unmap(&vk_stagingBuffers[i].resource);
+			QVk_FreeStagingBuffer(&vk_stagingBuffers[i]);
 			vkDestroyFence(vk_device.logical, vk_stagingBuffers[i].fence, NULL);
 		}
 	}
@@ -1468,8 +1467,6 @@ void QVk_Shutdown( void )
 			vkDestroyFence(vk_device.logical, vk_fences[i], NULL);
 		}
 	}
-	if (vk_malloc != VK_NULL_HANDLE)
-		vmaDestroyAllocator(vk_malloc);
 	if (vk_device.logical != VK_NULL_HANDLE)
 		vkDestroyDevice(vk_device.logical, NULL);
 	if(vk_surface != VK_NULL_HANDLE)
@@ -1630,28 +1627,6 @@ qboolean QVk_Init(SDL_Window *window)
 	}
 	QVk_DebugSetObjectName((uint64_t)vk_device.physical, VK_OBJECT_TYPE_PHYSICAL_DEVICE, va("Physical Device: %s", vk_config.vendor_name));
 
-	// create memory allocator
-	VmaAllocatorCreateInfo allocInfo = {
-		.flags = 0,
-		.physicalDevice = vk_device.physical,
-		.device = vk_device.logical,
-		.preferredLargeHeapBlockSize = 0,
-		.pAllocationCallbacks = NULL,
-		.pDeviceMemoryCallbacks = NULL,
-		.frameInUseCount = 0,
-		.pHeapSizeLimit = NULL,
-		.pVulkanFunctions = NULL,
-		.pRecordSettings = NULL
-	};
-
-	res = vmaCreateAllocator(&allocInfo, &vk_malloc);
-	if (res != VK_SUCCESS)
-	{
-		R_Printf(PRINT_ALL, "%s(): Could not create Vulkan memory allocator: %s\n", __func__, QVk_GetError(res));
-		return false;
-	}
-	R_Printf(PRINT_ALL, "...created Vulkan memory allocator\n");
-
 	// setup swapchain
 	res = QVk_CreateSwapchain();
 	if (res != VK_SUCCESS)
@@ -1839,9 +1814,9 @@ VkResult QVk_BeginFrame()
 	vk_dynVertexBuffers[vk_activeDynBufferIdx].currentOffset = 0;
 	// triangle fan index data is placed in the beginning of the buffer
 	vk_dynIndexBuffers[vk_activeDynBufferIdx].currentOffset = vk_triangleFanIboUsage;
-	vmaInvalidateAllocation(vk_malloc, vk_dynUniformBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
-	vmaInvalidateAllocation(vk_malloc, vk_dynVertexBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
-	vmaInvalidateAllocation(vk_malloc, vk_dynIndexBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
+	VK_VERIFY(buffer_invalidate(&vk_dynUniformBuffers[vk_activeDynBufferIdx].resource));
+	VK_VERIFY(buffer_invalidate(&vk_dynVertexBuffers[vk_activeDynBufferIdx].resource));
+	VK_VERIFY(buffer_invalidate(&vk_dynIndexBuffers[vk_activeDynBufferIdx].resource));
 
 	// for VK_OUT_OF_DATE_KHR and VK_SUBOPTIMAL_KHR it'd be fine to just rebuild the swapchain but let's take the easy way out and restart video system
 	if (result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR || result == VK_ERROR_SURFACE_LOST_KHR)
@@ -1889,9 +1864,9 @@ VkResult QVk_EndFrame(qboolean force)
 
 	// submit
 	QVk_SubmitStagingBuffers();
-	vmaFlushAllocation(vk_malloc, vk_dynUniformBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
-	vmaFlushAllocation(vk_malloc, vk_dynVertexBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
-	vmaFlushAllocation(vk_malloc, vk_dynIndexBuffers[vk_activeDynBufferIdx].allocation, 0, VK_WHOLE_SIZE);
+	VK_VERIFY(buffer_flush(&vk_dynUniformBuffers[vk_activeDynBufferIdx].resource));
+	VK_VERIFY(buffer_flush(&vk_dynVertexBuffers[vk_activeDynBufferIdx].resource));
+	VK_VERIFY(buffer_flush(&vk_dynIndexBuffers[vk_activeDynBufferIdx].resource));
 
 	vkCmdEndRenderPass(vk_commandbuffers[vk_activeBufferIdx]);
 	QVk_DebugLabelEnd(&vk_commandbuffers[vk_activeBufferIdx]);
@@ -2034,10 +2009,10 @@ uint8_t *QVk_GetVertexBuffer(VkDeviceSize size, VkBuffer *dstBuffer, VkDeviceSiz
 		for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 		{
 			vk_swapBuffers[vk_activeSwapBufferIdx][swapBufferOffset + i] = vk_dynVertexBuffers[i];
-			vmaUnmapMemory(vk_malloc, vk_dynVertexBuffers[i].allocation);
+			buffer_unmap(&vk_dynVertexBuffers[i].resource);
 
 			QVk_CreateVertexBuffer(NULL, vk_config.vertex_buffer_size, &vk_dynVertexBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
-			VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynVertexBuffers[i].allocation, &vk_dynVertexBuffers[i].allocInfo.pMappedData));
+			vk_dynVertexBuffers[i].pMappedData = buffer_map(&vk_dynVertexBuffers[i].resource);
 			QVk_DebugSetObjectName((uint64_t)vk_dynVertexBuffers[i].buffer, VK_OBJECT_TYPE_BUFFER, va("Dynamic Vertex Buffer #%d", i));
 			QVk_DebugSetObjectName((uint64_t)vk_dynVertexBuffers[i].allocInfo.deviceMemory, VK_OBJECT_TYPE_DEVICE_MEMORY, va("Memory: Dynamic Vertex Buffer #%d", i));
@@ -2045,14 +2020,14 @@ uint8_t *QVk_GetVertexBuffer(VkDeviceSize size, VkBuffer *dstBuffer, VkDeviceSiz
 	}
 
 	*dstOffset = vk_dynVertexBuffers[vk_activeDynBufferIdx].currentOffset;
-	*dstBuffer = vk_dynVertexBuffers[vk_activeDynBufferIdx].buffer;
+	*dstBuffer = vk_dynVertexBuffers[vk_activeDynBufferIdx].resource.buffer;
 	vk_dynVertexBuffers[vk_activeDynBufferIdx].currentOffset += size;
 	vk_config.vertex_buffer_usage = vk_dynVertexBuffers[vk_activeDynBufferIdx].currentOffset;
 	if (vk_config.vertex_buffer_max_usage < vk_config.vertex_buffer_usage)
 		vk_config.vertex_buffer_max_usage = vk_config.vertex_buffer_usage;
 
-	return (uint8_t *)vk_dynVertexBuffers[vk_activeDynBufferIdx].allocInfo.pMappedData + (*dstOffset);
+	return (uint8_t *)vk_dynVertexBuffers[vk_activeDynBufferIdx].pMappedData + (*dstOffset);
 }
 
 static uint8_t *QVk_GetIndexBuffer(VkDeviceSize size, VkDeviceSize *dstOffset)
@@ -2077,10 +2052,10 @@ static uint8_t *QVk_GetIndexBuffer(VkDeviceSize size, VkDeviceSize *dstOffset)
 		for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 		{
 			vk_swapBuffers[vk_activeSwapBufferIdx][swapBufferOffset + i] = vk_dynIndexBuffers[i];
-			vmaUnmapMemory(vk_malloc, vk_dynIndexBuffers[i].allocation);
+			buffer_unmap(&vk_dynIndexBuffers[i].resource);
 
 			QVk_CreateIndexBuffer(NULL, vk_config.index_buffer_size, &vk_dynIndexBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT);
-			VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynIndexBuffers[i].allocation, &vk_dynIndexBuffers[i].allocInfo.pMappedData));
+			vk_dynIndexBuffers[i].pMappedData = buffer_map(&vk_dynIndexBuffers[i].resource);
 			QVk_DebugSetObjectName((uint64_t)vk_dynIndexBuffers[i].buffer, VK_OBJECT_TYPE_BUFFER, va("Dynamic Index Buffer #%d", i));
 			QVk_DebugSetObjectName((uint64_t)vk_dynIndexBuffers[i].allocInfo.deviceMemory, VK_OBJECT_TYPE_DEVICE_MEMORY, va("Memory: Dynamic Index Buffer #%d", i));
@@ -2094,7 +2069,7 @@ static uint8_t *QVk_GetIndexBuffer(VkDeviceSize size, VkDeviceSize *dstOffset)
 	if (vk_config.index_buffer_max_usage < vk_config.index_buffer_usage)
 		vk_config.index_buffer_max_usage = vk_config.index_buffer_usage;
 
-	return (uint8_t *)vk_dynIndexBuffers[vk_activeDynBufferIdx].allocInfo.pMappedData + (*dstOffset);
+	return (uint8_t *)vk_dynIndexBuffers[vk_activeDynBufferIdx].pMappedData + (*dstOffset);
 }
 
 uint8_t *QVk_GetUniformBuffer(VkDeviceSize size, uint32_t *dstOffset, VkDescriptorSet *dstUboDescriptorSet)
@@ -2126,12 +2101,12 @@ uint8_t *QVk_GetUniformBuffer(VkDeviceSize size, uint32_t *dstOffset, VkDescript
 		for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 		{
 			vk_swapBuffers[vk_activeSwapBufferIdx][swapBufferOffset + i] = vk_dynUniformBuffers[i];
-			vk_swapDescriptorSets[vk_activeSwapBufferIdx][swapDescSetsOffset + i] = vk_uboDescriptorSets[i];;
-			vmaUnmapMemory(vk_malloc, vk_dynUniformBuffers[i].allocation);
+			vk_swapDescriptorSets[vk_activeSwapBufferIdx][swapDescSetsOffset + i] = vk_uboDescriptorSets[i];
+			buffer_unmap(&vk_dynUniformBuffers[i].resource);
 
 			VK_VERIFY(QVk_CreateUniformBuffer(vk_config.uniform_buffer_size, &vk_dynUniformBuffers[i], VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
-			VK_VERIFY(vmaMapMemory(vk_malloc, vk_dynUniformBuffers[i].allocation, &vk_dynUniformBuffers[i].allocInfo.pMappedData));
-			CreateUboDescriptorSet(&vk_uboDescriptorSets[i], vk_dynUniformBuffers[i].buffer);
+			vk_dynUniformBuffers[i].pMappedData = buffer_map(&vk_dynUniformBuffers[i].resource);
+			CreateUboDescriptorSet(&vk_uboDescriptorSets[i], vk_dynUniformBuffers[i].resource.buffer);
 			QVk_DebugSetObjectName((uint64_t)vk_uboDescriptorSets[i], VK_OBJECT_TYPE_DESCRIPTOR_SET, va("Dynamic UBO Descriptor Set #%d", i));
 			QVk_DebugSetObjectName((uint64_t)vk_dynUniformBuffers[i].buffer, VK_OBJECT_TYPE_BUFFER, va("Dynamic Uniform Buffer #%d", i));
@@ -2147,20 +2122,20 @@ uint8_t *QVk_GetUniformBuffer(VkDeviceSize size, uint32_t *dstOffset, VkDescript
 	if (vk_config.uniform_buffer_max_usage < vk_config.uniform_buffer_usage)
 		vk_config.uniform_buffer_max_usage = vk_config.uniform_buffer_usage;
 
-	return (uint8_t *)vk_dynUniformBuffers[vk_activeDynBufferIdx].allocInfo.pMappedData + (*dstOffset);
+	return (uint8_t *)vk_dynUniformBuffers[vk_activeDynBufferIdx].pMappedData + (*dstOffset);
 }
 
 uint8_t *QVk_GetStagingBuffer(VkDeviceSize size, int alignment, VkCommandBuffer *cmdBuffer, VkBuffer *buffer, uint32_t *dstOffset)
 {
 	qvkstagingbuffer_t * stagingBuffer = &vk_stagingBuffers[vk_activeStagingBuffer];
-	const int align_mod = stagingBuffer->buffer.currentOffset % alignment;
-	stagingBuffer->buffer.currentOffset = ((stagingBuffer->buffer.currentOffset % alignment) == 0)
-		? stagingBuffer->buffer.currentOffset : (stagingBuffer->buffer.currentOffset + alignment - align_mod);
+	const int align_mod = stagingBuffer->currentOffset % alignment;
+	stagingBuffer->currentOffset = ((stagingBuffer->currentOffset % alignment) == 0)
+		? stagingBuffer->currentOffset : (stagingBuffer->currentOffset + alignment - align_mod);
 
 	if (size > STAGING_BUFFER_MAXSIZE)
 		Sys_Error("QVk_GetStagingBuffer(): Cannot allocate staging buffer space!");
 
-	if ((stagingBuffer->buffer.currentOffset + size) >= STAGING_BUFFER_MAXSIZE && !stagingBuffer->submitted)
+	if ((stagingBuffer->currentOffset + size) >= STAGING_BUFFER_MAXSIZE && !stagingBuffer->submitted)
 		SubmitStagingBuffer(vk_activeStagingBuffer);
 
 	stagingBuffer = &vk_stagingBuffers[vk_activeStagingBuffer];
@@ -2169,7 +2144,7 @@ uint8_t *QVk_GetStagingBuffer(VkDeviceSize size, int alignment, VkCommandBuffer
 		VK_VERIFY(vkWaitForFences(vk_device.logical, 1, &stagingBuffer->fence, VK_TRUE, UINT64_MAX));
 		VK_VERIFY(vkResetFences(vk_device.logical, 1, &stagingBuffer->fence));
 
-		stagingBuffer->buffer.currentOffset = 0;
+		stagingBuffer->currentOffset = 0;
 		stagingBuffer->submitted = false;
 
 		VkCommandBufferBeginInfo beginInfo = {
@@ -2185,12 +2160,12 @@ uint8_t *QVk_GetStagingBuffer(VkDeviceSize size, int alignment, VkCommandBuffer
 	if (cmdBuffer)
 		*cmdBuffer = stagingBuffer->cmdBuffer;
 	if (buffer)
-		*buffer = stagingBuffer->buffer.buffer;
+		*buffer = stagingBuffer->resource.buffer;
 	if (dstOffset)
-		*dstOffset = stagingBuffer->buffer.currentOffset;
+		*dstOffset = stagingBuffer->currentOffset;
 
-	unsigned char *data = (uint8_t *)stagingBuffer->buffer.allocInfo.pMappedData + stagingBuffer->buffer.currentOffset;
-	stagingBuffer->buffer.currentOffset += size;
+	unsigned char *data = (uint8_t *)stagingBuffer->pMappedData + stagingBuffer->currentOffset;
+	stagingBuffer->currentOffset += size;
 
 	return data;
 }
@@ -2217,7 +2192,7 @@ void QVk_SubmitStagingBuffers()
 {
 	for (int i = 0; i < NUM_DYNBUFFERS; ++i)
 	{
-		if (!vk_stagingBuffers[i].submitted && vk_stagingBuffers[i].buffer.currentOffset > 0)
+		if (!vk_stagingBuffers[i].submitted && vk_stagingBuffers[i].currentOffset > 0)
 			SubmitStagingBuffer(i);
 	}
 }
@@ -2260,8 +2235,8 @@ void QVk_DrawColorRect(float *ubo, VkDeviceSize uboSize, qvkrenderpasstype_t rpT
 	QVk_BindPipeline(&vk_drawColorQuadPipeline[rpType]);
 
 	VkDeviceSize offsets = 0;
 	vkCmdBindDescriptorSets(vk_activeCmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk_drawColorQuadPipeline[rpType].layout, 0, 1, &uboDescriptorSet, 1, &uboOffset);
-	vkCmdBindVertexBuffers(vk_activeCmdbuffer, 0, 1, &vk_colorRectVbo.buffer, &offsets);
-	vkCmdBindIndexBuffer(vk_activeCmdbuffer, vk_rectIbo.buffer, 0, VK_INDEX_TYPE_UINT32);
+	vkCmdBindVertexBuffers(vk_activeCmdbuffer, 0, 1, &vk_colorRectVbo.resource.buffer, &offsets);
+	vkCmdBindIndexBuffer(vk_activeCmdbuffer, vk_rectIbo.resource.buffer, 0, VK_INDEX_TYPE_UINT32);
 	vkCmdDrawIndexed(vk_activeCmdbuffer, 6, 1, 0, 0, 0);
 }
@@ -2276,8 +2251,8 @@ void QVk_DrawTexRect(const float *ubo, VkDeviceSize uboSize, qvktexture_t *textu
 	VkDeviceSize offsets = 0;
 	VkDescriptorSet descriptorSets[] = { texture->descriptorSet, uboDescriptorSet };
 	vkCmdBindDescriptorSets(vk_activeCmdbuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk_drawTexQuadPipeline.layout, 0, 2, descriptorSets, 1, &uboOffset);
-	vkCmdBindVertexBuffers(vk_activeCmdbuffer, 0, 1, &vk_texRectVbo.buffer, &offsets);
-	vkCmdBindIndexBuffer(vk_activeCmdbuffer, vk_rectIbo.buffer, 0, VK_INDEX_TYPE_UINT32);
+	vkCmdBindVertexBuffers(vk_activeCmdbuffer, 0, 1, &vk_texRectVbo.resource.buffer, &offsets);
+	vkCmdBindIndexBuffer(vk_activeCmdbuffer, vk_rectIbo.resource.buffer, 0, VK_INDEX_TYPE_UINT32);
 	vkCmdDrawIndexed(vk_activeCmdbuffer, 6, 1, 0, 0, 0);
 }
diff --git a/src/client/refresh/vk/vk_draw.c b/src/client/refresh/vk/vk_draw.c
index 79adff78..e521ac1b 100644
--- a/src/client/refresh/vk/vk_draw.c
+++ b/src/client/refresh/vk/vk_draw.c
@@ -311,7 +311,7 @@ void Draw_StretchRaw (int x, int y, int w, int h, int cols, int rows, byte *data
 		}
 	}
 
-	if (vk_rawTexture.image != VK_NULL_HANDLE)
+	if (vk_rawTexture.resource.image != VK_NULL_HANDLE)
 	{
 		QVk_UpdateTextureData(&vk_rawTexture, (unsigned char*)&image32, 0, 0, 256, 256);
 	}
diff --git a/src/client/refresh/vk/vk_image.c b/src/client/refresh/vk/vk_image.c
index 67013bbb..ae929367 100644
--- a/src/client/refresh/vk/vk_image.c
+++ b/src/client/refresh/vk/vk_image.c
@@ -65,7 +65,7 @@ static void transitionImageLayout(const VkCommandBuffer *cmdBuffer, const VkQueu
 		.newLayout = newLayout,
 		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-		.image = texture->image,
+		.image = texture->resource.image,
 		.subresourceRange.baseMipLevel = 0, // no mip mapping levels
 		.subresourceRange.baseArrayLayer = 0,
 		.subresourceRange.layerCount = 1,
@@ -173,7 +173,7 @@ static void generateMipmaps(const VkCommandBuffer *cmdBuffer, const qvktexture_t
 		.pNext = NULL,
 		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
-		.image = texture->image,
+		.image = texture->resource.image,
 		.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
 		.subresourceRange.levelCount = 1,
 		.subresourceRange.baseArrayLayer = 0,
@@ -208,8 +208,8 @@ static void generateMipmaps(const VkCommandBuffer *cmdBuffer, const qvktexture_t
 		};
 
 		// src image == dst image, because we're blitting between different mip levels of the same image
-		vkCmdBlitImage(*cmdBuffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
-			texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, mipFilter);
+		vkCmdBlitImage(*cmdBuffer, texture->resource.image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
+			texture->resource.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &blit, mipFilter);
 
 		imgBarrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
 		imgBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
@@ -250,7 +250,7 @@ static void createTextureImage(qvktexture_t *dstTex, const unsigned char *data,
 	if (dstTex->mipLevels > 1)
 		imageUsage |= VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
 
-	VK_VERIFY(QVk_CreateImage(width, height, dstTex->format, VK_IMAGE_TILING_OPTIMAL, imageUsage, VMA_MEMORY_USAGE_GPU_ONLY, dstTex));
+	VK_VERIFY(QVk_CreateImage(width, height, dstTex->format, VK_IMAGE_TILING_OPTIMAL, imageUsage, dstTex));
 
 	transitionImageLayout(&command_buffer, &vk_device.transferQueue, dstTex, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
 	// copy buffer to image
@@ -266,7 +266,7 @@ static void createTextureImage(qvktexture_t *dstTex, const unsigned char *data,
 		.imageExtent = { width, height, 1 }
 	};
 
-	vkCmdCopyBufferToImage(command_buffer, staging_buffer, dstTex->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+	vkCmdCopyBufferToImage(command_buffer, staging_buffer, dstTex->resource.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
 
 	if (dstTex->mipLevels > 1)
 	{
@@ -309,7 +309,7 @@ VkResult QVk_CreateImageView(const VkImage *image, VkImageAspectFlag
 	return vkCreateImageView(vk_device.logical, &ivCreateInfo, NULL, imageView);
 }
 
-VkResult QVk_CreateImage(uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, VmaMemoryUsage memUsage, qvktexture_t *texture)
+VkResult QVk_CreateImage(uint32_t width, uint32_t height, VkFormat format, VkImageTiling tiling, VkImageUsageFlags usage, qvktexture_t *texture)
 {
 	VkImageCreateInfo imageInfo = {
 		.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
@@ -336,49 +336,32 @@ VkResult QVk_CreateImage(uint32_t width, uint32_t height, VkFormat format, VkIma
 		imageInfo.pQueueFamilyIndices = queueFamilies;
 	}
 
-	VmaAllocationCreateInfo vmallocInfo = {
-		.flags = texture->vmaFlags,
-		.usage = memUsage
-	};
-
 	texture->sharingMode = imageInfo.sharingMode;
-	return vmaCreateImage(vk_malloc, &imageInfo, &vmallocInfo, &texture->image, &texture->allocation, &texture->allocInfo);
+	return image_create(&texture->resource, imageInfo, /*mem_properties*/ 0, /*mem_preferences*/ 0);
 }
 
 void QVk_CreateDepthBuffer(VkSampleCountFlagBits sampleCount, qvktexture_t *depthBuffer)
 {
 	depthBuffer->format = QVk_FindDepthFormat();
 	depthBuffer->sampleCount = sampleCount;
-	// On 64-bit builds, Intel drivers throw a warning:
-	// "Mapping an image with layout VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used."
-	// Minor annoyance but we don't want any validation warnings, so we create dedicated allocation for depth buffer.
-	// more details: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/issues/34
-	// Note that this is a false positive which in other cases could be ignored: https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/general_considerations.html#general_considerations_validation_layer_warnings
-	depthBuffer->vmaFlags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 
-	VK_VERIFY(QVk_CreateImage(vk_swapchain.extent.width, vk_swapchain.extent.height, depthBuffer->format, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VMA_MEMORY_USAGE_GPU_ONLY, depthBuffer));
-	VK_VERIFY(QVk_CreateImageView(&depthBuffer->image, getDepthStencilAspect(depthBuffer->format), &depthBuffer->imageView, depthBuffer->format, depthBuffer->mipLevels));
+	VK_VERIFY(QVk_CreateImage(vk_swapchain.extent.width, vk_swapchain.extent.height, depthBuffer->format, VK_IMAGE_TILING_OPTIMAL, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, depthBuffer));
+	VK_VERIFY(QVk_CreateImageView(&depthBuffer->resource.image, getDepthStencilAspect(depthBuffer->format), &depthBuffer->imageView, depthBuffer->format, depthBuffer->mipLevels));
 }
 
 void QVk_CreateColorBuffer(VkSampleCountFlagBits sampleCount, qvktexture_t *colorBuffer, int extraFlags)
 {
 	colorBuffer->format = vk_swapchain.format;
 	colorBuffer->sampleCount = sampleCount;
-	// On 64-bit builds, Intel drivers throw a warning:
-	// "Mapping an image with layout VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used."
-	// Minor annoyance but we don't want any validation warnings, so we create dedicated allocation for color buffer.
-	// more details: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/issues/34
-	// Note that this is a false positive which in other cases could be ignored: https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/general_considerations.html#general_considerations_validation_layer_warnings
-	colorBuffer->vmaFlags = VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT;
 
-	VK_VERIFY(QVk_CreateImage(vk_swapchain.extent.width, vk_swapchain.extent.height, colorBuffer->format, VK_IMAGE_TILING_OPTIMAL, extraFlags | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VMA_MEMORY_USAGE_GPU_ONLY, colorBuffer));
-	VK_VERIFY(QVk_CreateImageView(&colorBuffer->image, VK_IMAGE_ASPECT_COLOR_BIT, &colorBuffer->imageView, colorBuffer->format, colorBuffer->mipLevels));
+	VK_VERIFY(QVk_CreateImage(vk_swapchain.extent.width, vk_swapchain.extent.height, colorBuffer->format, VK_IMAGE_TILING_OPTIMAL, extraFlags | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, colorBuffer));
+	VK_VERIFY(QVk_CreateImageView(&colorBuffer->resource.image, VK_IMAGE_ASPECT_COLOR_BIT, &colorBuffer->imageView, colorBuffer->format, colorBuffer->mipLevels));
 }
 
 void QVk_CreateTexture(qvktexture_t *texture, const unsigned char *data, uint32_t width, uint32_t height, qvksampler_t samplerType)
 {
 	createTextureImage(texture, data, width, height);
-	VK_VERIFY(QVk_CreateImageView(&texture->image, VK_IMAGE_ASPECT_COLOR_BIT, &texture->imageView, texture->format, texture->mipLevels));
+	VK_VERIFY(QVk_CreateImageView(&texture->resource.image, VK_IMAGE_ASPECT_COLOR_BIT, &texture->imageView, texture->format, texture->mipLevels));
 
 	// create descriptor set for the texture
 	VkDescriptorSetAllocateInfo dsAllocInfo = {
@@ -421,7 +404,7 @@ void QVk_UpdateTextureData(qvktexture_t *texture, const unsigned char *data, uin
 		.imageExtent = { width, height, 1 }
 	};
 
-	vkCmdCopyBufferToImage(command_buffer, staging_buffer, texture->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
+	vkCmdCopyBufferToImage(command_buffer, staging_buffer, texture->resource.image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
 
 	if (texture->mipLevels > 1)
 	{
@@ -449,36 +432,37 @@ static void QVk_ReleaseTexture(qvktexture_t *texture)
 		vkDeviceWaitIdle(vk_device.logical);
 	}
 
-	if (texture->image != VK_NULL_HANDLE)
-		vmaDestroyImage(vk_malloc, texture->image, texture->allocation);
+	if (texture->resource.image != VK_NULL_HANDLE)
+		image_destroy(&texture->resource);
 
 	if (texture->imageView != VK_NULL_HANDLE)
 		vkDestroyImageView(vk_device.logical, texture->imageView, NULL);
 
 	if (texture->descriptorSet != VK_NULL_HANDLE)
 		vkFreeDescriptorSets(vk_device.logical, vk_descriptorPool, 1, &texture->descriptorSet);
 
-	texture->image = VK_NULL_HANDLE;
+	texture->resource.image = VK_NULL_HANDLE;
 	texture->imageView = VK_NULL_HANDLE;
 	texture->descriptorSet = VK_NULL_HANDLE;
 }
 
 void QVk_ReadPixels(uint8_t *dstBuffer, uint32_t width, uint32_t height)
 {
-	qvkbuffer_t buff;
+	BufferResource_t buff;
+	uint8_t *pMappedData;
 	VkCommandBuffer cmdBuffer;
-	qvkbufferopts_t buffOpts = {
+
+	VkBufferCreateInfo bcInfo = {
+		.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+		.pNext = NULL,
+		.flags = 0,
+		.size = width * height * 4,
 		.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT,
-		.reqMemFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
-		.prefMemFlags = VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
-		.vmaUsage = VMA_MEMORY_USAGE_CPU_ONLY,
-		// When taking a screenshot on Intel, the Linux driver may throw a warning:
-		// "Mapping an image with layout VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL can result in undefined behavior if this memory is used by the device. Only GENERAL or PREINITIALIZED should be used."
-		// Minor annoyance but we don't want any validation warnings, so we create dedicated allocation for the image buffer.
-		// more details: https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator/issues/34
-		// Note that this is a false positive which in other cases could be ignored: https://gpuopen-librariesandsdks.github.io/VulkanMemoryAllocator/html/general_considerations.html#general_considerations_validation_layer_warnings
-		.vmaFlags = VMA_ALLOCATION_CREATE_MAPPED_BIT | VMA_ALLOCATION_CREATE_DEDICATED_MEMORY_BIT
+		.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+		.queueFamilyIndexCount = 0,
+		.pQueueFamilyIndices = NULL,
 	};
 
-	VK_VERIFY(QVk_CreateBuffer(width * height * 4, &buff, buffOpts));
+	VK_VERIFY(buffer_create(&buff, width * height * 4, bcInfo, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_MEMORY_PROPERTY_HOST_CACHED_BIT));
+
 	cmdBuffer = QVk_CreateCommandBuffer(&vk_commandPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
 	VK_VERIFY(QVk_BeginCommand(&cmdBuffer));
@@ -521,9 +505,11 @@ void QVk_ReadPixels(uint8_t *dstBuffer, uint32_t width, uint32_t height)
 	QVk_SubmitCommand(&cmdBuffer, &vk_device.gfxQueue);
 
 	// store image in destination buffer
-	memcpy(dstBuffer, (uint8_t *)buff.allocInfo.pMappedData, width * height * 4);
+	pMappedData = buffer_map(&buff);
+	memcpy(dstBuffer, pMappedData, width * height * 4);
+	buffer_unmap(&buff);
 
-	QVk_FreeBuffer(&buff);
+	buffer_destroy(&buff);
 }
 
 /*
@@ -542,7 +528,7 @@ void Vk_ImageList_f (void)
 
 	for (i = 0, image = vktextures; i < numvktextures; i++, image++)
 	{
-		if (image->vk_texture.image == VK_NULL_HANDLE)
+		if (image->vk_texture.resource.image == VK_NULL_HANDLE)
 			continue;
 		texels += image->upload_width*image->upload_height;
 		switch (image->type)
@@ -682,17 +668,17 @@ void Vk_TextureMode( char *string )
 
 	for (j = 0, image = vktextures; j < numvktextures; j++, image++)
 	{
 		// skip console characters - we want them unfiltered at all times
-		if (image->vk_texture.image != VK_NULL_HANDLE && Q_stricmp(image->name, "pics/conchars.pcx"))
+		if (image->vk_texture.resource.image != VK_NULL_HANDLE && Q_stricmp(image->name, "pics/conchars.pcx"))
 			QVk_UpdateTextureSampler(&image->vk_texture, i);
 	}
 
 	for (j = 0; j < MAX_SCRAPS; j++)
 	{
-		if (vk_scrapTextures[j].image != VK_NULL_HANDLE)
+		if (vk_scrapTextures[j].resource.image != VK_NULL_HANDLE)
 			QVk_UpdateTextureSampler(&vk_scrapTextures[j], i);
 	}
 
-	if (vk_rawTexture.image != VK_NULL_HANDLE)
+	if (vk_rawTexture.resource.image != VK_NULL_HANDLE)
 		QVk_UpdateTextureSampler(&vk_rawTexture, i);
 }
@@ -728,7 +714,7 @@ void Vk_LmapTextureMode( char *string )
 	vkDeviceWaitIdle(vk_device.logical);
 	for (j = 0; j < MAX_LIGHTMAPS*2; j++)
 	{
-		if (vk_state.lightmap_textures[j].image != VK_NULL_HANDLE)
+		if (vk_state.lightmap_textures[j].resource.image != VK_NULL_HANDLE)
 			QVk_UpdateTextureSampler(&vk_state.lightmap_textures[j], i);
 	}
 }
@@ -1236,7 +1222,7 @@ Vk_LoadPic(char *name, byte *pic, int width, int realwidth, int height, int real
 	// find a free image_t
 	for (i = 0, image = vktextures; i < numvktextures; i++, image++)
 	{
-		if (image->vk_texture.image == VK_NULL_HANDLE && !image->scrap)
+		if (image->vk_texture.resource.image == VK_NULL_HANDLE && !image->scrap)
 			break;
 	}
 	if (i == numvktextures)
@@ -1289,7 +1275,7 @@ Vk_LoadPic(char *name, byte *pic, int width, int realwidth, int height, int real
 		// update scrap data
 		Vk_Upload8(scrap_texels[texnum], BLOCK_WIDTH, BLOCK_HEIGHT, false, false, &texBuffer, &upload_width, &upload_height);
 
-		if (vk_scrapTextures[texnum].image != VK_NULL_HANDLE)
+		if (vk_scrapTextures[texnum].resource.image != VK_NULL_HANDLE)
 		{
 			QVk_UpdateTextureData(&vk_scrapTextures[texnum], texBuffer, 0, 0, image->upload_width, image->upload_height);
 		}
diff --git a/src/client/refresh/vk/vk_rsurf.c b/src/client/refresh/vk/vk_rsurf.c
index 513bd48d..2930e8dc 100644
--- a/src/client/refresh/vk/vk_rsurf.c
+++ b/src/client/refresh/vk/vk_rsurf.c
@@ -1076,7 +1076,7 @@ static void LM_UploadBlock( qboolean dynamic )
 	}
 	else
 	{
-		if (vk_state.lightmap_textures[texture].image != VK_NULL_HANDLE)
+		if (vk_state.lightmap_textures[texture].resource.image != VK_NULL_HANDLE)
 			QVk_UpdateTextureData(&vk_state.lightmap_textures[texture], vk_lms.lightmap_buffer, 0, 0, BLOCK_WIDTH, BLOCK_HEIGHT);
 		else
 		{
@@ -1273,7 +1273,7 @@ void Vk_BeginBuildingLightmaps (model_t *m)
 	/*
 	** initialize the dynamic lightmap textures
 	*/
-	if (vk_state.lightmap_textures[DYNLIGHTMAP_OFFSET].image == VK_NULL_HANDLE)
+	if (vk_state.lightmap_textures[DYNLIGHTMAP_OFFSET].resource.image == VK_NULL_HANDLE)
 	{
 		for (i = DYNLIGHTMAP_OFFSET; i < MAX_LIGHTMAPS*2; i++)
 		{
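Reviewer note (not part of the patch): every removed vma* call above is replaced by a thin wrapper from vk_util.h / vk_util.c, which this diff references but does not include. The sketch below is inferred purely from the call sites in this patch. The ImageResource_t fields match the QVVKTEXTURE_INIT initializer (image, memory, size); the BufferResource_t members beyond .buffer and the exact return and parameter types of the helpers are assumptions, not copied from the real header.

    /* Illustrative sketch only -- see vk_util.h in this change set for the real declarations. */
    typedef struct
    {
    	VkBuffer buffer;
    	VkDeviceMemory memory;	/* assumed; only .buffer is visible in this diff */
    	VkDeviceSize size;
    } BufferResource_t;

    typedef struct
    {
    	VkImage image;
    	VkDeviceMemory memory;
    	VkDeviceSize size;
    } ImageResource_t;

    /* Create/destroy a buffer backed by memory with the required/preferred property flags
       (replaces vmaCreateBuffer/vmaDestroyBuffer). */
    VkResult buffer_create(BufferResource_t *buf, VkDeviceSize size, VkBufferCreateInfo create_info,
    		VkMemoryPropertyFlags mem_properties, VkMemoryPropertyFlags mem_preferences);
    void buffer_destroy(BufferResource_t *buf);

    /* Map/unmap and flush/invalidate the backing memory
       (replace vmaMapMemory/vmaUnmapMemory and vmaFlushAllocation/vmaInvalidateAllocation). */
    void *buffer_map(BufferResource_t *buf);
    void buffer_unmap(BufferResource_t *buf);
    VkResult buffer_flush(BufferResource_t *buf);
    VkResult buffer_invalidate(BufferResource_t *buf);

    /* Image counterparts used by QVk_CreateImage, DestroyDrawBuffer and QVk_ReleaseTexture
       (replace vmaCreateImage/vmaDestroyImage). */
    VkResult image_create(ImageResource_t *img, VkImageCreateInfo img_create_info,
    		VkMemoryPropertyFlags mem_properties, VkMemoryPropertyFlags mem_preferences);
    void image_destroy(ImageResource_t *img);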