// rallyunlimited-engine/code/renderervk/vk.c
#include "tr_local.h"
#include "vk.h"
#if defined (_DEBUG)
#if defined (_WIN32)
#define USE_VK_VALIDATION
#include <windows.h> // for win32 debug callback
#endif
#endif
static int vkSamples = VK_SAMPLE_COUNT_1_BIT;
static int vkMaxSamples = VK_SAMPLE_COUNT_1_BIT;
//
// Vulkan API functions used by the renderer.
//
static PFN_vkCreateInstance qvkCreateInstance;
static PFN_vkEnumerateInstanceExtensionProperties qvkEnumerateInstanceExtensionProperties;
static PFN_vkCreateDevice qvkCreateDevice;
static PFN_vkDestroyInstance qvkDestroyInstance;
static PFN_vkEnumerateDeviceExtensionProperties qvkEnumerateDeviceExtensionProperties;
static PFN_vkEnumeratePhysicalDevices qvkEnumeratePhysicalDevices;
static PFN_vkGetDeviceProcAddr qvkGetDeviceProcAddr;
static PFN_vkGetPhysicalDeviceFeatures qvkGetPhysicalDeviceFeatures;
static PFN_vkGetPhysicalDeviceFormatProperties qvkGetPhysicalDeviceFormatProperties;
static PFN_vkGetPhysicalDeviceMemoryProperties qvkGetPhysicalDeviceMemoryProperties;
static PFN_vkGetPhysicalDeviceProperties qvkGetPhysicalDeviceProperties;
static PFN_vkGetPhysicalDeviceQueueFamilyProperties qvkGetPhysicalDeviceQueueFamilyProperties;
static PFN_vkDestroySurfaceKHR qvkDestroySurfaceKHR;
static PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR qvkGetPhysicalDeviceSurfaceCapabilitiesKHR;
static PFN_vkGetPhysicalDeviceSurfaceFormatsKHR qvkGetPhysicalDeviceSurfaceFormatsKHR;
static PFN_vkGetPhysicalDeviceSurfacePresentModesKHR qvkGetPhysicalDeviceSurfacePresentModesKHR;
static PFN_vkGetPhysicalDeviceSurfaceSupportKHR qvkGetPhysicalDeviceSurfaceSupportKHR;
#ifdef USE_VK_VALIDATION
static PFN_vkCreateDebugReportCallbackEXT qvkCreateDebugReportCallbackEXT;
static PFN_vkDestroyDebugReportCallbackEXT qvkDestroyDebugReportCallbackEXT;
#endif
static PFN_vkAllocateCommandBuffers qvkAllocateCommandBuffers;
static PFN_vkAllocateDescriptorSets qvkAllocateDescriptorSets;
static PFN_vkAllocateMemory qvkAllocateMemory;
static PFN_vkBeginCommandBuffer qvkBeginCommandBuffer;
static PFN_vkBindBufferMemory qvkBindBufferMemory;
static PFN_vkBindImageMemory qvkBindImageMemory;
static PFN_vkCmdBeginRenderPass qvkCmdBeginRenderPass;
static PFN_vkCmdBindDescriptorSets qvkCmdBindDescriptorSets;
static PFN_vkCmdBindIndexBuffer qvkCmdBindIndexBuffer;
static PFN_vkCmdBindPipeline qvkCmdBindPipeline;
static PFN_vkCmdBindVertexBuffers qvkCmdBindVertexBuffers;
static PFN_vkCmdBlitImage qvkCmdBlitImage;
static PFN_vkCmdClearAttachments qvkCmdClearAttachments;
static PFN_vkCmdCopyBuffer qvkCmdCopyBuffer;
static PFN_vkCmdCopyBufferToImage qvkCmdCopyBufferToImage;
static PFN_vkCmdCopyImage qvkCmdCopyImage;
static PFN_vkCmdDraw qvkCmdDraw;
static PFN_vkCmdDrawIndexed qvkCmdDrawIndexed;
static PFN_vkCmdEndRenderPass qvkCmdEndRenderPass;
static PFN_vkCmdNextSubpass qvkCmdNextSubpass;
static PFN_vkCmdPipelineBarrier qvkCmdPipelineBarrier;
static PFN_vkCmdPushConstants qvkCmdPushConstants;
static PFN_vkCmdSetDepthBias qvkCmdSetDepthBias;
static PFN_vkCmdSetScissor qvkCmdSetScissor;
static PFN_vkCmdSetViewport qvkCmdSetViewport;
static PFN_vkCreateBuffer qvkCreateBuffer;
static PFN_vkCreateCommandPool qvkCreateCommandPool;
static PFN_vkCreateDescriptorPool qvkCreateDescriptorPool;
static PFN_vkCreateDescriptorSetLayout qvkCreateDescriptorSetLayout;
static PFN_vkCreateFence qvkCreateFence;
static PFN_vkCreateFramebuffer qvkCreateFramebuffer;
static PFN_vkCreateGraphicsPipelines qvkCreateGraphicsPipelines;
static PFN_vkCreateImage qvkCreateImage;
static PFN_vkCreateImageView qvkCreateImageView;
static PFN_vkCreatePipelineLayout qvkCreatePipelineLayout;
static PFN_vkCreatePipelineCache qvkCreatePipelineCache;
static PFN_vkCreateRenderPass qvkCreateRenderPass;
static PFN_vkCreateSampler qvkCreateSampler;
static PFN_vkCreateSemaphore qvkCreateSemaphore;
static PFN_vkCreateShaderModule qvkCreateShaderModule;
static PFN_vkDestroyBuffer qvkDestroyBuffer;
static PFN_vkDestroyCommandPool qvkDestroyCommandPool;
static PFN_vkDestroyDescriptorPool qvkDestroyDescriptorPool;
static PFN_vkDestroyDescriptorSetLayout qvkDestroyDescriptorSetLayout;
static PFN_vkDestroyDevice qvkDestroyDevice;
static PFN_vkDestroyFence qvkDestroyFence;
static PFN_vkDestroyFramebuffer qvkDestroyFramebuffer;
static PFN_vkDestroyImage qvkDestroyImage;
static PFN_vkDestroyImageView qvkDestroyImageView;
static PFN_vkDestroyPipeline qvkDestroyPipeline;
static PFN_vkDestroyPipelineCache qvkDestroyPipelineCache;
static PFN_vkDestroyPipelineLayout qvkDestroyPipelineLayout;
static PFN_vkDestroyRenderPass qvkDestroyRenderPass;
static PFN_vkDestroySampler qvkDestroySampler;
static PFN_vkDestroySemaphore qvkDestroySemaphore;
static PFN_vkDestroyShaderModule qvkDestroyShaderModule;
static PFN_vkDeviceWaitIdle qvkDeviceWaitIdle;
static PFN_vkEndCommandBuffer qvkEndCommandBuffer;
static PFN_vkFlushMappedMemoryRanges qvkFlushMappedMemoryRanges;
static PFN_vkFreeCommandBuffers qvkFreeCommandBuffers;
static PFN_vkFreeDescriptorSets qvkFreeDescriptorSets;
static PFN_vkFreeMemory qvkFreeMemory;
static PFN_vkGetBufferMemoryRequirements qvkGetBufferMemoryRequirements;
static PFN_vkGetDeviceQueue qvkGetDeviceQueue;
static PFN_vkGetImageMemoryRequirements qvkGetImageMemoryRequirements;
static PFN_vkGetImageSubresourceLayout qvkGetImageSubresourceLayout;
static PFN_vkInvalidateMappedMemoryRanges qvkInvalidateMappedMemoryRanges;
static PFN_vkMapMemory qvkMapMemory;
static PFN_vkQueueSubmit qvkQueueSubmit;
static PFN_vkQueueWaitIdle qvkQueueWaitIdle;
static PFN_vkResetCommandBuffer qvkResetCommandBuffer;
static PFN_vkResetDescriptorPool qvkResetDescriptorPool;
static PFN_vkResetFences qvkResetFences;
static PFN_vkUnmapMemory qvkUnmapMemory;
static PFN_vkUpdateDescriptorSets qvkUpdateDescriptorSets;
static PFN_vkWaitForFences qvkWaitForFences;
static PFN_vkAcquireNextImageKHR qvkAcquireNextImageKHR;
static PFN_vkCreateSwapchainKHR qvkCreateSwapchainKHR;
static PFN_vkDestroySwapchainKHR qvkDestroySwapchainKHR;
static PFN_vkGetSwapchainImagesKHR qvkGetSwapchainImagesKHR;
static PFN_vkQueuePresentKHR qvkQueuePresentKHR;
static PFN_vkGetBufferMemoryRequirements2KHR qvkGetBufferMemoryRequirements2KHR;
static PFN_vkGetImageMemoryRequirements2KHR qvkGetImageMemoryRequirements2KHR;
static PFN_vkDebugMarkerSetObjectNameEXT qvkDebugMarkerSetObjectNameEXT;
////////////////////////////////////////////////////////////////////////////
// forward declaration
VkPipeline create_pipeline( const Vk_Pipeline_Def *def, renderPass_t renderPassIndex );
static uint32_t find_memory_type( VkPhysicalDevice physical_device, uint32_t memory_type_bits, VkMemoryPropertyFlags properties ) {
VkPhysicalDeviceMemoryProperties memory_properties;
uint32_t i;
qvkGetPhysicalDeviceMemoryProperties( vk.physical_device, &memory_properties );
for ( i = 0; i < memory_properties.memoryTypeCount; i++ ) {
if ((memory_type_bits & (1 << i)) != 0 &&
(memory_properties.memoryTypes[i].propertyFlags & properties) == properties) {
return i;
}
}
ri.Error( ERR_FATAL, "Vulkan: failed to find matching memory type with requested properties" );
return ~0U; // never get here
}
static uint32_t find_memory_type2( uint32_t memory_type_bits, VkMemoryPropertyFlags properties, VkMemoryPropertyFlags *outprops ) {
VkPhysicalDeviceMemoryProperties memory_properties;
uint32_t i;
qvkGetPhysicalDeviceMemoryProperties( vk.physical_device, &memory_properties );
for ( i = 0; i < memory_properties.memoryTypeCount; i++ ) {
if ( (memory_type_bits & (1 << i)) != 0 && (memory_properties.memoryTypes[i].propertyFlags & properties) == properties ) {
if ( outprops ) {
*outprops = memory_properties.memoryTypes[i].propertyFlags;
}
return i;
}
}
return ~0U;
}
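// Unlike find_memory_type(), this variant returns ~0U on failure instead of
// aborting, so callers can retry with relaxed property requirements. A sketch
// (hypothetical 'reqs' filled by vkGetBufferMemoryRequirements):
//
//   uint32_t type = find_memory_type2( reqs.memoryTypeBits,
//       VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &props );
//   if ( type == ~0U )
//       type = find_memory_type2( reqs.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, &props );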
static const char *pmode_to_str( VkPresentModeKHR mode )
{
static char buf[32];
switch ( mode ) {
case VK_PRESENT_MODE_IMMEDIATE_KHR: return "IMMEDIATE";
case VK_PRESENT_MODE_MAILBOX_KHR: return "MAILBOX";
case VK_PRESENT_MODE_FIFO_KHR: return "FIFO";
case VK_PRESENT_MODE_FIFO_RELAXED_KHR: return "FIFO_RELAXED";
default: Com_sprintf( buf, sizeof( buf ), "mode#%x", mode ); return buf;
}
}
#define CASE_STR(x) case (x): return #x
const char *vk_format_string( VkFormat format )
{
static char buf[16];
switch ( format ) {
// color formats
CASE_STR( VK_FORMAT_R5G5B5A1_UNORM_PACK16 );
CASE_STR( VK_FORMAT_B5G5R5A1_UNORM_PACK16 );
CASE_STR( VK_FORMAT_R5G6B5_UNORM_PACK16 );
CASE_STR( VK_FORMAT_B5G6R5_UNORM_PACK16 );
CASE_STR( VK_FORMAT_B8G8R8A8_SRGB );
CASE_STR( VK_FORMAT_R8G8B8A8_SRGB );
CASE_STR( VK_FORMAT_B8G8R8A8_SNORM );
CASE_STR( VK_FORMAT_R8G8B8A8_SNORM );
CASE_STR( VK_FORMAT_B8G8R8A8_UNORM );
CASE_STR( VK_FORMAT_R8G8B8A8_UNORM );
CASE_STR( VK_FORMAT_B4G4R4A4_UNORM_PACK16 );
CASE_STR( VK_FORMAT_R4G4B4A4_UNORM_PACK16 );
CASE_STR( VK_FORMAT_R16G16B16A16_UNORM );
CASE_STR( VK_FORMAT_A2B10G10R10_UNORM_PACK32 );
CASE_STR( VK_FORMAT_A2R10G10B10_UNORM_PACK32 );
CASE_STR( VK_FORMAT_B10G11R11_UFLOAT_PACK32 );
// depth formats
CASE_STR( VK_FORMAT_D16_UNORM );
CASE_STR( VK_FORMAT_D16_UNORM_S8_UINT );
CASE_STR( VK_FORMAT_X8_D24_UNORM_PACK32 );
CASE_STR( VK_FORMAT_D24_UNORM_S8_UINT );
CASE_STR( VK_FORMAT_D32_SFLOAT );
CASE_STR( VK_FORMAT_D32_SFLOAT_S8_UINT );
default:
Com_sprintf( buf, sizeof( buf ), "#%i", format );
return buf;
}
}
static const char *vk_result_string( VkResult code ) {
static char buffer[32];
switch ( code ) {
CASE_STR( VK_SUCCESS );
CASE_STR( VK_NOT_READY );
CASE_STR( VK_TIMEOUT );
CASE_STR( VK_EVENT_SET );
CASE_STR( VK_EVENT_RESET );
CASE_STR( VK_INCOMPLETE );
CASE_STR( VK_ERROR_OUT_OF_HOST_MEMORY );
CASE_STR( VK_ERROR_OUT_OF_DEVICE_MEMORY );
CASE_STR( VK_ERROR_INITIALIZATION_FAILED );
CASE_STR( VK_ERROR_DEVICE_LOST );
CASE_STR( VK_ERROR_MEMORY_MAP_FAILED );
CASE_STR( VK_ERROR_LAYER_NOT_PRESENT );
CASE_STR( VK_ERROR_EXTENSION_NOT_PRESENT );
CASE_STR( VK_ERROR_FEATURE_NOT_PRESENT );
CASE_STR( VK_ERROR_INCOMPATIBLE_DRIVER );
CASE_STR( VK_ERROR_TOO_MANY_OBJECTS );
CASE_STR( VK_ERROR_FORMAT_NOT_SUPPORTED );
CASE_STR( VK_ERROR_FRAGMENTED_POOL );
CASE_STR( VK_ERROR_UNKNOWN );
CASE_STR( VK_ERROR_OUT_OF_POOL_MEMORY );
CASE_STR( VK_ERROR_INVALID_EXTERNAL_HANDLE );
CASE_STR( VK_ERROR_FRAGMENTATION );
CASE_STR( VK_ERROR_INVALID_OPAQUE_CAPTURE_ADDRESS );
CASE_STR( VK_ERROR_SURFACE_LOST_KHR );
CASE_STR( VK_ERROR_NATIVE_WINDOW_IN_USE_KHR );
CASE_STR( VK_SUBOPTIMAL_KHR );
CASE_STR( VK_ERROR_OUT_OF_DATE_KHR );
CASE_STR( VK_ERROR_INCOMPATIBLE_DISPLAY_KHR );
CASE_STR( VK_ERROR_VALIDATION_FAILED_EXT );
CASE_STR( VK_ERROR_INVALID_SHADER_NV );
CASE_STR( VK_ERROR_INVALID_DRM_FORMAT_MODIFIER_PLANE_LAYOUT_EXT );
CASE_STR( VK_ERROR_NOT_PERMITTED_EXT );
CASE_STR( VK_ERROR_FULL_SCREEN_EXCLUSIVE_MODE_LOST_EXT );
CASE_STR( VK_THREAD_IDLE_KHR );
CASE_STR( VK_THREAD_DONE_KHR );
CASE_STR( VK_OPERATION_DEFERRED_KHR );
CASE_STR( VK_OPERATION_NOT_DEFERRED_KHR );
CASE_STR( VK_PIPELINE_COMPILE_REQUIRED_EXT );
default:
Com_sprintf( buffer, sizeof( buffer ), "code %i", code );
return buffer;
}
}
#undef CASE_STR
#define VK_CHECK( function_call ) { \
VkResult res = function_call; \
if ( res < 0 ) { \
ri.Error( ERR_FATAL, "Vulkan: %s returned %s", #function_call, vk_result_string( res ) ); \
} \
}
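// VK_CHECK() treats any negative VkResult as fatal; non-error status codes
// such as VK_SUBOPTIMAL_KHR pass through silently. Typical usage (hypothetical
// handle names):
//
//   VK_CHECK( qvkCreateFence( vk.device, &fence_desc, NULL, &fence ) );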
/*
static VkFlags get_composite_alpha( VkCompositeAlphaFlagsKHR flags )
{
const VkCompositeAlphaFlagBitsKHR compositeFlags[] = {
VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR,
VK_COMPOSITE_ALPHA_POST_MULTIPLIED_BIT_KHR,
VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR
};
int i;
for ( i = 1; i < ARRAY_LEN( compositeFlags ); i++ ) {
if ( flags & compositeFlags[i] ) {
return compositeFlags[i];
}
}
return compositeFlags[0];
}
*/
static VkCommandBuffer begin_command_buffer( void )
{
VkCommandBufferBeginInfo begin_info;
VkCommandBufferAllocateInfo alloc_info;
VkCommandBuffer command_buffer;
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.commandPool = vk.command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
VK_CHECK( qvkAllocateCommandBuffers( vk.device, &alloc_info, &command_buffer ) );
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.pNext = NULL;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
begin_info.pInheritanceInfo = NULL;
VK_CHECK( qvkBeginCommandBuffer( command_buffer, &begin_info ) );
return command_buffer;
}
static void end_command_buffer( VkCommandBuffer command_buffer )
{
VkSubmitInfo submit_info;
VkCommandBuffer cmdbuf[1];
cmdbuf[0] = command_buffer;
VK_CHECK( qvkEndCommandBuffer( command_buffer ) );
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = cmdbuf;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
VK_CHECK( qvkQueueSubmit( vk.queue, 1, &submit_info, VK_NULL_HANDLE ) );
VK_CHECK( qvkQueueWaitIdle( vk.queue ) );
qvkFreeCommandBuffers( vk.device, vk.command_pool, 1, cmdbuf );
}
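// begin_command_buffer()/end_command_buffer() form a blocking one-shot submit:
// end_command_buffer() drains the queue with vkQueueWaitIdle() before freeing
// the command buffer, so the pair is only appropriate for initialization and
// other non-frame-critical work.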
static void record_image_layout_transition(VkCommandBuffer command_buffer, VkImage image, VkImageAspectFlags image_aspect_flags, VkAccessFlags src_access_flags, VkImageLayout old_layout, VkAccessFlags dst_access_flags, VkImageLayout new_layout) {
VkImageMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.pNext = NULL;
barrier.srcAccessMask = src_access_flags;
barrier.dstAccessMask = dst_access_flags;
barrier.oldLayout = old_layout;
barrier.newLayout = new_layout;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = image_aspect_flags;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
barrier.subresourceRange.baseArrayLayer = 0;
barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;
qvkCmdPipelineBarrier( command_buffer, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, 0, 0, NULL, 0, NULL, 1, &barrier );
}
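// Using VK_PIPELINE_STAGE_ALL_COMMANDS_BIT for both stage masks makes this a
// full (worst-case) pipeline barrier - always correct, never optimal, which is
// fine for the one-off transitions it is used for. A sketch of a typical call
// (hypothetical 'image' handle), preparing a fresh image for a buffer copy:
//
//   record_image_layout_transition( cb, image, VK_IMAGE_ASPECT_COLOR_BIT,
//       0, VK_IMAGE_LAYOUT_UNDEFINED,
//       VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL );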
// debug markers
#define SET_OBJECT_NAME(obj,objName,objType) vk_set_object_name( (uint64_t)(obj), (objName), (objType) )
static void vk_set_object_name( uint64_t obj, const char *objName, VkDebugReportObjectTypeEXT objType )
{
if ( qvkDebugMarkerSetObjectNameEXT && obj )
{
VkDebugMarkerObjectNameInfoEXT info;
info.sType = VK_STRUCTURE_TYPE_DEBUG_MARKER_OBJECT_NAME_INFO_EXT;
info.pNext = NULL;
info.objectType = objType;
info.object = obj;
info.pObjectName = objName;
qvkDebugMarkerSetObjectNameEXT( vk.device, &info );
}
}
static void vk_create_swapchain( VkPhysicalDevice physical_device, VkDevice device, VkSurfaceKHR surface, VkSurfaceFormatKHR surface_format, VkSwapchainKHR *swapchain ) {
VkImageViewCreateInfo view;
VkSurfaceCapabilitiesKHR surface_caps;
VkExtent2D image_extent;
uint32_t present_mode_count, i;
VkPresentModeKHR present_mode;
VkPresentModeKHR *present_modes;
uint32_t image_count;
VkSwapchainCreateInfoKHR desc;
qboolean mailbox_supported = qfalse;
qboolean immediate_supported = qfalse;
qboolean fifo_relaxed_supported = qfalse;
int v;
//physical_device = vk.physical_device;
//device = vk.device;
//surface_format = vk.surface_format;
//swapchain = &vk.swapchain;
VK_CHECK( qvkGetPhysicalDeviceSurfaceCapabilitiesKHR( physical_device, surface, &surface_caps ) );
image_extent = surface_caps.currentExtent;
if ( image_extent.width == 0xffffffff && image_extent.height == 0xffffffff ) {
image_extent.width = MIN( surface_caps.maxImageExtent.width, MAX( surface_caps.minImageExtent.width, (uint32_t) glConfig.vidWidth ) );
image_extent.height = MIN( surface_caps.maxImageExtent.height, MAX( surface_caps.minImageExtent.height, (uint32_t) glConfig.vidHeight ) );
}
vk.fastSky = qtrue;
if ( !vk.fboActive ) {
// VK_IMAGE_USAGE_TRANSFER_DST_BIT is required by image clear operations.
if ( ( surface_caps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_DST_BIT ) == 0 ) {
vk.fastSky = qfalse;
ri.Printf( PRINT_WARNING, "VK_IMAGE_USAGE_TRANSFER_DST_BIT is not supported by the swapchain\n" );
}
// VK_IMAGE_USAGE_TRANSFER_SRC_BIT is required in order to take screenshots.
if ((surface_caps.supportedUsageFlags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0) {
ri.Error(ERR_FATAL, "create_swapchain: VK_IMAGE_USAGE_TRANSFER_SRC_BIT is not supported by the swapchain");
}
}
// determine present mode and swapchain image count
VK_CHECK(qvkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &present_mode_count, NULL));
present_modes = (VkPresentModeKHR *) ri.Malloc( present_mode_count * sizeof( VkPresentModeKHR ) );
VK_CHECK(qvkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &present_mode_count, present_modes));
ri.Printf( PRINT_ALL, "...presentation modes:" );
for ( i = 0; i < present_mode_count; i++ ) {
ri.Printf( PRINT_ALL, " %s", pmode_to_str( present_modes[i] ) );
if ( present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR )
mailbox_supported = qtrue;
else if ( present_modes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR )
immediate_supported = qtrue;
else if ( present_modes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR )
fifo_relaxed_supported = qtrue;
}
ri.Printf( PRINT_ALL, "\n" );
ri.Free( present_modes );
if ( ( v = ri.Cvar_VariableIntegerValue( "r_swapInterval" ) ) != 0 ) {
if ( v == 2 && mailbox_supported )
present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
else if ( fifo_relaxed_supported )
present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
else
present_mode = VK_PRESENT_MODE_FIFO_KHR;
image_count = MAX( MIN_SWAPCHAIN_IMAGES_FIFO, surface_caps.minImageCount );
} else {
if ( immediate_supported ) {
present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
image_count = MAX( MIN_SWAPCHAIN_IMAGES_IMM, surface_caps.minImageCount );
} else if ( mailbox_supported ) {
present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
image_count = MAX( MIN_SWAPCHAIN_IMAGES_MAILBOX, surface_caps.minImageCount );
} else if ( fifo_relaxed_supported ) {
present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
image_count = MAX( MIN_SWAPCHAIN_IMAGES_FIFO, surface_caps.minImageCount );
} else {
present_mode = VK_PRESENT_MODE_FIFO_KHR;
image_count = MAX( MIN_SWAPCHAIN_IMAGES_FIFO, surface_caps.minImageCount );
}
}
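// Policy summary: with vsync (r_swapInterval != 0) prefer MAILBOX (interval 2)
// or FIFO_RELAXED over plain FIFO; without vsync prefer IMMEDIATE, then
// MAILBOX, then FIFO_RELAXED. FIFO is the final fallback as the only present
// mode the Vulkan spec guarantees to be available.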
if ( image_count < 2 ) {
image_count = 2;
}
if ( surface_caps.maxImageCount == 0 && present_mode == VK_PRESENT_MODE_FIFO_KHR ) {
image_count = MAX( MIN_SWAPCHAIN_IMAGES_FIFO_0, surface_caps.minImageCount );
} else if ( surface_caps.maxImageCount > 0 ) {
image_count = MIN( MIN( image_count, surface_caps.maxImageCount ), MAX_SWAPCHAIN_IMAGES );
}
ri.Printf( PRINT_ALL, "...selected presentation mode: %s, image count: %i\n", pmode_to_str( present_mode ), image_count );
// create swap chain
desc.sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR;
desc.pNext = NULL;
desc.flags = 0;
desc.surface = surface;
desc.minImageCount = image_count;
desc.imageFormat = surface_format.format;
desc.imageColorSpace = surface_format.colorSpace;
desc.imageExtent = image_extent;
desc.imageArrayLayers = 1;
desc.imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;
if ( !vk.fboActive ) {
desc.imageUsage |= VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
}
desc.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
desc.preTransform = surface_caps.currentTransform;
//desc.compositeAlpha = get_composite_alpha( surface_caps.supportedCompositeAlpha );
desc.compositeAlpha = VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
desc.presentMode = present_mode;
desc.clipped = VK_TRUE;
desc.oldSwapchain = VK_NULL_HANDLE;
VK_CHECK( qvkCreateSwapchainKHR( device, &desc, NULL, swapchain ) );
VK_CHECK( qvkGetSwapchainImagesKHR( device, *swapchain, &vk.swapchain_image_count, NULL ) );
vk.swapchain_image_count = MIN( vk.swapchain_image_count, MAX_SWAPCHAIN_IMAGES ); // driver may create more images than requested
VK_CHECK( qvkGetSwapchainImagesKHR( device, *swapchain, &vk.swapchain_image_count, vk.swapchain_images ) );
for ( i = 0; i < vk.swapchain_image_count; i++ ) {
view.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view.pNext = NULL;
view.flags = 0;
view.image = vk.swapchain_images[i];
view.viewType = VK_IMAGE_VIEW_TYPE_2D;
view.format = vk.present_format.format;
view.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
view.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
view.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
view.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
view.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
view.subresourceRange.baseMipLevel = 0;
view.subresourceRange.levelCount = 1;
view.subresourceRange.baseArrayLayer = 0;
view.subresourceRange.layerCount = 1;
VK_CHECK( qvkCreateImageView( vk.device, &view, NULL, &vk.swapchain_image_views[i] ) );
SET_OBJECT_NAME( vk.swapchain_images[i], va( "swapchain image %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT );
SET_OBJECT_NAME( vk.swapchain_image_views[i], va( "swapchain image view %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
}
if ( vk.initSwapchainLayout != VK_IMAGE_LAYOUT_UNDEFINED ) {
VkCommandBuffer command_buffer = begin_command_buffer();
for ( i = 0; i < vk.swapchain_image_count; i++ ) {
record_image_layout_transition( command_buffer, vk.swapchain_images[i],
VK_IMAGE_ASPECT_COLOR_BIT,
0, VK_IMAGE_LAYOUT_UNDEFINED,
VK_ACCESS_MEMORY_READ_BIT, vk.initSwapchainLayout );
}
end_command_buffer( command_buffer );
}
}
static void vk_create_render_passes( void )
{
VkAttachmentDescription attachments[3]; // color | depth | msaa color
VkAttachmentReference colorResolveRef;
VkAttachmentReference colorRef0;
VkAttachmentReference depthRef0;
VkSubpassDescription subpass;
VkSubpassDependency deps[2];
VkRenderPassCreateInfo desc;
VkFormat depth_format;
VkDevice device;
uint32_t i;
depth_format = vk.depth_format;
device = vk.device;
if ( r_fbo->integer == 0 )
{
// presentation
attachments[0].flags = 0;
attachments[0].format = vk.present_format.format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
#ifdef USE_BUFFER_CLEAR
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
#else
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // Assuming this will be completely overwritten
#endif
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for presentation
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = vk.initSwapchainLayout;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
}
else
{
// resolve/color buffer
attachments[0].flags = 0;
attachments[0].format = vk.color_format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
#ifdef USE_BUFFER_CLEAR
if ( vk.msaaActive )
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // Assuming this will be completely overwritten
else
attachments[ 0 ].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
#else
attachments[ 0 ].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // Assuming this will be completely overwritten
#endif
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for next render pass
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
// depth buffer
attachments[1].flags = 0;
attachments[1].format = depth_format;
attachments[1].samples = vkSamples;
attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // Need empty depth buffer before use
attachments[1].stencilLoadOp = r_stencilbits->integer ? VK_ATTACHMENT_LOAD_OP_CLEAR : VK_ATTACHMENT_LOAD_OP_DONT_CARE;
if ( r_bloom->integer ) {
attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // keep it for post-bloom pass
attachments[1].stencilStoreOp = r_stencilbits->integer ? VK_ATTACHMENT_STORE_OP_STORE : VK_ATTACHMENT_STORE_OP_DONT_CARE;
} else {
attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
}
attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
colorRef0.attachment = 0;
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
depthRef0.attachment = 1;
depthRef0.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
Com_Memset( &subpass, 0, sizeof( subpass ) );
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorRef0;
subpass.pDepthStencilAttachment = &depthRef0;
Com_Memset( &desc, 0, sizeof( desc ) );
desc.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.pAttachments = attachments;
desc.pSubpasses = &subpass;
desc.subpassCount = 1;
desc.attachmentCount = 2;
if ( vk.msaaActive )
{
attachments[2].flags = 0;
attachments[2].format = vk.color_format;
attachments[2].samples = vkSamples;
#ifdef USE_BUFFER_CLEAR
attachments[2].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
#else
attachments[2].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
#endif
if ( r_bloom->integer ) {
attachments[2].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // keep it for post-bloom pass
} else {
attachments[2].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; // Intermediate storage (not written)
}
attachments[2].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[2].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[2].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments[2].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
desc.attachmentCount = 3;
colorRef0.attachment = 2; // msaa image attachment
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
colorResolveRef.attachment = 0; // resolve image attachment
colorResolveRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
subpass.pResolveAttachments = &colorResolveRef;
}
// subpass dependencies
Com_Memset( &deps, 0, sizeof( deps ) );
if ( r_fbo->integer == 0 )
{
desc.dependencyCount = 1;
desc.pDependencies = deps;
deps[ 0 ].srcSubpass = VK_SUBPASS_EXTERNAL;
deps[ 0 ].dstSubpass = 0;
deps[ 0 ].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; // What pipeline stage must have completed for the dependency
deps[ 0 ].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; // What pipeline stage is waiting on the dependency
deps[ 0 ].srcAccessMask = 0; // What access scopes influence the dependency
deps[ 0 ].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // What access scopes are waiting on the dependency
deps[ 0 ].dependencyFlags = 0;
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.main ) );
SET_OBJECT_NAME( vk.render_pass.main, "render pass - main", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
return;
}
desc.dependencyCount = 2;
desc.pDependencies = deps;
deps[0].srcSubpass = VK_SUBPASS_EXTERNAL;
deps[0].dstSubpass = 0;
deps[0].srcStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; // What pipeline stage must have completed for the dependency
deps[0].dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; // What pipeline stage is waiting on the dependency
deps[0].srcAccessMask = VK_ACCESS_SHADER_READ_BIT; // What access scopes influence the dependency
deps[0].dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // What access scopes are waiting on the dependency
deps[0].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; // Only need the current fragment (or tile) synchronized, not the whole framebuffer
deps[1].srcSubpass = 0;
deps[1].dstSubpass = VK_SUBPASS_EXTERNAL;
deps[1].srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT; // Fragment data has been written
deps[1].dstStageMask = VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT; // Don't start shading until data is available
deps[1].srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT; // Waiting for color data to be written
deps[1].dstAccessMask = VK_ACCESS_SHADER_READ_BIT; // Don't read things from the shader before ready
deps[1].dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT; // Only need the current fragment (or tile) synchronized, not the whole framebuffer
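// Together deps[0]/deps[1] bracket the FBO color attachment between sampling
// passes: rendering must not begin until the previous pass has finished
// reading the attachment, and subsequent fragment shaders must not sample it
// until all color writes have completed.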
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.main ) );
SET_OBJECT_NAME( vk.render_pass.main, "render pass - main", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
if ( r_bloom->integer ) {
// post-bloom pass
// color buffer
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD; // load from previous pass
// depth buffer
attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
if ( vk.msaaActive ) {
// msaa render target
attachments[2].loadOp = VK_ATTACHMENT_LOAD_OP_LOAD;
attachments[2].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
}
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.post_bloom ) );
SET_OBJECT_NAME( vk.render_pass.post_bloom, "render pass - post_bloom", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
// bloom extraction, using resolved/main fbo as a source
desc.attachmentCount = 1;
colorRef0.attachment = 0;
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
Com_Memset( &subpass, 0, sizeof( subpass ) );
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorRef0;
attachments[0].flags = 0;
attachments[0].format = vk.bloom_format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // Assuming this will be completely overwritten
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for next render pass
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.bloom_extract ) );
SET_OBJECT_NAME( vk.render_pass.bloom_extract, "render pass - bloom_extract", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
for ( i = 0; i < ARRAY_LEN( vk.render_pass.blur ); i++ )
{
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.blur[i] ) );
SET_OBJECT_NAME( vk.render_pass.blur[i], va( "render pass - blur %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
}
}
// capture render pass
if ( vk.capture.image )
{
Com_Memset( &subpass, 0, sizeof( subpass ) );
attachments[0].flags = 0;
attachments[0].format = vk.capture_format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // this will be completely overwritten
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for next render pass
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
colorRef0.attachment = 0;
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorRef0;
desc.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.pAttachments = attachments;
desc.attachmentCount = 1;
desc.pSubpasses = &subpass;
desc.subpassCount = 1;
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.capture ) );
SET_OBJECT_NAME( vk.render_pass.capture, "render pass - capture", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
}
colorRef0.attachment = 0;
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
desc.attachmentCount = 1;
Com_Memset( &subpass, 0, sizeof( subpass ) );
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorRef0;
// gamma post-processing
attachments[0].flags = 0;
attachments[0].format = vk.present_format.format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for presentation
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = vk.initSwapchainLayout;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.gamma ) );
SET_OBJECT_NAME( vk.render_pass.gamma, "render pass - gamma", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
// screenmap
// resolve/color buffer
attachments[0].flags = 0;
attachments[0].format = vk.color_format;
attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
#ifdef USE_BUFFER_CLEAR
if ( vk.screenMapSamples > VK_SAMPLE_COUNT_1_BIT )
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
else
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
#else
attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE; // Assuming this will be completely overwritten
#endif
attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE; // needed for next render pass
attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[0].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments[0].finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
// depth buffer
attachments[1].flags = 0;
attachments[1].format = depth_format;
attachments[1].samples = vk.screenMapSamples;
attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; // Need empty depth buffer before use
attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[1].initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
colorRef0.attachment = 0;
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
depthRef0.attachment = 1;
depthRef0.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
Com_Memset( &subpass, 0, sizeof( subpass ) );
subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
subpass.colorAttachmentCount = 1;
subpass.pColorAttachments = &colorRef0;
subpass.pDepthStencilAttachment = &depthRef0;
Com_Memset( &desc, 0, sizeof( desc ) );
desc.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.pAttachments = attachments;
desc.pSubpasses = &subpass;
desc.subpassCount = 1;
desc.attachmentCount = 2;
desc.dependencyCount = 2;
desc.pDependencies = deps;
if ( vk.screenMapSamples > VK_SAMPLE_COUNT_1_BIT ) {
attachments[2].flags = 0;
attachments[2].format = vk.color_format;
attachments[2].samples = vk.screenMapSamples;
#ifdef USE_BUFFER_CLEAR
attachments[2].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
#else
attachments[2].loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
#endif
attachments[2].storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[2].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
attachments[2].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
attachments[2].initialLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
attachments[2].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
desc.attachmentCount = 3;
colorRef0.attachment = 2; // msaa image attachment
colorRef0.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
colorResolveRef.attachment = 0; // resolve image attachment
colorResolveRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
subpass.pResolveAttachments = &colorResolveRef;
}
VK_CHECK( qvkCreateRenderPass( device, &desc, NULL, &vk.render_pass.screenmap ) );
SET_OBJECT_NAME( vk.render_pass.screenmap, "render pass - screenmap", VK_DEBUG_REPORT_OBJECT_TYPE_RENDER_PASS_EXT );
}
static void allocate_and_bind_image_memory(VkImage image) {
VkMemoryRequirements memory_requirements;
VkDeviceSize alignment;
ImageChunk *chunk;
int i;
qvkGetImageMemoryRequirements(vk.device, image, &memory_requirements);
if ( memory_requirements.size > vk.image_chunk_size ) {
ri.Error( ERR_FATAL, "Vulkan: could not allocate memory, image is too large (%ikbytes).",
(int)(memory_requirements.size/1024) );
}
chunk = NULL;
// Try to find an existing chunk of sufficient capacity.
alignment = memory_requirements.alignment;
for ( i = 0; i < vk_world.num_image_chunks; i++ ) {
// ensure that memory region has proper alignment
VkDeviceSize offset = PAD( vk_world.image_chunks[i].used, alignment );
if ( offset + memory_requirements.size <= vk.image_chunk_size ) {
chunk = &vk_world.image_chunks[i];
chunk->used = offset + memory_requirements.size;
break;
}
}
// Allocate a new chunk in case we couldn't find suitable existing chunk.
if (chunk == NULL) {
VkMemoryAllocateInfo alloc_info;
VkDeviceMemory memory;
if (vk_world.num_image_chunks >= MAX_IMAGE_CHUNKS) {
ri.Error(ERR_FATAL, "Vulkan: image chunk limit has been reached" );
}
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = vk.image_chunk_size;
alloc_info.memoryTypeIndex = find_memory_type(vk.physical_device, memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &memory));
chunk = &vk_world.image_chunks[vk_world.num_image_chunks];
chunk->memory = memory;
chunk->used = memory_requirements.size;
SET_OBJECT_NAME( memory, va( "image memory chunk %i", vk_world.num_image_chunks ), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
vk_world.num_image_chunks++;
}
VK_CHECK(qvkBindImageMemory(vk.device, image, chunk->memory, chunk->used - memory_requirements.size));
}
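// Images are sub-allocated from large shared chunks instead of one
// vkAllocateMemory() per image: drivers cap the number of live allocations
// (maxMemoryAllocationCount may be as low as 4096), and a simple bump
// allocator per chunk keeps the renderer far below that limit. Individual
// images are never freed back to a chunk; chunks are presumably released
// wholesale when vk_world is torn down (outside this excerpt).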
static void ensure_staging_buffer_allocation(VkDeviceSize size) {
VkBufferCreateInfo buffer_desc;
VkMemoryRequirements memory_requirements;
VkMemoryAllocateInfo alloc_info;
uint32_t memory_type;
void *data;
if (vk_world.staging_buffer_size >= size)
return;
if (vk_world.staging_buffer != VK_NULL_HANDLE)
qvkDestroyBuffer(vk.device, vk_world.staging_buffer, NULL);
if (vk_world.staging_buffer_memory != VK_NULL_HANDLE)
qvkFreeMemory(vk.device, vk_world.staging_buffer_memory, NULL);
vk_world.staging_buffer_size = size;
buffer_desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
buffer_desc.pNext = NULL;
buffer_desc.flags = 0;
buffer_desc.size = size;
buffer_desc.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
buffer_desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_desc.queueFamilyIndexCount = 0;
buffer_desc.pQueueFamilyIndices = NULL;
VK_CHECK(qvkCreateBuffer(vk.device, &buffer_desc, NULL, &vk_world.staging_buffer));
qvkGetBufferMemoryRequirements(vk.device, vk_world.staging_buffer, &memory_requirements);
memory_type = find_memory_type(vk.physical_device, memory_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = memory_requirements.size;
alloc_info.memoryTypeIndex = memory_type;
VK_CHECK(qvkAllocateMemory(vk.device, &alloc_info, NULL, &vk_world.staging_buffer_memory));
VK_CHECK(qvkBindBufferMemory(vk.device, vk_world.staging_buffer, vk_world.staging_buffer_memory, 0));
VK_CHECK(qvkMapMemory(vk.device, vk_world.staging_buffer_memory, 0, VK_WHOLE_SIZE, 0, &data));
vk_world.staging_buffer_ptr = (byte*)data;
SET_OBJECT_NAME( vk_world.staging_buffer, "staging buffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT );
SET_OBJECT_NAME( vk_world.staging_buffer_memory, "staging buffer memory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
}
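// The staging buffer only ever grows and remains persistently mapped via
// vk_world.staging_buffer_ptr; since HOST_COHERENT memory is requested, CPU
// writes need no explicit vkFlushMappedMemoryRanges() before the transfer.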
#ifdef USE_VK_VALIDATION
static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(VkDebugReportFlagsEXT flags, VkDebugReportObjectTypeEXT object_type, uint64_t object, size_t location,
int32_t message_code, const char* layer_prefix, const char* message, void* user_data) {
#ifdef _WIN32
MessageBoxA( 0, message, layer_prefix, MB_ICONWARNING );
OutputDebugStringA( message );
OutputDebugStringA( "\n" );
DebugBreak();
#endif
return VK_FALSE;
}
#endif
static qboolean used_instance_extension( const char *ext )
{
const char *u;
// allow all VK_*_surface extensions
u = strrchr( ext, '_' );
if ( u && Q_stricmp( u + 1, "surface" ) == 0 )
return qtrue;
if ( Q_stricmp( ext, VK_KHR_DISPLAY_EXTENSION_NAME ) == 0 )
return qtrue; // needed for KMSDRM instances/devices?
if ( Q_stricmp( ext, VK_KHR_SWAPCHAIN_EXTENSION_NAME ) == 0 )
return qtrue;
#ifdef USE_VK_VALIDATION
if ( Q_stricmp( ext, VK_EXT_DEBUG_REPORT_EXTENSION_NAME ) == 0 )
return qtrue;
#endif
if ( Q_stricmp( ext, VK_EXT_DEBUG_UTILS_EXTENSION_NAME ) == 0 )
return qtrue;
if ( Q_stricmp( ext, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME ) == 0 )
return qtrue;
if ( Q_stricmp( ext, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME ) == 0 )
return qtrue;
return qfalse;
}
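// create_instance() below enables only the extensions that pass this
// whitelist; everything else the loader reports is ignored rather than
// enabled blindly.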
static void create_instance( void )
{
#ifdef USE_VK_VALIDATION
const char* validation_layer_name = "VK_LAYER_LUNARG_standard_validation";
const char* validation_layer_name2 = "VK_LAYER_KHRONOS_validation";
#endif
VkInstanceCreateInfo desc;
VkInstanceCreateFlags flags;
VkExtensionProperties *extension_properties;
VkResult res;
const char **extension_names;
uint32_t i, n, count, extension_count;
VkApplicationInfo appInfo;
flags = 0;
count = 0;
extension_count = 0;
VK_CHECK(qvkEnumerateInstanceExtensionProperties(NULL, &count, NULL));
extension_properties = (VkExtensionProperties *)ri.Malloc(sizeof(VkExtensionProperties) * count);
extension_names = (const char**)ri.Malloc(sizeof(char *) * count);
VK_CHECK( qvkEnumerateInstanceExtensionProperties( NULL, &count, extension_properties ) );
for ( i = 0; i < count; i++ ) {
const char *ext = extension_properties[i].extensionName;
if ( !used_instance_extension( ext ) ) {
continue;
}
// search for duplicates
for ( n = 0; n < extension_count; n++ ) {
if ( Q_stricmp( ext, extension_names[ n ] ) == 0 ) {
break;
}
}
if ( n != extension_count ) {
continue;
}
extension_names[ extension_count++ ] = ext;
if ( Q_stricmp( ext, VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME ) == 0 ) {
flags |= VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR;
}
ri.Printf(PRINT_DEVELOPER, "instance extension: %s\n", ext);
}
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pNext = NULL;
appInfo.pApplicationName = NULL; // Q3_VERSION;
appInfo.applicationVersion = 0x0;
appInfo.pEngineName = NULL;
appInfo.engineVersion = 0x0;
appInfo.apiVersion = VK_API_VERSION_1_0;
// create instance
desc.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
desc.pNext = NULL;
desc.flags = flags;
desc.pApplicationInfo = &appInfo;
desc.enabledExtensionCount = extension_count;
desc.ppEnabledExtensionNames = extension_names;
#ifdef USE_VK_VALIDATION
desc.enabledLayerCount = 1;
desc.ppEnabledLayerNames = &validation_layer_name;
res = qvkCreateInstance( &desc, NULL, &vk.instance );
if ( res == VK_ERROR_LAYER_NOT_PRESENT ) {
desc.enabledLayerCount = 1;
desc.ppEnabledLayerNames = &validation_layer_name2;
res = qvkCreateInstance( &desc, NULL, &vk.instance );
if ( res == VK_ERROR_LAYER_NOT_PRESENT ) {
ri.Printf( PRINT_WARNING, "...validation layer is not available\n" );
// try without validation layer
desc.enabledLayerCount = 0;
desc.ppEnabledLayerNames = NULL;
res = qvkCreateInstance( &desc, NULL, &vk.instance );
}
}
#else
desc.enabledLayerCount = 0;
desc.ppEnabledLayerNames = NULL;
res = qvkCreateInstance( &desc, NULL, &vk.instance );
#endif
ri.Free( (void*)extension_names );
ri.Free( extension_properties );
if ( res != VK_SUCCESS ) {
ri.Error( ERR_FATAL, "Vulkan: instance creation failed with %s", vk_result_string( res ) );
}
}
static VkFormat get_depth_format( VkPhysicalDevice physical_device ) {
VkFormatProperties props;
VkFormat formats[2];
int i;
if (r_stencilbits->integer > 0) {
formats[0] = glConfig.depthBits == 16 ? VK_FORMAT_D16_UNORM_S8_UINT : VK_FORMAT_D24_UNORM_S8_UINT;
formats[1] = VK_FORMAT_D32_SFLOAT_S8_UINT;
glConfig.stencilBits = 8;
} else {
formats[0] = glConfig.depthBits == 16 ? VK_FORMAT_D16_UNORM : VK_FORMAT_X8_D24_UNORM_PACK32;
formats[1] = VK_FORMAT_D32_SFLOAT;
glConfig.stencilBits = 0;
}
for ( i = 0; i < ARRAY_LEN( formats ); i++ ) {
qvkGetPhysicalDeviceFormatProperties( physical_device, formats[i], &props );
if ( ( props.optimalTilingFeatures & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT ) != 0 ) {
return formats[i];
}
}
ri.Error( ERR_FATAL, "get_depth_format: failed to find depth attachment format" );
return VK_FORMAT_UNDEFINED; // never get here
}
// Check if we can use vkCmdBlitImage for the given source and destination image formats.
static qboolean vk_blit_enabled( VkPhysicalDevice physical_device, const VkFormat srcFormat, const VkFormat dstFormat )
{
VkFormatProperties formatProps;
qvkGetPhysicalDeviceFormatProperties( physical_device, srcFormat, &formatProps );
if ( ( formatProps.optimalTilingFeatures & VK_FORMAT_FEATURE_BLIT_SRC_BIT ) == 0 ) {
return qfalse;
}
qvkGetPhysicalDeviceFormatProperties( physical_device, dstFormat, &formatProps );
if ( ( formatProps.linearTilingFeatures & VK_FORMAT_FEATURE_BLIT_DST_BIT ) == 0 ) {
return qfalse;
}
return qtrue;
}
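// The destination is deliberately checked against linearTilingFeatures: the
// capture image that screenshots blit into is presumably created with linear
// tiling so the host can map and read it back directly.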
static VkFormat get_hdr_format( VkFormat base_format )
{
if ( r_fbo->integer == 0 ) {
return base_format;
}
switch ( r_hdr->integer ) {
case -1: return VK_FORMAT_B4G4R4A4_UNORM_PACK16;
case 1: return VK_FORMAT_R16G16B16A16_UNORM;
default: return base_format;
}
}
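// r_hdr -1 selects a low-precision 4:4:4:4 color buffer (presumably useful for
// spotting banding/dither issues), r_hdr 1 a 16-bit-per-channel buffer; with
// FBO rendering disabled the swapchain format is used unchanged.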
typedef struct {
int bits;
VkFormat rgb;
VkFormat bgr;
} present_format_t;
static const present_format_t present_formats[] = {
//{12, VK_FORMAT_B4G4R4A4_UNORM_PACK16, VK_FORMAT_R4G4B4A4_UNORM_PACK16},
//{15, VK_FORMAT_B5G5R5A1_UNORM_PACK16, VK_FORMAT_R5G5B5A1_UNORM_PACK16},
{16, VK_FORMAT_B5G6R5_UNORM_PACK16, VK_FORMAT_R5G6B5_UNORM_PACK16},
{24, VK_FORMAT_B8G8R8A8_UNORM, VK_FORMAT_R8G8B8A8_UNORM},
{30, VK_FORMAT_A2B10G10R10_UNORM_PACK32, VK_FORMAT_A2R10G10B10_UNORM_PACK32},
//{32, VK_FORMAT_B10G11R11_UFLOAT_PACK32, VK_FORMAT_B10G11R11_UFLOAT_PACK32}
};
static void get_present_format( int present_bits, VkFormat *bgr, VkFormat *rgb ) {
const present_format_t *pf, *sel;
int i;
sel = NULL;
pf = present_formats;
for ( i = 0; i < ARRAY_LEN( present_formats ); i++, pf++ ) {
if ( pf->bits <= present_bits ) {
sel = pf;
}
}
if ( !sel ) {
*bgr = VK_FORMAT_B8G8R8A8_UNORM;
*rgb = VK_FORMAT_R8G8B8A8_UNORM;
} else {
*bgr = sel->bgr;
*rgb = sel->rgb;
}
}
static qboolean vk_select_surface_format( VkPhysicalDevice physical_device, VkSurfaceKHR surface )
{
VkFormat base_bgr, base_rgb;
VkFormat ext_bgr, ext_rgb;
VkSurfaceFormatKHR *candidates;
uint32_t format_count;
VkResult res;
res = qvkGetPhysicalDeviceSurfaceFormatsKHR( physical_device, surface, &format_count, NULL );
if ( res < 0 ) {
ri.Printf( PRINT_ERROR, "vkGetPhysicalDeviceSurfaceFormatsKHR returned %s\n", vk_result_string( res ) );
return qfalse;
}
if ( format_count == 0 ) {
ri.Printf( PRINT_ERROR, "...no surface formats found\n" );
return qfalse;
}
candidates = (VkSurfaceFormatKHR*)ri.Malloc( format_count * sizeof(VkSurfaceFormatKHR) );
VK_CHECK( qvkGetPhysicalDeviceSurfaceFormatsKHR( physical_device, surface, &format_count, candidates ) );
get_present_format( 24, &base_bgr, &base_rgb );
if ( r_fbo->integer ) {
get_present_format( r_presentBits->integer, &ext_bgr, &ext_rgb );
} else {
ext_bgr = base_bgr;
ext_rgb = base_rgb;
}
if ( format_count == 1 && candidates[0].format == VK_FORMAT_UNDEFINED ) {
// special case that means we can choose any format
vk.base_format.format = base_bgr;
vk.base_format.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
vk.present_format.format = ext_bgr;
vk.present_format.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
}
else {
uint32_t i;
for ( i = 0; i < format_count; i++ ) {
if ( ( candidates[i].format == base_bgr || candidates[i].format == base_rgb ) && candidates[i].colorSpace == VK_COLORSPACE_SRGB_NONLINEAR_KHR ) {
vk.base_format = candidates[i];
break;
}
}
if ( i == format_count ) {
vk.base_format = candidates[0];
}
for ( i = 0; i < format_count; i++ ) {
if ( ( candidates[i].format == ext_bgr || candidates[i].format == ext_rgb ) && candidates[i].colorSpace == VK_COLORSPACE_SRGB_NONLINEAR_KHR ) {
vk.present_format = candidates[i];
break;
}
}
if ( i == format_count ) {
vk.present_format = vk.base_format;
}
}
if ( !r_fbo->integer ) {
vk.present_format = vk.base_format;
}
ri.Free( candidates );
return qtrue;
}
static void setup_surface_formats( VkPhysicalDevice physical_device )
{
vk.depth_format = get_depth_format( physical_device );
vk.color_format = get_hdr_format( vk.base_format.format );
vk.capture_format = VK_FORMAT_R8G8B8A8_UNORM;
vk.bloom_format = vk.base_format.format;
vk.blitEnabled = vk_blit_enabled( physical_device, vk.color_format, vk.capture_format );
if ( !vk.blitEnabled )
{
vk.capture_format = vk.color_format;
}
}
static const char *renderer_name( const VkPhysicalDeviceProperties *props ) {
static char buf[sizeof( props->deviceName ) + 64];
const char *device_type;
switch ( props->deviceType ) {
case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU: device_type = "Integrated"; break;
case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU: device_type = "Discrete"; break;
case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU: device_type = "Virtual"; break;
case VK_PHYSICAL_DEVICE_TYPE_CPU: device_type = "CPU"; break;
default: device_type = "OTHER"; break;
}
Com_sprintf( buf, sizeof( buf ), "%s %s, 0x%04x",
device_type, props->deviceName, props->deviceID );
return buf;
}
static qboolean vk_create_device( VkPhysicalDevice physical_device, int device_index ) {
ri.Printf( PRINT_ALL, "...selected physical device: %i\n", device_index );
// select surface format
if ( !vk_select_surface_format( physical_device, vk.surface ) ) {
return qfalse;
}
setup_surface_formats( physical_device );
// select queue family
{
VkQueueFamilyProperties *queue_families;
uint32_t queue_family_count;
uint32_t i;
qvkGetPhysicalDeviceQueueFamilyProperties( physical_device, &queue_family_count, NULL );
queue_families = (VkQueueFamilyProperties*)ri.Malloc( queue_family_count * sizeof( VkQueueFamilyProperties ) );
qvkGetPhysicalDeviceQueueFamilyProperties( physical_device, &queue_family_count, queue_families );
// select queue family with presentation and graphics support
vk.queue_family_index = ~0U;
for (i = 0; i < queue_family_count; i++) {
VkBool32 presentation_supported;
VK_CHECK( qvkGetPhysicalDeviceSurfaceSupportKHR( physical_device, i, vk.surface, &presentation_supported ) );
if (presentation_supported && (queue_families[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) != 0) {
vk.queue_family_index = i;
break;
}
}
ri.Free( queue_families );
if ( vk.queue_family_index == ~0U ) {
ri.Printf( PRINT_ERROR, "...failed to find graphics queue family\n" );
return qfalse;
}
}
// create VkDevice
{
const char *device_extension_list[4];
uint32_t device_extension_count;
const char *ext, *end;
char *str;
const float priority = 1.0;
VkExtensionProperties *extension_properties;
VkDeviceQueueCreateInfo queue_desc;
VkPhysicalDeviceFeatures device_features;
VkPhysicalDeviceFeatures features;
VkDeviceCreateInfo device_desc;
VkResult res;
qboolean swapchainSupported = qfalse;
qboolean dedicatedAllocation = qfalse;
qboolean memoryRequirements2 = qfalse;
qboolean debugMarker = qfalse;
uint32_t i, len, count = 0;
VK_CHECK( qvkEnumerateDeviceExtensionProperties( physical_device, NULL, &count, NULL ) );
extension_properties = (VkExtensionProperties*)ri.Malloc( count * sizeof( VkExtensionProperties ) );
VK_CHECK( qvkEnumerateDeviceExtensionProperties( physical_device, NULL, &count, extension_properties ) );
// fill glConfig.extensions_string
str = glConfig.extensions_string; *str = '\0';
end = &glConfig.extensions_string[ sizeof( glConfig.extensions_string ) - 1];
for ( i = 0; i < count; i++ ) {
ext = extension_properties[i].extensionName;
if ( strcmp( ext, VK_KHR_SWAPCHAIN_EXTENSION_NAME ) == 0 ) {
swapchainSupported = qtrue;
} else if ( strcmp( ext, VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME ) == 0 ) {
dedicatedAllocation = qtrue;
} else if ( strcmp( ext, VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME ) == 0 ) {
memoryRequirements2 = qtrue;
} else if ( strcmp( ext, VK_EXT_DEBUG_MARKER_EXTENSION_NAME ) == 0 ) {
debugMarker = qtrue;
}
// add this device extension to glConfig
if ( i != 0 ) {
if ( str + 1 >= end )
continue;
str = Q_stradd( str, " " );
}
len = (uint32_t)strlen( ext );
if ( str + len >= end )
continue;
str = Q_stradd( str, ext );
}
ri.Free( extension_properties );
device_extension_count = 0;
if ( !swapchainSupported ) {
ri.Printf( PRINT_ERROR, "...required device extension is not available: %s\n", VK_KHR_SWAPCHAIN_EXTENSION_NAME );
return qfalse;
}
if ( !memoryRequirements2 )
dedicatedAllocation = qfalse;
else
vk.dedicatedAllocation = dedicatedAllocation;
#ifndef USE_DEDICATED_ALLOCATION
vk.dedicatedAllocation = qfalse;
#endif
device_extension_list[ device_extension_count++ ] = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
if ( vk.dedicatedAllocation ) {
device_extension_list[ device_extension_count++ ] = VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME;
device_extension_list[ device_extension_count++ ] = VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME;
}
if ( debugMarker ) {
device_extension_list[ device_extension_count++ ] = VK_EXT_DEBUG_MARKER_EXTENSION_NAME;
vk.debugMarkers = qtrue;
}
qvkGetPhysicalDeviceFeatures( physical_device, &device_features );
if ( device_features.fillModeNonSolid == VK_FALSE ) {
ri.Printf( PRINT_ERROR, "...fillModeNonSolid feature is not supported\n" );
return qfalse;
}
queue_desc.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queue_desc.pNext = NULL;
queue_desc.flags = 0;
queue_desc.queueFamilyIndex = vk.queue_family_index;
queue_desc.queueCount = 1;
queue_desc.pQueuePriorities = &priority;
Com_Memset( &features, 0, sizeof( features ) );
features.fillModeNonSolid = VK_TRUE;
if ( device_features.wideLines ) { // needed for RB_SurfaceAxis
features.wideLines = VK_TRUE;
vk.wideLines = qtrue;
}
if ( device_features.fragmentStoresAndAtomics ) {
features.fragmentStoresAndAtomics = VK_TRUE;
vk.fragmentStores = qtrue;
}
if ( r_ext_texture_filter_anisotropic->integer && device_features.samplerAnisotropy ) {
features.samplerAnisotropy = VK_TRUE;
vk.samplerAnisotropy = qtrue;
}
device_desc.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
device_desc.pNext = NULL;
device_desc.flags = 0;
device_desc.queueCreateInfoCount = 1;
device_desc.pQueueCreateInfos = &queue_desc;
device_desc.enabledLayerCount = 0;
device_desc.ppEnabledLayerNames = NULL;
device_desc.enabledExtensionCount = device_extension_count;
device_desc.ppEnabledExtensionNames = device_extension_list;
device_desc.pEnabledFeatures = &features;
res = qvkCreateDevice( physical_device, &device_desc, NULL, &vk.device );
if ( res < 0 ) {
ri.Printf( PRINT_ERROR, "vkCreateDevice returned %s\n", vk_result_string( res ) );
return qfalse;
}
}
return qtrue;
}
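// vk_create_device() reports failure instead of aborting so that
// init_vulkan_library() can rotate to the next enumerated physical device.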
#define INIT_INSTANCE_FUNCTION(func) \
q##func = /*(PFN_ ## func)*/ ri.VK_GetInstanceProcAddr(vk.instance, #func); \
if (q##func == NULL) { \
ri.Error(ERR_FATAL, "Failed to find entrypoint %s", #func); \
}
#define INIT_INSTANCE_FUNCTION_EXT(func) \
q##func = /*(PFN_ ## func)*/ ri.VK_GetInstanceProcAddr(vk.instance, #func);
#define INIT_DEVICE_FUNCTION(func) \
q##func = (PFN_ ## func) qvkGetDeviceProcAddr(vk.device, #func);\
if (q##func == NULL) { \
ri.Error(ERR_FATAL, "Failed to find entrypoint %s", #func); \
}
#define INIT_DEVICE_FUNCTION_EXT(func) \
q##func = (PFN_ ## func) qvkGetDeviceProcAddr(vk.device, #func);
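// The *_EXT macro variants deliberately omit the NULL check so that optional
// extension entry points (e.g. vkCreateDebugReportCallbackEXT,
// vkDebugMarkerSetObjectNameEXT) can be probed; callers must test the
// resulting pointer before use.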
static void init_vulkan_library( void )
{
VkPhysicalDeviceProperties props;
VkPhysicalDevice *physical_devices;
uint32_t device_count;
int device_index, i;
VkResult res;
Com_Memset( &vk, 0, sizeof( vk ) );
//
// Get functions that do not depend on VkInstance (vk.instance == NULL at this point).
//
INIT_INSTANCE_FUNCTION(vkCreateInstance)
INIT_INSTANCE_FUNCTION(vkEnumerateInstanceExtensionProperties)
//
// Get instance level functions.
//
create_instance();
INIT_INSTANCE_FUNCTION(vkCreateDevice)
INIT_INSTANCE_FUNCTION(vkDestroyInstance)
INIT_INSTANCE_FUNCTION(vkEnumerateDeviceExtensionProperties)
INIT_INSTANCE_FUNCTION(vkEnumeratePhysicalDevices)
INIT_INSTANCE_FUNCTION(vkGetDeviceProcAddr)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceFeatures)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceFormatProperties)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceMemoryProperties)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceProperties)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceQueueFamilyProperties)
INIT_INSTANCE_FUNCTION(vkDestroySurfaceKHR)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceFormatsKHR)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfacePresentModesKHR)
INIT_INSTANCE_FUNCTION(vkGetPhysicalDeviceSurfaceSupportKHR)
#ifdef USE_VK_VALIDATION
INIT_INSTANCE_FUNCTION_EXT(vkCreateDebugReportCallbackEXT)
INIT_INSTANCE_FUNCTION_EXT(vkDestroyDebugReportCallbackEXT)
//
// Create debug callback.
//
if ( qvkCreateDebugReportCallbackEXT && qvkDestroyDebugReportCallbackEXT )
{
VkDebugReportCallbackCreateInfoEXT desc;
desc.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
desc.pNext = NULL;
desc.flags = VK_DEBUG_REPORT_WARNING_BIT_EXT |
VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT |
VK_DEBUG_REPORT_ERROR_BIT_EXT;
desc.pfnCallback = &debug_callback;
desc.pUserData = NULL;
VK_CHECK(qvkCreateDebugReportCallbackEXT(vk.instance, &desc, NULL, &vk.debug_callback));
}
#endif
// create surface
if ( !ri.VK_CreateSurface( vk.instance, &vk.surface ) ) {
ri.Error( ERR_FATAL, "Error creating Vulkan surface" );
return;
}
res = qvkEnumeratePhysicalDevices( vk.instance, &device_count, NULL );
// check the result first: device_count is not written if the call failed
if ( res < 0 ) {
ri.Error( ERR_FATAL, "vkEnumeratePhysicalDevices returned %s", vk_result_string( res ) );
return;
}
if ( device_count == 0 ) {
ri.Error( ERR_FATAL, "Vulkan: no physical devices found" );
return;
}
physical_devices = (VkPhysicalDevice*)ri.Malloc( device_count * sizeof( VkPhysicalDevice ) );
VK_CHECK( qvkEnumeratePhysicalDevices( vk.instance, &device_count, physical_devices ) );
// initial physical device index
device_index = r_device->integer;
ri.Printf( PRINT_ALL, ".......................\nAvailable physical devices:\n" );
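// r_device holds an explicit adapter index, or -1 to prefer a discrete GPU, -2 to prefer an integrated one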
for ( i = 0; i < device_count; i++ ) {
qvkGetPhysicalDeviceProperties( physical_devices[ i ], &props );
ri.Printf( PRINT_ALL, " %i: %s\n", i, renderer_name( &props ) );
if ( device_index == -1 && props.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU ) {
device_index = i;
} else if ( device_index == -2 && props.deviceType == VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU ) {
device_index = i;
}
}
ri.Printf( PRINT_ALL, ".......................\n" );
vk.physical_device = VK_NULL_HANDLE;
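// try the selected device first, then fall back to the remaining ones in round-robin order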
for ( i = 0; i < device_count; i++, device_index++ ) {
if ( device_index >= device_count || device_index < 0 ) {
device_index = 0;
}
if ( vk_create_device( physical_devices[ device_index ], device_index ) ) {
vk.physical_device = physical_devices[ device_index ];
break;
}
}
ri.Free( physical_devices );
if ( vk.physical_device == VK_NULL_HANDLE ) {
ri.Error( ERR_FATAL, "Vulkan: unable to find any suitable physical device" );
return;
}
//
// Get device level functions.
//
INIT_DEVICE_FUNCTION(vkAllocateCommandBuffers)
INIT_DEVICE_FUNCTION(vkAllocateDescriptorSets)
INIT_DEVICE_FUNCTION(vkAllocateMemory)
INIT_DEVICE_FUNCTION(vkBeginCommandBuffer)
INIT_DEVICE_FUNCTION(vkBindBufferMemory)
INIT_DEVICE_FUNCTION(vkBindImageMemory)
INIT_DEVICE_FUNCTION(vkCmdBeginRenderPass)
INIT_DEVICE_FUNCTION(vkCmdBindDescriptorSets)
INIT_DEVICE_FUNCTION(vkCmdBindIndexBuffer)
INIT_DEVICE_FUNCTION(vkCmdBindPipeline)
INIT_DEVICE_FUNCTION(vkCmdBindVertexBuffers)
INIT_DEVICE_FUNCTION(vkCmdBlitImage)
INIT_DEVICE_FUNCTION(vkCmdClearAttachments)
INIT_DEVICE_FUNCTION(vkCmdCopyBuffer)
INIT_DEVICE_FUNCTION(vkCmdCopyBufferToImage)
INIT_DEVICE_FUNCTION(vkCmdCopyImage)
INIT_DEVICE_FUNCTION(vkCmdDraw)
INIT_DEVICE_FUNCTION(vkCmdDrawIndexed)
INIT_DEVICE_FUNCTION(vkCmdEndRenderPass)
INIT_DEVICE_FUNCTION(vkCmdNextSubpass)
INIT_DEVICE_FUNCTION(vkCmdPipelineBarrier)
INIT_DEVICE_FUNCTION(vkCmdPushConstants)
INIT_DEVICE_FUNCTION(vkCmdSetDepthBias)
INIT_DEVICE_FUNCTION(vkCmdSetScissor)
INIT_DEVICE_FUNCTION(vkCmdSetViewport)
INIT_DEVICE_FUNCTION(vkCreateBuffer)
INIT_DEVICE_FUNCTION(vkCreateCommandPool)
INIT_DEVICE_FUNCTION(vkCreateDescriptorPool)
INIT_DEVICE_FUNCTION(vkCreateDescriptorSetLayout)
INIT_DEVICE_FUNCTION(vkCreateFence)
INIT_DEVICE_FUNCTION(vkCreateFramebuffer)
INIT_DEVICE_FUNCTION(vkCreateGraphicsPipelines)
INIT_DEVICE_FUNCTION(vkCreateImage)
INIT_DEVICE_FUNCTION(vkCreateImageView)
INIT_DEVICE_FUNCTION(vkCreatePipelineCache)
INIT_DEVICE_FUNCTION(vkCreatePipelineLayout)
INIT_DEVICE_FUNCTION(vkCreateRenderPass)
INIT_DEVICE_FUNCTION(vkCreateSampler)
INIT_DEVICE_FUNCTION(vkCreateSemaphore)
INIT_DEVICE_FUNCTION(vkCreateShaderModule)
INIT_DEVICE_FUNCTION(vkDestroyBuffer)
INIT_DEVICE_FUNCTION(vkDestroyCommandPool)
INIT_DEVICE_FUNCTION(vkDestroyDescriptorPool)
INIT_DEVICE_FUNCTION(vkDestroyDescriptorSetLayout)
INIT_DEVICE_FUNCTION(vkDestroyDevice)
INIT_DEVICE_FUNCTION(vkDestroyFence)
INIT_DEVICE_FUNCTION(vkDestroyFramebuffer)
INIT_DEVICE_FUNCTION(vkDestroyImage)
INIT_DEVICE_FUNCTION(vkDestroyImageView)
INIT_DEVICE_FUNCTION(vkDestroyPipeline)
INIT_DEVICE_FUNCTION(vkDestroyPipelineCache)
INIT_DEVICE_FUNCTION(vkDestroyPipelineLayout)
INIT_DEVICE_FUNCTION(vkDestroyRenderPass)
INIT_DEVICE_FUNCTION(vkDestroySampler)
INIT_DEVICE_FUNCTION(vkDestroySemaphore)
INIT_DEVICE_FUNCTION(vkDestroyShaderModule)
INIT_DEVICE_FUNCTION(vkDeviceWaitIdle)
INIT_DEVICE_FUNCTION(vkEndCommandBuffer)
INIT_DEVICE_FUNCTION(vkFlushMappedMemoryRanges)
INIT_DEVICE_FUNCTION(vkFreeCommandBuffers)
INIT_DEVICE_FUNCTION(vkFreeDescriptorSets)
INIT_DEVICE_FUNCTION(vkFreeMemory)
INIT_DEVICE_FUNCTION(vkGetBufferMemoryRequirements)
INIT_DEVICE_FUNCTION(vkGetDeviceQueue)
INIT_DEVICE_FUNCTION(vkGetImageMemoryRequirements)
INIT_DEVICE_FUNCTION(vkGetImageSubresourceLayout)
INIT_DEVICE_FUNCTION(vkInvalidateMappedMemoryRanges)
INIT_DEVICE_FUNCTION(vkMapMemory)
INIT_DEVICE_FUNCTION(vkQueueSubmit)
INIT_DEVICE_FUNCTION(vkQueueWaitIdle)
INIT_DEVICE_FUNCTION(vkResetCommandBuffer)
INIT_DEVICE_FUNCTION(vkResetDescriptorPool)
INIT_DEVICE_FUNCTION(vkResetFences)
INIT_DEVICE_FUNCTION(vkUnmapMemory)
INIT_DEVICE_FUNCTION(vkUpdateDescriptorSets)
INIT_DEVICE_FUNCTION(vkWaitForFences)
INIT_DEVICE_FUNCTION(vkAcquireNextImageKHR)
INIT_DEVICE_FUNCTION(vkCreateSwapchainKHR)
INIT_DEVICE_FUNCTION(vkDestroySwapchainKHR)
INIT_DEVICE_FUNCTION(vkGetSwapchainImagesKHR)
INIT_DEVICE_FUNCTION(vkQueuePresentKHR)
if ( vk.dedicatedAllocation ) {
INIT_DEVICE_FUNCTION_EXT(vkGetBufferMemoryRequirements2KHR);
INIT_DEVICE_FUNCTION_EXT(vkGetImageMemoryRequirements2KHR);
if ( !qvkGetBufferMemoryRequirements2KHR || !qvkGetImageMemoryRequirements2KHR ) {
vk.dedicatedAllocation = qfalse;
}
}
if ( vk.debugMarkers ) {
INIT_DEVICE_FUNCTION_EXT(vkDebugMarkerSetObjectNameEXT)
}
}
#undef INIT_INSTANCE_FUNCTION
#undef INIT_DEVICE_FUNCTION
#undef INIT_DEVICE_FUNCTION_EXT
static void deinit_vulkan_library( void )
{
qvkCreateInstance = NULL;
qvkEnumerateInstanceExtensionProperties = NULL;
qvkCreateDevice = NULL;
qvkDestroyInstance = NULL;
qvkEnumerateDeviceExtensionProperties = NULL;
qvkEnumeratePhysicalDevices = NULL;
qvkGetDeviceProcAddr = NULL;
qvkGetPhysicalDeviceFeatures = NULL;
qvkGetPhysicalDeviceFormatProperties = NULL;
qvkGetPhysicalDeviceMemoryProperties = NULL;
qvkGetPhysicalDeviceProperties = NULL;
qvkGetPhysicalDeviceQueueFamilyProperties = NULL;
qvkDestroySurfaceKHR = NULL;
qvkGetPhysicalDeviceSurfaceCapabilitiesKHR = NULL;
qvkGetPhysicalDeviceSurfaceFormatsKHR = NULL;
qvkGetPhysicalDeviceSurfacePresentModesKHR = NULL;
qvkGetPhysicalDeviceSurfaceSupportKHR = NULL;
#ifdef USE_VK_VALIDATION
qvkCreateDebugReportCallbackEXT = NULL;
qvkDestroyDebugReportCallbackEXT = NULL;
#endif
qvkAllocateCommandBuffers = NULL;
qvkAllocateDescriptorSets = NULL;
qvkAllocateMemory = NULL;
qvkBeginCommandBuffer = NULL;
qvkBindBufferMemory = NULL;
qvkBindImageMemory = NULL;
qvkCmdBeginRenderPass = NULL;
qvkCmdBindDescriptorSets = NULL;
qvkCmdBindIndexBuffer = NULL;
qvkCmdBindPipeline = NULL;
qvkCmdBindVertexBuffers = NULL;
qvkCmdBlitImage = NULL;
qvkCmdClearAttachments = NULL;
qvkCmdCopyBuffer = NULL;
qvkCmdCopyBufferToImage = NULL;
qvkCmdCopyImage = NULL;
qvkCmdDraw = NULL;
qvkCmdDrawIndexed = NULL;
qvkCmdEndRenderPass = NULL;
qvkCmdNextSubpass = NULL;
qvkCmdPipelineBarrier = NULL;
qvkCmdPushConstants = NULL;
qvkCmdSetDepthBias = NULL;
qvkCmdSetScissor = NULL;
qvkCmdSetViewport = NULL;
qvkCreateBuffer = NULL;
qvkCreateCommandPool = NULL;
qvkCreateDescriptorPool = NULL;
qvkCreateDescriptorSetLayout = NULL;
qvkCreateFence = NULL;
qvkCreateFramebuffer = NULL;
qvkCreateGraphicsPipelines = NULL;
qvkCreateImage = NULL;
qvkCreateImageView = NULL;
qvkCreatePipelineCache = NULL;
qvkCreatePipelineLayout = NULL;
qvkCreateRenderPass = NULL;
qvkCreateSampler = NULL;
qvkCreateSemaphore = NULL;
qvkCreateShaderModule = NULL;
qvkDestroyBuffer = NULL;
qvkDestroyCommandPool = NULL;
qvkDestroyDescriptorPool = NULL;
qvkDestroyDescriptorSetLayout = NULL;
qvkDestroyDevice = NULL;
qvkDestroyFence = NULL;
qvkDestroyFramebuffer = NULL;
qvkDestroyImage = NULL;
qvkDestroyImageView = NULL;
qvkDestroyPipeline = NULL;
qvkDestroyPipelineCache = NULL;
qvkDestroyPipelineLayout = NULL;
qvkDestroyRenderPass = NULL;
qvkDestroySampler = NULL;
qvkDestroySemaphore = NULL;
qvkDestroyShaderModule = NULL;
qvkDeviceWaitIdle = NULL;
qvkEndCommandBuffer = NULL;
qvkFlushMappedMemoryRanges = NULL;
qvkFreeCommandBuffers = NULL;
qvkFreeDescriptorSets = NULL;
qvkFreeMemory = NULL;
qvkGetBufferMemoryRequirements = NULL;
qvkGetDeviceQueue = NULL;
qvkGetImageMemoryRequirements = NULL;
qvkGetImageSubresourceLayout = NULL;
qvkInvalidateMappedMemoryRanges = NULL;
qvkMapMemory = NULL;
qvkQueueSubmit = NULL;
qvkQueueWaitIdle = NULL;
qvkResetCommandBuffer = NULL;
qvkResetDescriptorPool = NULL;
qvkResetFences = NULL;
qvkUnmapMemory = NULL;
qvkUpdateDescriptorSets = NULL;
qvkWaitForFences = NULL;
qvkAcquireNextImageKHR = NULL;
qvkCreateSwapchainKHR = NULL;
qvkDestroySwapchainKHR = NULL;
qvkGetSwapchainImagesKHR = NULL;
qvkQueuePresentKHR = NULL;
qvkGetBufferMemoryRequirements2KHR = NULL;
qvkGetImageMemoryRequirements2KHR = NULL;
qvkDebugMarkerSetObjectNameEXT = NULL;
}
static VkShaderModule SHADER_MODULE(const uint8_t *bytes, const int count) {
VkShaderModuleCreateInfo desc;
VkShaderModule module;
if ( count % 4 != 0 ) {
ri.Error( ERR_FATAL, "Vulkan: SPIR-V binary buffer size is not a multiple of 4" );
}
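// note: Vulkan also requires pCode to be 4-byte aligned; the embedded SPIR-V
// arrays are assumed to satisfy this (a safe alternative would be to copy the
// bytes into a uint32_t-aligned buffer first)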
desc.sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.codeSize = count;
desc.pCode = (const uint32_t*)bytes;
VK_CHECK(qvkCreateShaderModule(vk.device, &desc, NULL, &module));
return module;
}
static void vk_create_layout_binding( int binding, VkDescriptorType type, VkShaderStageFlags flags, VkDescriptorSetLayout *layout )
{
VkDescriptorSetLayoutBinding bind;
VkDescriptorSetLayoutCreateInfo desc;
bind.binding = binding;
bind.descriptorType = type;
bind.descriptorCount = 1;
bind.stageFlags = flags;
bind.pImmutableSamplers = NULL;
desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.bindingCount = 1;
desc.pBindings = &bind;
VK_CHECK( qvkCreateDescriptorSetLayout(vk.device, &desc, NULL, layout ) );
}
void vk_update_uniform_descriptor( VkDescriptorSet descriptor, VkBuffer buffer )
{
VkDescriptorBufferInfo info;
VkWriteDescriptorSet desc;
info.buffer = buffer;
info.offset = 0;
info.range = sizeof( vkUniform_t );
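// dynamic uniform descriptor: a single buffer serves all draws, the per-draw
// offset is supplied via pDynamicOffsets when the descriptor set is bound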
desc.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
desc.dstSet = descriptor;
desc.dstBinding = 0;
desc.dstArrayElement = 0;
desc.descriptorCount = 1;
desc.pNext = NULL;
desc.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
desc.pImageInfo = NULL;
desc.pBufferInfo = &info;
desc.pTexelBufferView = NULL;
qvkUpdateDescriptorSets( vk.device, 1, &desc, 0, NULL );
}
static VkSampler vk_find_sampler( const Vk_Sampler_Def *def ) {
VkSamplerAddressMode address_mode;
VkSamplerCreateInfo desc;
VkSampler sampler;
VkFilter mag_filter;
VkFilter min_filter;
VkSamplerMipmapMode mipmap_mode;
float maxLod;
int i;
// Look for sampler among existing samplers.
for ( i = 0; i < vk_world.num_samplers; i++ ) {
const Vk_Sampler_Def *cur_def = &vk_world.sampler_defs[i];
if ( memcmp( cur_def, def, sizeof( *def ) ) == 0 ) {
return vk_world.samplers[i];
}
}
// Create new sampler.
if ( vk_world.num_samplers >= MAX_VK_SAMPLERS ) {
ri.Error( ERR_DROP, "vk_find_sampler: MAX_VK_SAMPLERS hit\n" );
}
address_mode = def->address_mode;
if (def->gl_mag_filter == GL_NEAREST) {
mag_filter = VK_FILTER_NEAREST;
} else if (def->gl_mag_filter == GL_LINEAR) {
mag_filter = VK_FILTER_LINEAR;
} else {
ri.Error(ERR_FATAL, "vk_find_sampler: invalid gl_mag_filter");
return VK_NULL_HANDLE;
}
maxLod = vk.maxLod;
if (def->gl_min_filter == GL_NEAREST) {
min_filter = VK_FILTER_NEAREST;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
maxLod = 0.25f; // used to emulate OpenGL's GL_LINEAR/GL_NEAREST minification filter
} else if (def->gl_min_filter == GL_LINEAR) {
min_filter = VK_FILTER_LINEAR;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
maxLod = 0.25f; // used to emulate OpenGL's GL_LINEAR/GL_NEAREST minification filter
} else if (def->gl_min_filter == GL_NEAREST_MIPMAP_NEAREST) {
min_filter = VK_FILTER_NEAREST;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
} else if (def->gl_min_filter == GL_LINEAR_MIPMAP_NEAREST) {
min_filter = VK_FILTER_LINEAR;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
} else if (def->gl_min_filter == GL_NEAREST_MIPMAP_LINEAR) {
min_filter = VK_FILTER_NEAREST;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
} else if (def->gl_min_filter == GL_LINEAR_MIPMAP_LINEAR) {
min_filter = VK_FILTER_LINEAR;
mipmap_mode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
} else {
ri.Error(ERR_FATAL, "vk_find_sampler: invalid gl_min_filter");
return VK_NULL_HANDLE;
}
if ( def->max_lod_1_0 ) {
maxLod = 1.0f;
}
desc.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.magFilter = mag_filter;
desc.minFilter = min_filter;
desc.mipmapMode = mipmap_mode;
desc.addressModeU = address_mode;
desc.addressModeV = address_mode;
desc.addressModeW = address_mode;
desc.mipLodBias = 0.0f;
if ( def->noAnisotropy || mipmap_mode == VK_SAMPLER_MIPMAP_MODE_NEAREST || mag_filter == VK_FILTER_NEAREST ) {
desc.anisotropyEnable = VK_FALSE;
desc.maxAnisotropy = 1.0f;
} else {
desc.anisotropyEnable = (r_ext_texture_filter_anisotropic->integer && vk.samplerAnisotropy) ? VK_TRUE : VK_FALSE;
if ( desc.anisotropyEnable ) {
desc.maxAnisotropy = MIN( r_ext_max_anisotropy->integer, vk.maxAnisotropy );
} else {
desc.maxAnisotropy = 1.0f; // must not be left uninitialized even when anisotropy is disabled
}
}
desc.compareEnable = VK_FALSE;
desc.compareOp = VK_COMPARE_OP_ALWAYS;
desc.minLod = 0.0f;
desc.maxLod = maxLod;
desc.borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK;
desc.unnormalizedCoordinates = VK_FALSE;
VK_CHECK( qvkCreateSampler( vk.device, &desc, NULL, &sampler ) );
SET_OBJECT_NAME( sampler, va( "image sampler %i", vk_world.num_samplers ), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT );
vk_world.sampler_defs[ vk_world.num_samplers ] = *def;
vk_world.samplers[ vk_world.num_samplers ] = sampler;
vk_world.num_samplers++;
return sampler;
}
static void vk_update_attachment_descriptors( void ) {
if ( vk.color_image_view )
{
VkDescriptorImageInfo info;
VkWriteDescriptorSet desc;
Vk_Sampler_Def sd;
Com_Memset( &sd, 0, sizeof( sd ) );
sd.gl_mag_filter = sd.gl_min_filter = vk.blitFilter;
sd.address_mode = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
sd.max_lod_1_0 = qtrue;
sd.noAnisotropy = qtrue;
info.sampler = vk_find_sampler( &sd );
info.imageView = vk.color_image_view;
info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
desc.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
desc.dstSet = vk.color_descriptor;
desc.dstBinding = 0;
desc.dstArrayElement = 0;
desc.descriptorCount = 1;
desc.pNext = NULL;
desc.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
desc.pImageInfo = &info;
desc.pBufferInfo = NULL;
desc.pTexelBufferView = NULL;
qvkUpdateDescriptorSets( vk.device, 1, &desc, 0, NULL );
// screenmap
sd.gl_mag_filter = sd.gl_min_filter = GL_LINEAR;
sd.max_lod_1_0 = qfalse;
sd.noAnisotropy = qtrue;
info.sampler = vk_find_sampler( &sd );
info.imageView = vk.screenMap.color_image_view;
desc.dstSet = vk.screenMap.color_descriptor;
qvkUpdateDescriptorSets( vk.device, 1, &desc, 0, NULL );
// bloom images
if ( r_bloom->integer )
{
uint32_t i;
for ( i = 0; i < ARRAY_LEN( vk.bloom_image_descriptor ); i++ )
{
info.imageView = vk.bloom_image_view[i];
desc.dstSet = vk.bloom_image_descriptor[i];
qvkUpdateDescriptorSets( vk.device, 1, &desc, 0, NULL );
}
}
}
}
void vk_init_descriptors( void )
{
VkDescriptorSetAllocateInfo alloc;
VkDescriptorBufferInfo info;
VkWriteDescriptorSet desc;
uint32_t i;
alloc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc.pNext = NULL;
alloc.descriptorPool = vk.descriptor_pool;
alloc.descriptorSetCount = 1;
alloc.pSetLayouts = &vk.set_layout_storage;
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &alloc, &vk.storage.descriptor ) );
info.buffer = vk.storage.buffer;
info.offset = 0;
info.range = sizeof( uint32_t );
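// dynamic storage-buffer descriptor: range is the window visible per bind,
// the actual offset is provided via pDynamicOffsets at bind time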
desc.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
desc.dstSet = vk.storage.descriptor;
desc.dstBinding = 0;
desc.dstArrayElement = 0;
desc.descriptorCount = 1;
desc.pNext = NULL;
desc.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
desc.pImageInfo = NULL;
desc.pBufferInfo = &info;
desc.pTexelBufferView = NULL;
qvkUpdateDescriptorSets( vk.device, 1, &desc, 0, NULL );
// allocate and update a uniform descriptor set for each command buffer
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ )
{
alloc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc.pNext = NULL;
alloc.descriptorPool = vk.descriptor_pool;
alloc.descriptorSetCount = 1;
alloc.pSetLayouts = &vk.set_layout_uniform;
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &alloc, &vk.tess[i].uniform_descriptor ) );
vk_update_uniform_descriptor( vk.tess[ i ].uniform_descriptor, vk.tess[ i ].vertex_buffer );
SET_OBJECT_NAME( vk.tess[ i ].uniform_descriptor, va( "uniform descriptor %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT );
}
if ( vk.color_image_view )
{
alloc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
alloc.pNext = NULL;
alloc.descriptorPool = vk.descriptor_pool;
alloc.descriptorSetCount = 1;
alloc.pSetLayouts = &vk.set_layout_sampler;
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &alloc, &vk.color_descriptor ) );
if ( r_bloom->integer )
{
for ( i = 0; i < ARRAY_LEN( vk.bloom_image_descriptor ); i++ )
{
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &alloc, &vk.bloom_image_descriptor[i] ) );
}
}
alloc.descriptorSetCount = 1;
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &alloc, &vk.screenMap.color_descriptor ) ); // screenmap
vk_update_attachment_descriptors();
}
}
static void vk_release_geometry_buffers( void )
{
int i;
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ ) {
qvkDestroyBuffer( vk.device, vk.tess[i].vertex_buffer, NULL );
vk.tess[i].vertex_buffer = VK_NULL_HANDLE;
}
qvkFreeMemory( vk.device, vk.geometry_buffer_memory, NULL );
vk.geometry_buffer_memory = VK_NULL_HANDLE;
}
static void vk_create_geometry_buffers( VkDeviceSize size )
{
VkMemoryRequirements vb_memory_requirements;
VkMemoryAllocateInfo alloc_info;
VkBufferCreateInfo desc;
VkDeviceSize vertex_buffer_offset;
uint32_t memory_type_bits;
uint32_t memory_type;
void *data;
int i;
desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
Com_Memset( &vb_memory_requirements, 0, sizeof( vb_memory_requirements ) );
for ( i = 0 ; i < NUM_COMMAND_BUFFERS; i++ ) {
desc.size = size;
desc.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
VK_CHECK( qvkCreateBuffer( vk.device, &desc, NULL, &vk.tess[i].vertex_buffer ) );
qvkGetBufferMemoryRequirements( vk.device, vk.tess[i].vertex_buffer, &vb_memory_requirements );
}
memory_type_bits = vb_memory_requirements.memoryTypeBits;
memory_type = find_memory_type( vk.physical_device, memory_type_bits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT );
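// host-visible + host-coherent memory lets the buffers stay persistently mapped
// with no explicit vkFlushMappedMemoryRanges after CPU writes; all per-frame
// buffers are created identically, so the last queried requirements apply to each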
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = vb_memory_requirements.size * NUM_COMMAND_BUFFERS;
alloc_info.memoryTypeIndex = memory_type;
VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &vk.geometry_buffer_memory ) );
VK_CHECK( qvkMapMemory( vk.device, vk.geometry_buffer_memory, 0, VK_WHOLE_SIZE, 0, &data ) );
vertex_buffer_offset = 0;
for ( i = 0 ; i < NUM_COMMAND_BUFFERS; i++ ) {
qvkBindBufferMemory( vk.device, vk.tess[i].vertex_buffer, vk.geometry_buffer_memory, vertex_buffer_offset );
vk.tess[i].vertex_buffer_ptr = (byte*)data + vertex_buffer_offset;
vk.tess[i].vertex_buffer_offset = 0;
vertex_buffer_offset += vb_memory_requirements.size;
SET_OBJECT_NAME( vk.tess[i].vertex_buffer, va( "geometry buffer %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT );
}
SET_OBJECT_NAME( vk.geometry_buffer_memory, "geometry buffer memory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
vk.geometry_buffer_size = vb_memory_requirements.size;
Com_Memset( &vk.stats, 0, sizeof( vk.stats ) );
}
static void vk_create_storage_buffer( uint32_t size )
{
VkMemoryRequirements memory_requirements;
VkMemoryAllocateInfo alloc_info;
VkBufferCreateInfo desc;
uint32_t memory_type_bits;
uint32_t memory_type;
desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
Com_Memset( &memory_requirements, 0, sizeof( memory_requirements ) );
desc.size = size;
desc.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
VK_CHECK( qvkCreateBuffer( vk.device, &desc, NULL, &vk.storage.buffer ) );
qvkGetBufferMemoryRequirements( vk.device, vk.storage.buffer, &memory_requirements );
memory_type_bits = memory_requirements.memoryTypeBits;
memory_type = find_memory_type( vk.physical_device, memory_type_bits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT );
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = memory_requirements.size;
alloc_info.memoryTypeIndex = memory_type;
VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &vk.storage.memory ) );
VK_CHECK( qvkMapMemory( vk.device, vk.storage.memory, 0, VK_WHOLE_SIZE, 0, (void**)&vk.storage.buffer_ptr ) );
Com_Memset( vk.storage.buffer_ptr, 0, memory_requirements.size );
qvkBindBufferMemory( vk.device, vk.storage.buffer, vk.storage.memory, 0 );
SET_OBJECT_NAME( vk.storage.buffer, "storage buffer", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT );
SET_OBJECT_NAME( vk.storage.descriptor, "storage buffer", VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT );
SET_OBJECT_NAME( vk.storage.memory, "storage buffer memory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
}
#ifdef USE_VBO
void vk_release_vbo( void )
{
if ( vk.vbo.vertex_buffer )
qvkDestroyBuffer( vk.device, vk.vbo.vertex_buffer, NULL );
vk.vbo.vertex_buffer = VK_NULL_HANDLE;
if ( vk.vbo.buffer_memory )
qvkFreeMemory( vk.device, vk.vbo.buffer_memory, NULL );
vk.vbo.buffer_memory = VK_NULL_HANDLE;
}
qboolean vk_alloc_vbo( const byte *vbo_data, int vbo_size )
{
VkMemoryRequirements vb_mem_reqs;
VkMemoryAllocateInfo alloc_info;
VkBufferCreateInfo desc;
VkDeviceSize vertex_buffer_offset;
VkDeviceSize allocationSize;
uint32_t memory_type_bits;
VkBuffer staging_vertex_buffer;
VkDeviceMemory staging_buffer_memory;
VkCommandBuffer command_buffer;
VkBufferCopy copyRegion[1];
void *data;
vk_release_vbo();
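// upload path: fill a host-visible staging buffer, then copy it into the
// device-local VBO with a one-time command buffer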
desc.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
// device-local buffer
desc.size = vbo_size;
desc.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
VK_CHECK( qvkCreateBuffer( vk.device, &desc, NULL, &vk.vbo.vertex_buffer ) );
// staging buffer
desc.size = vbo_size;
desc.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
VK_CHECK( qvkCreateBuffer( vk.device, &desc, NULL, &staging_vertex_buffer ) );
// memory requirements
qvkGetBufferMemoryRequirements( vk.device, vk.vbo.vertex_buffer, &vb_mem_reqs );
vertex_buffer_offset = 0;
allocationSize = vertex_buffer_offset + vb_mem_reqs.size;
memory_type_bits = vb_mem_reqs.memoryTypeBits;
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = allocationSize;
alloc_info.memoryTypeIndex = find_memory_type( vk.physical_device, memory_type_bits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT );
VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &vk.vbo.buffer_memory ) );
qvkBindBufferMemory( vk.device, vk.vbo.vertex_buffer, vk.vbo.buffer_memory, vertex_buffer_offset );
// staging buffers
// memory requirements
qvkGetBufferMemoryRequirements( vk.device, staging_vertex_buffer, &vb_mem_reqs );
vertex_buffer_offset = 0;
allocationSize = vertex_buffer_offset + vb_mem_reqs.size;
memory_type_bits = vb_mem_reqs.memoryTypeBits;
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = allocationSize;
alloc_info.memoryTypeIndex = find_memory_type( vk.physical_device, memory_type_bits, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT );
VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &staging_buffer_memory ) );
qvkBindBufferMemory( vk.device, staging_vertex_buffer, staging_buffer_memory, vertex_buffer_offset );
VK_CHECK( qvkMapMemory( vk.device, staging_buffer_memory, 0, VK_WHOLE_SIZE, 0, &data ) );
memcpy( (byte*)data + vertex_buffer_offset, vbo_data, vbo_size );
qvkUnmapMemory( vk.device, staging_buffer_memory );
command_buffer = begin_command_buffer();
copyRegion[0].srcOffset = 0;
copyRegion[0].dstOffset = 0;
copyRegion[0].size = vbo_size;
qvkCmdCopyBuffer( command_buffer, staging_vertex_buffer, vk.vbo.vertex_buffer, 1, &copyRegion[0] );
end_command_buffer( command_buffer );
qvkDestroyBuffer( vk.device, staging_vertex_buffer, NULL );
qvkFreeMemory( vk.device, staging_buffer_memory, NULL );
SET_OBJECT_NAME( vk.vbo.vertex_buffer, "static VBO", VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT );
SET_OBJECT_NAME( vk.vbo.buffer_memory, "static VBO memory", VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
return qtrue;
}
#endif
#include "shaders/spirv/shader_data.c"
#define SHADER_MODULE(name) SHADER_MODULE(name,sizeof(name))
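// function-like macros do not expand recursively, so the SHADER_MODULE() on the
// right-hand side still refers to the function above; call sites can pass just the array name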
static void vk_create_shader_modules( void )
{
int i, j, k, l;
vk.modules.vert.gen[0][0][0][0] = SHADER_MODULE( vert_tx0 );
vk.modules.vert.gen[0][0][0][1] = SHADER_MODULE( vert_tx0_fog );
vk.modules.vert.gen[0][0][1][0] = SHADER_MODULE( vert_tx0_env );
vk.modules.vert.gen[0][0][1][1] = SHADER_MODULE( vert_tx0_env_fog );
vk.modules.vert.gen[1][0][0][0] = SHADER_MODULE( vert_tx1 );
vk.modules.vert.gen[1][0][0][1] = SHADER_MODULE( vert_tx1_fog );
vk.modules.vert.gen[1][0][1][0] = SHADER_MODULE( vert_tx1_env );
vk.modules.vert.gen[1][0][1][1] = SHADER_MODULE( vert_tx1_env_fog );
vk.modules.vert.gen[1][1][0][0] = SHADER_MODULE( vert_tx1_cl );
vk.modules.vert.gen[1][1][0][1] = SHADER_MODULE( vert_tx1_cl_fog );
vk.modules.vert.gen[1][1][1][0] = SHADER_MODULE( vert_tx1_cl_env );
vk.modules.vert.gen[1][1][1][1] = SHADER_MODULE( vert_tx1_cl_env_fog );
vk.modules.vert.gen[2][0][0][0] = SHADER_MODULE( vert_tx2 );
vk.modules.vert.gen[2][0][0][1] = SHADER_MODULE( vert_tx2_fog );
vk.modules.vert.gen[2][0][1][0] = SHADER_MODULE( vert_tx2_env );
vk.modules.vert.gen[2][0][1][1] = SHADER_MODULE( vert_tx2_env_fog );
vk.modules.vert.gen[2][1][0][0] = SHADER_MODULE( vert_tx2_cl );
vk.modules.vert.gen[2][1][0][1] = SHADER_MODULE( vert_tx2_cl_fog );
vk.modules.vert.gen[2][1][1][0] = SHADER_MODULE( vert_tx2_cl_env );
vk.modules.vert.gen[2][1][1][1] = SHADER_MODULE( vert_tx2_cl_env_fog );
for ( i = 0; i < 3; i++ ) {
const char *tx[] = { "single", "double", "triple" };
const char *cl[] = { "", "+cl" };
const char *env[] = { "", "+env" };
const char *fog[] = { "", "+fog" };
for ( j = 0; j < 2; j++ ) {
for ( k = 0; k < 2; k++ ) {
for ( l = 0; l < 2; l++ ) {
const char *s = va( "%s-texture%s%s%s vertex module", tx[i], cl[j], env[k], fog[l] );
SET_OBJECT_NAME( vk.modules.vert.gen[i][j][k][l], s, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
}
}
}
}
vk.modules.frag.gen0_df = SHADER_MODULE( frag_tx0_df );
SET_OBJECT_NAME( vk.modules.frag.gen0_df, "single-texture df fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.vert.gen0_ident = SHADER_MODULE( vert_tx0_ident );
vk.modules.frag.gen0_ident = SHADER_MODULE( frag_tx0_ident );
SET_OBJECT_NAME( vk.modules.vert.gen0_ident, "single-texture ident.color vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.frag.gen0_ident, "single-texture ident.color fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.frag.gen[0][0][0] = SHADER_MODULE( frag_tx0 );
vk.modules.frag.gen[0][0][1] = SHADER_MODULE( frag_tx0_fog );
vk.modules.frag.gen[1][0][0] = SHADER_MODULE( frag_tx1 );
vk.modules.frag.gen[1][0][1] = SHADER_MODULE( frag_tx1_fog );
vk.modules.frag.gen[1][1][0] = SHADER_MODULE( frag_tx1_cl );
vk.modules.frag.gen[1][1][1] = SHADER_MODULE( frag_tx1_cl_fog );
vk.modules.frag.gen[2][0][0] = SHADER_MODULE( frag_tx2 );
vk.modules.frag.gen[2][0][1] = SHADER_MODULE( frag_tx2_fog );
vk.modules.frag.gen[2][1][0] = SHADER_MODULE( frag_tx2_cl );
vk.modules.frag.gen[2][1][1] = SHADER_MODULE( frag_tx2_cl_fog );
for ( i = 0; i < 3; i++ ) {
const char *tx[] = { "single", "double", "triple" };
const char *cl[] = { "", "+cl" };
const char *fog[] = { "", "+fog" };
for ( j = 0; j < 2; j++ ) {
for ( k = 0; k < 2; k++ ) {
const char *s = va( "%s-texture%s%s fragment module", tx[i], cl[j], fog[k] );
SET_OBJECT_NAME( vk.modules.frag.gen[i][j][k], s, VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
}
}
}
vk.modules.vert.light[0] = SHADER_MODULE( vert_light );
vk.modules.vert.light[1] = SHADER_MODULE( vert_light_fog );
SET_OBJECT_NAME( vk.modules.vert.light[0], "light vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.vert.light[1], "light fog vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.frag.light[0][0] = SHADER_MODULE( frag_light );
vk.modules.frag.light[0][1] = SHADER_MODULE( frag_light_fog );
vk.modules.frag.light[1][0] = SHADER_MODULE( frag_light_line );
vk.modules.frag.light[1][1] = SHADER_MODULE( frag_light_line_fog );
SET_OBJECT_NAME( vk.modules.frag.light[0][0], "light fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.frag.light[0][1], "light fog fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.frag.light[1][0], "linear light fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.frag.light[1][1], "linear light fog fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.color_fs = SHADER_MODULE( color_frag_spv );
vk.modules.color_vs = SHADER_MODULE( color_vert_spv );
SET_OBJECT_NAME( vk.modules.color_vs, "single-color vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.color_fs, "single-color fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.fog_vs = SHADER_MODULE( fog_vert_spv );
vk.modules.fog_fs = SHADER_MODULE( fog_frag_spv );
SET_OBJECT_NAME( vk.modules.fog_vs, "fog-only vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.fog_fs, "fog-only fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.dot_vs = SHADER_MODULE( dot_vert_spv );
vk.modules.dot_fs = SHADER_MODULE( dot_frag_spv );
SET_OBJECT_NAME( vk.modules.dot_vs, "dot vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.dot_fs, "dot fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.bloom_fs = SHADER_MODULE( bloom_frag_spv );
vk.modules.blur_fs = SHADER_MODULE( blur_frag_spv );
vk.modules.blend_fs = SHADER_MODULE( blend_frag_spv );
SET_OBJECT_NAME( vk.modules.bloom_fs, "bloom extraction fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.blur_fs, "gaussian blur fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.blend_fs, "final bloom blend fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
vk.modules.gamma_fs = SHADER_MODULE( gamma_frag_spv );
vk.modules.gamma_vs = SHADER_MODULE( gamma_vert_spv );
SET_OBJECT_NAME( vk.modules.gamma_fs, "gamma post-processing fragment module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
SET_OBJECT_NAME( vk.modules.gamma_vs, "gamma post-processing vertex module", VK_DEBUG_REPORT_OBJECT_TYPE_SHADER_MODULE_EXT );
}
static void vk_alloc_persistent_pipelines( void )
{
unsigned int state_bits;
Vk_Pipeline_Def def;
// skybox
{
Com_Memset(&def, 0, sizeof(def));
def.shader_type = TYPE_SIGNLE_TEXTURE_IDENTITY;
def.face_culling = CT_FRONT_SIDED;
def.polygon_offset = qfalse;
def.mirror = qfalse;
vk.skybox_pipeline = vk_find_pipeline_ext( 0, &def, qtrue );
}
// stencil shadows
{
cullType_t cull_types[2] = { CT_FRONT_SIDED, CT_BACK_SIDED };
qboolean mirror_flags[2] = { qfalse, qtrue };
int i, j;
Com_Memset(&def, 0, sizeof(def));
def.polygon_offset = qfalse;
def.state_bits = 0;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.shadow_phase = SHADOW_EDGES;
for (i = 0; i < 2; i++) {
def.face_culling = cull_types[i];
for (j = 0; j < 2; j++) {
def.mirror = mirror_flags[j];
vk.shadow_volume_pipelines[i][j] = vk_find_pipeline_ext( 0, &def, r_shadows->integer ? qtrue: qfalse );
}
}
}
{
Com_Memset( &def, 0, sizeof( def ) );
def.face_culling = CT_FRONT_SIDED;
def.polygon_offset = qfalse;
def.state_bits = GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ZERO;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.mirror = qfalse;
def.shadow_phase = SHADOW_FS_QUAD;
def.primitives = TRIANGLE_STRIP;
vk.shadow_finish_pipeline = vk_find_pipeline_ext( 0, &def, r_shadows->integer ? qtrue: qfalse );
}
// fog and dlights
{
unsigned int fog_state_bits[2] = {
GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA | GLS_DEPTHFUNC_EQUAL, // fogPass == FP_EQUAL
GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA // fogPass == FP_LE
};
unsigned int dlight_state_bits[2] = {
GLS_SRCBLEND_DST_COLOR | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL, // modulated
GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL // additive
};
qboolean polygon_offset[2] = { qfalse, qtrue };
int i, j, k, l;
Com_Memset(&def, 0, sizeof(def));
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.mirror = qfalse;
for ( i = 0; i < 2; i++ ) {
unsigned fog_state = fog_state_bits[ i ];
unsigned dlight_state = dlight_state_bits[ i ];
for ( j = 0; j < 3; j++ ) {
def.face_culling = j; // cullType_t value
for ( k = 0; k < 2; k++ ) {
def.polygon_offset = polygon_offset[ k ];
#ifdef USE_FOG_ONLY
def.shader_type = TYPE_FOG_ONLY;
#else
def.shader_type = TYPE_SIGNLE_TEXTURE;
#endif
def.state_bits = fog_state;
vk.fog_pipelines[ i ][ j ][ k ] = vk_find_pipeline_ext( 0, &def, qtrue );
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.state_bits = dlight_state;
#ifdef USE_LEGACY_DLIGHTS
#ifdef USE_PMLIGHT
vk.dlight_pipelines[ i ][ j ][ k ] = vk_find_pipeline_ext( 0, &def, r_dlightMode->integer == 0 ? qtrue : qfalse );
#else
vk.dlight_pipelines[ i ][ j ][ k ] = vk_find_pipeline_ext( 0, &def, qtrue );
#endif
#endif
}
}
}
#ifdef USE_PMLIGHT
def.state_bits = GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE | GLS_DEPTHFUNC_EQUAL;
//def.shader_type = TYPE_SIGNLE_TEXTURE_LIGHTING;
for (i = 0; i < 3; i++) { // cullType
def.face_culling = i;
for ( j = 0; j < 2; j++ ) { // polygonOffset
def.polygon_offset = polygon_offset[j];
for ( k = 0; k < 2; k++ ) {
def.fog_stage = k; // fogStage
for ( l = 0; l < 2; l++ ) {
def.abs_light = l;
def.shader_type = TYPE_SIGNLE_TEXTURE_LIGHTING;
vk.dlight_pipelines_x[i][j][k][l] = vk_find_pipeline_ext( 0, &def, qfalse );
def.shader_type = TYPE_SIGNLE_TEXTURE_LIGHTING_LINEAR;
vk.dlight1_pipelines_x[i][j][k][l] = vk_find_pipeline_ext( 0, &def, qfalse );
}
}
}
}
#endif // USE_PMLIGHT
}
// RT_BEAM surface
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE;
def.face_culling = CT_FRONT_SIDED;
def.primitives = TRIANGLE_STRIP;
vk.surface_beam_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
// axis for missing models
{
Com_Memset( &def, 0, sizeof( def ) );
def.state_bits = GLS_DEFAULT;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.face_culling = CT_TWO_SIDED;
def.primitives = LINE_LIST;
if ( vk.wideLines )
def.line_width = 3;
vk.surface_axis_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
// flare visibility test dot
{
Com_Memset( &def, 0, sizeof( def ) );
//def.state_bits = GLS_DEFAULT;
def.face_culling = CT_TWO_SIDED;
def.shader_type = TYPE_DOT;
def.primitives = POINT_LIST;
vk.dot_pipeline = vk_find_pipeline_ext( 0, &def, qtrue );
}
// DrawTris()
state_bits = GLS_POLYMODE_LINE | GLS_DEPTHMASK_TRUE;
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_WHITE;
def.face_culling = CT_FRONT_SIDED;
vk.tris_debug_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_WHITE;
def.face_culling = CT_BACK_SIDED;
vk.tris_mirror_debug_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_GREEN;
def.face_culling = CT_FRONT_SIDED;
vk.tris_debug_green_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_GREEN;
def.face_culling = CT_BACK_SIDED;
vk.tris_mirror_debug_green_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_RED;
def.face_culling = CT_FRONT_SIDED;
vk.tris_debug_red_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = state_bits;
def.shader_type = TYPE_COLOR_RED;
def.face_culling = CT_BACK_SIDED;
vk.tris_mirror_debug_red_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
// DrawNormals()
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = GLS_DEPTHMASK_TRUE;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.primitives = LINE_LIST;
vk.normals_debug_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
// RB_DebugPolygon()
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE;
def.shader_type = TYPE_SIGNLE_TEXTURE;
vk.surface_debug_pipeline_solid = vk_find_pipeline_ext( 0, &def, qfalse );
}
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = GLS_POLYMODE_LINE | GLS_DEPTHMASK_TRUE | GLS_SRCBLEND_ONE | GLS_DSTBLEND_ONE;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.primitives = LINE_LIST;
vk.surface_debug_pipeline_outline = vk_find_pipeline_ext( 0, &def, qfalse );
}
// RB_ShowImages
{
Com_Memset(&def, 0, sizeof(def));
def.state_bits = GLS_DEPTHTEST_DISABLE | GLS_SRCBLEND_SRC_ALPHA | GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA;
def.shader_type = TYPE_SIGNLE_TEXTURE;
def.primitives = TRIANGLE_STRIP;
vk.images_debug_pipeline = vk_find_pipeline_ext( 0, &def, qfalse );
}
}
void vk_create_blur_pipeline( uint32_t index, uint32_t width, uint32_t height, qboolean horizontal_pass );
static void vk_create_bloom_pipelines( void )
{
if ( vk.fboActive && r_bloom->integer )
{
uint32_t width = gls.captureWidth;
uint32_t height = gls.captureHeight;
uint32_t i;
vk_create_post_process_pipeline( 1, width, height ); // bloom extraction
for ( i = 0; i < ARRAY_LEN( vk.blur_pipeline ); i += 2 ) {
width /= 2;
height /= 2;
vk_create_blur_pipeline( i + 0, width, height, qtrue ); // horizontal
vk_create_blur_pipeline( i + 1, width, height, qfalse ); // vertical
}
vk_create_post_process_pipeline( 2, glConfig.vidWidth, glConfig.vidHeight ); // bloom blending
}
}
void vk_update_post_process_pipelines( void )
{
if ( vk.fboActive ) {
// update gamma shader
vk_create_post_process_pipeline( 0, 0, 0 );
if ( vk.capture.image ) {
// update capture pipeline
vk_create_post_process_pipeline( 3, gls.captureWidth, gls.captureHeight );
}
}
}
typedef struct vk_attach_desc_s {
VkImage descriptor;
VkImageView *image_view;
VkImageUsageFlags usage;
VkMemoryRequirements reqs;
uint32_t memoryTypeIndex;
VkDeviceSize memory_offset;
// for layout transition:
VkImageAspectFlags aspect_flags;
VkAccessFlags access_flags;
VkImageLayout image_layout;
VkFormat image_format;
} vk_attach_desc_t;
static vk_attach_desc_t attachments[ MAX_ATTACHMENTS_IN_POOL ];
static uint32_t num_attachments = 0;
static void vk_clear_attachment_pool( void )
{
num_attachments = 0;
}
static void vk_alloc_attachments( void )
{
VkImageViewCreateInfo view_desc;
VkMemoryDedicatedAllocateInfoKHR alloc_info2;
VkMemoryAllocateInfo alloc_info;
VkCommandBuffer command_buffer;
VkDeviceMemory memory;
VkDeviceSize offset;
uint32_t memoryTypeBits;
uint32_t memoryTypeIndex;
uint32_t i;
if ( num_attachments == 0 ) {
return;
}
if ( vk.image_memory_count >= ARRAY_LEN( vk.image_memory ) ) {
ri.Error( ERR_DROP, "vk.image_memory_count == %i", (int)ARRAY_LEN( vk.image_memory ) );
}
memoryTypeBits = ~0U;
offset = 0;
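// sub-allocate one memory block for all attachments: pad each offset to the
// image's alignment and AND the memoryTypeBits together so a single memory
// type can back every image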
for ( i = 0; i < num_attachments; i++ ) {
#ifdef MIN_IMAGE_ALIGN
VkDeviceSize alignment = MAX( attachments[ i ].reqs.alignment, MIN_IMAGE_ALIGN );
#else
VkDeviceSize alignment = attachments[ i ].reqs.alignment;
#endif
memoryTypeBits &= attachments[ i ].reqs.memoryTypeBits;
offset = PAD( offset, alignment );
attachments[ i ].memory_offset = offset;
offset += attachments[ i ].reqs.size;
#ifdef _DEBUG
ri.Printf( PRINT_ALL, S_COLOR_CYAN "[%i] type %i, size %i, align %i\n", i,
attachments[ i ].reqs.memoryTypeBits,
(int)attachments[ i ].reqs.size,
(int)attachments[ i ].reqs.alignment );
#endif
}
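// a single transient attachment may live in lazily allocated memory,
// which on tiled GPUs may never need backing by actual storage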
if ( num_attachments == 1 && ( attachments[ 0 ].usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT ) ) {
// try lazy memory
memoryTypeIndex = find_memory_type2( memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, NULL );
if ( memoryTypeIndex == ~0U ) {
memoryTypeIndex = find_memory_type( vk.physical_device, memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT );
}
} else {
memoryTypeIndex = find_memory_type( vk.physical_device, memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT );
}
#ifdef _DEBUG
ri.Printf( PRINT_ALL, "memory type bits: %04x\n", memoryTypeBits );
ri.Printf( PRINT_ALL, "memory type index: %04x\n", memoryTypeIndex );
ri.Printf( PRINT_ALL, "total size: %i\n", (int)offset );
#endif
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = offset;
alloc_info.memoryTypeIndex = memoryTypeIndex;
if ( num_attachments == 1 ) {
if ( vk.dedicatedAllocation ) {
Com_Memset( &alloc_info2, 0, sizeof( alloc_info2 ) );
alloc_info2.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR;
alloc_info2.image = attachments[ 0 ].descriptor;
alloc_info.pNext = &alloc_info2;
}
}
// allocate and bind memory
VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &memory ) );
vk.image_memory[ vk.image_memory_count++ ] = memory;
for ( i = 0; i < num_attachments; i++ ) {
VK_CHECK( qvkBindImageMemory( vk.device, attachments[i].descriptor, memory, attachments[i].memory_offset ) );
// create color image view
view_desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
view_desc.pNext = NULL;
view_desc.flags = 0;
view_desc.image = attachments[ i ].descriptor;
view_desc.viewType = VK_IMAGE_VIEW_TYPE_2D;
view_desc.format = attachments[ i ].image_format;
view_desc.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
view_desc.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
view_desc.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
view_desc.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
view_desc.subresourceRange.aspectMask = attachments[ i ].aspect_flags;
view_desc.subresourceRange.baseMipLevel = 0;
view_desc.subresourceRange.levelCount = 1;
view_desc.subresourceRange.baseArrayLayer = 0;
view_desc.subresourceRange.layerCount = 1;
VK_CHECK( qvkCreateImageView( vk.device, &view_desc, NULL, attachments[ i ].image_view ) );
}
// perform layout transition
command_buffer = begin_command_buffer();
for ( i = 0; i < num_attachments; i++ ) {
record_image_layout_transition( command_buffer,
attachments[i].descriptor,
attachments[i].aspect_flags,
0, // src_access_flags
VK_IMAGE_LAYOUT_UNDEFINED, // old_layout
attachments[i].access_flags,
attachments[i].image_layout
);
}
end_command_buffer( command_buffer );
num_attachments = 0;
}
static void vk_add_attachment_desc( VkImage desc, VkImageView *image_view, VkImageUsageFlags usage, VkMemoryRequirements *reqs, VkFormat image_format, VkImageAspectFlags aspect_flags, VkAccessFlags access_flags, VkImageLayout image_layout )
{
if ( num_attachments >= ARRAY_LEN( attachments ) ) {
ri.Error( ERR_FATAL, "Attachments array overflow" );
} else {
attachments[ num_attachments ].descriptor = desc;
attachments[ num_attachments ].image_view = image_view;
attachments[ num_attachments ].usage = usage;
attachments[ num_attachments ].reqs = *reqs;
attachments[ num_attachments ].aspect_flags = aspect_flags;
attachments[ num_attachments ].access_flags = access_flags;
attachments[ num_attachments ].image_layout = image_layout;
attachments[ num_attachments ].image_format = image_format;
attachments[ num_attachments ].memory_offset = 0;
num_attachments++;
}
}
static void vk_get_image_memory_requirements( VkImage image, VkMemoryRequirements *memory_requirements )
{
if ( vk.dedicatedAllocation ) {
VkMemoryRequirements2KHR memory_requirements2;
VkImageMemoryRequirementsInfo2KHR image_requirements2;
VkMemoryDedicatedRequirementsKHR mem_req2;
Com_Memset( &mem_req2, 0, sizeof( mem_req2 ) );
mem_req2.sType = VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR;
image_requirements2.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2_KHR;
image_requirements2.image = image;
image_requirements2.pNext = NULL;
Com_Memset( &memory_requirements2, 0, sizeof( memory_requirements2 ) );
memory_requirements2.sType = VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2_KHR;
memory_requirements2.pNext = &mem_req2;
qvkGetImageMemoryRequirements2KHR( vk.device, &image_requirements2, &memory_requirements2 );
*memory_requirements = memory_requirements2.memoryRequirements;
} else {
qvkGetImageMemoryRequirements( vk.device, image, memory_requirements );
}
}
static void create_color_attachment( uint32_t width, uint32_t height, VkSampleCountFlagBits samples, VkFormat format,
VkImageUsageFlags usage, VkImage *image, VkImageView *image_view, VkImageLayout image_layout, qboolean multisample )
{
VkImageCreateInfo create_desc;
VkMemoryRequirements memory_requirements;
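// multisampled targets that are never sampled can be marked transient,
// allowing the driver to back them with lazily allocated memory (see vk_alloc_attachments)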
if ( multisample && !( usage & VK_IMAGE_USAGE_SAMPLED_BIT ) )
usage |= VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
// create color image
create_desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
create_desc.pNext = NULL;
create_desc.flags = 0;
create_desc.imageType = VK_IMAGE_TYPE_2D;
create_desc.format = format;
create_desc.extent.width = width;
create_desc.extent.height = height;
create_desc.extent.depth = 1;
create_desc.mipLevels = 1;
create_desc.arrayLayers = 1;
create_desc.samples = samples;
create_desc.tiling = VK_IMAGE_TILING_OPTIMAL;
create_desc.usage = usage;
create_desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_desc.queueFamilyIndexCount = 0;
create_desc.pQueueFamilyIndices = NULL;
create_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VK_CHECK( qvkCreateImage( vk.device, &create_desc, NULL, image ) );
vk_get_image_memory_requirements( *image, &memory_requirements );
if ( multisample )
vk_add_attachment_desc( *image, image_view, usage, &memory_requirements, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, image_layout );
else
vk_add_attachment_desc( *image, image_view, usage, &memory_requirements, format, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_SHADER_READ_BIT, image_layout );
}
static void create_depth_attachment( uint32_t width, uint32_t height, VkSampleCountFlagBits samples, VkImage *image, VkImageView *image_view )
{
VkImageCreateInfo create_desc;
VkMemoryRequirements memory_requirements;
VkImageAspectFlags image_aspect_flags;
// create depth image
create_desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
create_desc.pNext = NULL;
create_desc.flags = 0;
create_desc.imageType = VK_IMAGE_TYPE_2D;
create_desc.format = vk.depth_format;
create_desc.extent.width = width;
create_desc.extent.height = height;
create_desc.extent.depth = 1;
create_desc.mipLevels = 1;
create_desc.arrayLayers = 1;
create_desc.samples = samples;
create_desc.tiling = VK_IMAGE_TILING_OPTIMAL;
create_desc.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT;
create_desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
create_desc.queueFamilyIndexCount = 0;
create_desc.pQueueFamilyIndices = NULL;
create_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
image_aspect_flags = VK_IMAGE_ASPECT_DEPTH_BIT;
if ( r_stencilbits->integer )
image_aspect_flags |= VK_IMAGE_ASPECT_STENCIL_BIT;
VK_CHECK( qvkCreateImage( vk.device, &create_desc, NULL, image ) );
vk_get_image_memory_requirements( *image, &memory_requirements );
vk_add_attachment_desc( *image, image_view, create_desc.usage, &memory_requirements, vk.depth_format, image_aspect_flags, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL );
}
static void vk_create_attachments( void )
{
uint32_t i;
vk_clear_attachment_pool();
// It looks like the resulting performance depends on the order in which you create/allocate
// memory for attachments in Vulkan, i.e. grouping similar images together gives the best results,
// so [resolve0][resolve1][msaa0][msaa1][depth0][depth1] is the most optimal layout,
// while something like [resolve0][depth0][color0][...] is the worst.
// TODO: preallocate first image chunk in attachments' memory pool?
if ( vk.fboActive ) {
VkImageUsageFlags usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
// bloom
if ( r_bloom->integer ) {
uint32_t width = gls.captureWidth;
uint32_t height = gls.captureHeight;
create_color_attachment( width, height, VK_SAMPLE_COUNT_1_BIT, vk.bloom_format,
usage, &vk.bloom_image[0], &vk.bloom_image_view[0], VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, qfalse );
for ( i = 1; i < ARRAY_LEN( vk.bloom_image ); i += 2 ) {
width /= 2;
height /= 2;
create_color_attachment( width, height, VK_SAMPLE_COUNT_1_BIT, vk.bloom_format,
usage, &vk.bloom_image[i+0], &vk.bloom_image_view[i+0], VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, qfalse );
create_color_attachment( width, height, VK_SAMPLE_COUNT_1_BIT, vk.bloom_format,
usage, &vk.bloom_image[i+1], &vk.bloom_image_view[i+1], VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, qfalse );
}
}
// post-processing/msaa-resolve
create_color_attachment( glConfig.vidWidth, glConfig.vidHeight, VK_SAMPLE_COUNT_1_BIT, vk.color_format,
usage, &vk.color_image, &vk.color_image_view, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, qfalse );
// screenmap
usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
if ( vk.screenMapSamples > VK_SAMPLE_COUNT_1_BIT ) {
create_color_attachment( vk.screenMapWidth, vk.screenMapHeight, vk.screenMapSamples, vk.color_format,
usage, &vk.screenMap.color_image_msaa, &vk.screenMap.color_image_view_msaa, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, qtrue );
}
create_color_attachment( vk.screenMapWidth, vk.screenMapHeight, VK_SAMPLE_COUNT_1_BIT, vk.color_format,
usage, &vk.screenMap.color_image, &vk.screenMap.color_image_view, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, qfalse );
// screenmap depth
create_depth_attachment( vk.screenMapWidth, vk.screenMapHeight, vk.screenMapSamples, &vk.screenMap.depth_image, &vk.screenMap.depth_image_view );
if ( vk.msaaActive ) {
create_color_attachment( glConfig.vidWidth, glConfig.vidHeight, vkSamples, vk.color_format,
VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, &vk.msaa_image, &vk.msaa_image_view, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, qtrue );
}
if ( r_ext_supersample->integer ) {
// capture buffer
usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
create_color_attachment( gls.captureWidth, gls.captureHeight, VK_SAMPLE_COUNT_1_BIT, vk.capture_format,
usage, &vk.capture.image, &vk.capture.image_view, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, qfalse );
}
} // if ( vk.fboActive )
//vk_alloc_attachments();
create_depth_attachment( glConfig.vidWidth, glConfig.vidHeight, vkSamples, &vk.depth_image, &vk.depth_image_view );
vk_alloc_attachments();
for ( i = 0; i < vk.image_memory_count; i++ )
{
SET_OBJECT_NAME( vk.image_memory[i], va( "framebuffer memory chunk %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT );
}
SET_OBJECT_NAME( vk.depth_image, "depth attachment", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT );
SET_OBJECT_NAME( vk.depth_image_view, "depth attachment", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
SET_OBJECT_NAME( vk.color_image, "color attachment", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT );
SET_OBJECT_NAME( vk.color_image_view, "color attachment", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
SET_OBJECT_NAME( vk.capture.image, "capture image", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
SET_OBJECT_NAME( vk.capture.image_view, "capture image view", VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
for ( i = 0; i < ARRAY_LEN( vk.bloom_image ); i++ )
{
SET_OBJECT_NAME( vk.bloom_image[i], va( "bloom attachment %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT );
SET_OBJECT_NAME( vk.bloom_image_view[i], va( "bloom attachment %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
}
}
static void vk_create_framebuffers( void )
{
VkImageView attachments[3];
VkFramebufferCreateInfo desc;
uint32_t n;
desc.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.pAttachments = attachments;
desc.layers = 1;
for ( n = 0; n < vk.swapchain_image_count; n++ )
{
desc.renderPass = vk.render_pass.main;
desc.attachmentCount = 2;
if ( r_fbo->integer == 0 )
{
desc.width = gls.windowWidth;
desc.height = gls.windowHeight;
attachments[0] = vk.swapchain_image_views[n];
attachments[1] = vk.depth_image_view;
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.main[n] ) );
SET_OBJECT_NAME( vk.framebuffers.main[n], va( "framebuffer - main %i", n ), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
}
else
{
// same framebuffer configuration for main and post-bloom render passes
if ( n == 0 )
{
desc.width = glConfig.vidWidth;
desc.height = glConfig.vidHeight;
attachments[0] = vk.color_image_view;
attachments[1] = vk.depth_image_view;
if ( vk.msaaActive )
{
desc.attachmentCount = 3;
attachments[2] = vk.msaa_image_view;
}
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.main[n] ) );
SET_OBJECT_NAME( vk.framebuffers.main[n], "framebuffer - main", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
}
else
{
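// only main[0] owns a real framebuffer handle; the remaining entries alias it
// (vk_destroy_framebuffers destroys main[n] only for n == 0 when the FBO path is active)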
vk.framebuffers.main[n] = vk.framebuffers.main[0];
}
// gamma correction
desc.renderPass = vk.render_pass.gamma;
desc.attachmentCount = 1;
desc.width = gls.windowWidth;
desc.height = gls.windowHeight;
attachments[0] = vk.swapchain_image_views[n];
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.gamma[n] ) );
SET_OBJECT_NAME( vk.framebuffers.gamma[n], "framebuffer - gamma-correction", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
}
}
if ( vk.fboActive )
{
// screenmap
desc.renderPass = vk.render_pass.screenmap;
desc.attachmentCount = 2;
desc.width = vk.screenMapWidth;
desc.height = vk.screenMapHeight;
attachments[0] = vk.screenMap.color_image_view;
attachments[1] = vk.screenMap.depth_image_view;
if ( vk.screenMapSamples > VK_SAMPLE_COUNT_1_BIT )
{
desc.attachmentCount = 3;
attachments[2] = vk.screenMap.color_image_view_msaa;
}
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.screenmap ) );
SET_OBJECT_NAME( vk.framebuffers.screenmap, "framebuffer - screenmap", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
if ( vk.capture.image != VK_NULL_HANDLE )
{
attachments[0] = vk.capture.image_view;
desc.renderPass = vk.render_pass.capture;
desc.pAttachments = attachments;
desc.attachmentCount = 1;
desc.width = gls.captureWidth;
desc.height = gls.captureHeight;
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.capture ) );
SET_OBJECT_NAME( vk.framebuffers.capture, "framebuffer - capture", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
}
if ( r_bloom->integer )
{
uint32_t width = gls.captureWidth;
uint32_t height = gls.captureHeight;
// bloom color extraction
desc.renderPass = vk.render_pass.bloom_extract;
desc.width = width;
desc.height = height;
desc.attachmentCount = 1;
attachments[0] = vk.bloom_image_view[0];
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.bloom_extract ) );
SET_OBJECT_NAME( vk.framebuffers.bloom_extract, "framebuffer - bloom extraction", VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
for ( n = 0; n < ARRAY_LEN( vk.framebuffers.blur ); n += 2 )
{
width /= 2;
height /= 2;
desc.renderPass = vk.render_pass.blur[n];
desc.width = width;
desc.height = height;
desc.attachmentCount = 1;
attachments[0] = vk.bloom_image_view[n+0+1];
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.blur[n+0] ) );
attachments[0] = vk.bloom_image_view[n+1+1];
VK_CHECK( qvkCreateFramebuffer( vk.device, &desc, NULL, &vk.framebuffers.blur[n+1] ) );
SET_OBJECT_NAME( vk.framebuffers.blur[n+0], va( "framebuffer - blur %i", n+0 ), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
SET_OBJECT_NAME( vk.framebuffers.blur[n+1], va( "framebuffer - blur %i", n+1 ), VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT );
}
}
}
}
static void vk_create_sync_primitives( void ) {
VkSemaphoreCreateInfo desc;
VkFenceCreateInfo fence_desc;
uint32_t i;
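// Per-frame synchronization: each command buffer slot owns a semaphore that
// signals swapchain image acquisition, a semaphore that signals completion
// of rendering, and a fence the CPU waits on before reusing the slot.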
desc.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ )
{
// swapchain image acquired
VK_CHECK( qvkCreateSemaphore( vk.device, &desc, NULL, &vk.tess[i].image_acquired ) );
// all commands submitted
VK_CHECK( qvkCreateSemaphore( vk.device, &desc, NULL, &vk.tess[i].rendering_finished ) );
fence_desc.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_desc.pNext = NULL;
fence_desc.flags = VK_FENCE_CREATE_SIGNALED_BIT; // so it can be used to start rendering
VK_CHECK( qvkCreateFence( vk.device, &fence_desc, NULL, &vk.tess[i].rendering_finished_fence ) );
vk.tess[i].waitForFence = qtrue;
SET_OBJECT_NAME( vk.tess[i].image_acquired, va( "image_acquired semaphore %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT);
SET_OBJECT_NAME( vk.tess[i].rendering_finished, va( "rendering_finished semaphore %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_SEMAPHORE_EXT );
SET_OBJECT_NAME( vk.tess[i].rendering_finished_fence, va( "rendering_finished fence %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_FENCE_EXT );
}
}
static void vk_destroy_sync_primitives( void ) {
uint32_t i;
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ ) {
qvkDestroySemaphore( vk.device, vk.tess[i].image_acquired, NULL );
qvkDestroySemaphore( vk.device, vk.tess[i].rendering_finished, NULL );
qvkDestroyFence( vk.device, vk.tess[i].rendering_finished_fence, NULL );
vk.tess[i].waitForFence = qfalse;
}
}
static void vk_destroy_framebuffers( void ) {
uint32_t n;
for ( n = 0; n < vk.swapchain_image_count; n++ ) {
if ( vk.framebuffers.main[n] != VK_NULL_HANDLE ) {
if ( !vk.fboActive || n == 0 ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.main[n], NULL );
}
vk.framebuffers.main[n] = VK_NULL_HANDLE;
}
if ( vk.framebuffers.gamma[n] != VK_NULL_HANDLE ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.gamma[n], NULL );
vk.framebuffers.gamma[n] = VK_NULL_HANDLE;
}
}
if ( vk.framebuffers.bloom_extract != VK_NULL_HANDLE ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.bloom_extract, NULL );
vk.framebuffers.bloom_extract = VK_NULL_HANDLE;
}
if ( vk.framebuffers.screenmap != VK_NULL_HANDLE ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.screenmap, NULL );
vk.framebuffers.screenmap = VK_NULL_HANDLE;
}
if ( vk.framebuffers.capture != VK_NULL_HANDLE ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.capture, NULL );
vk.framebuffers.capture = VK_NULL_HANDLE;
}
for ( n = 0; n < ARRAY_LEN( vk.framebuffers.blur ); n++ ) {
if ( vk.framebuffers.blur[n] != VK_NULL_HANDLE ) {
qvkDestroyFramebuffer( vk.device, vk.framebuffers.blur[n], NULL );
vk.framebuffers.blur[n] = VK_NULL_HANDLE;
}
}
}
static void vk_destroy_swapchain( void ) {
uint32_t i;
for ( i = 0; i < vk.swapchain_image_count; i++ ) {
if ( vk.swapchain_image_views[i] != VK_NULL_HANDLE ) {
qvkDestroyImageView( vk.device, vk.swapchain_image_views[i], NULL );
vk.swapchain_image_views[i] = VK_NULL_HANDLE;
}
}
qvkDestroySwapchainKHR( vk.device, vk.swapchain, NULL );
}
static void vk_destroy_attachments( void );
static void vk_destroy_render_passes( void );
static void vk_destroy_pipelines( qboolean resetCount );
static void vk_restart_swapchain( const char *funcname )
{
uint32_t i;
ri.Printf( PRINT_WARNING, "%s(): restarting swapchain...\n", funcname );
vk_wait_idle();
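// Tear everything that depends on the swapchain down in roughly reverse
// creation order, then rebuild; attachment descriptors and post-process
// pipelines are refreshed last since they reference the new image views.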
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ ) {
qvkResetCommandBuffer( vk.tess[i].command_buffer, 0 );
}
vk_destroy_pipelines( qfalse );
vk_destroy_framebuffers();
vk_destroy_render_passes();
vk_destroy_attachments();
vk_destroy_swapchain();
vk_destroy_sync_primitives();
vk_select_surface_format( vk.physical_device, vk.surface );
setup_surface_formats( vk.physical_device );
vk_create_sync_primitives();
vk_create_swapchain( vk.physical_device, vk.device, vk.surface, vk.present_format, &vk.swapchain );
vk_create_attachments();
vk_create_render_passes();
vk_create_framebuffers();
vk_create_bloom_pipelines();
vk_update_attachment_descriptors();
vk_update_post_process_pipelines();
}
static void vk_set_render_scale( void )
{
if ( gls.windowWidth != glConfig.vidWidth || gls.windowHeight != glConfig.vidHeight )
{
if ( r_renderScale->integer > 0 )
{
int scaleMode = r_renderScale->integer - 1;
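// r_renderScale 1..4 maps to scaleMode 0..3: bit 0 preserves the render
// aspect ratio by offsetting the blit (black bars), bit 1 selects linear
// instead of nearest filtering for the upscale blit.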
if ( scaleMode & 1 )
{
// preserve aspect ratio (black bars on sides)
float windowAspect = (float) gls.windowWidth / (float) gls.windowHeight;
float renderAspect = (float) glConfig.vidWidth / (float) glConfig.vidHeight;
if ( windowAspect >= renderAspect )
{
float scale = (float)gls.windowHeight / ( float ) glConfig.vidHeight;
int bias = ( gls.windowWidth - scale * (float) glConfig.vidWidth ) / 2;
vk.blitX0 += bias;
}
else
{
float scale = (float)gls.windowWidth / ( float ) glConfig.vidWidth;
int bias = ( gls.windowHeight - scale * (float) glConfig.vidHeight ) / 2;
vk.blitY0 += bias;
}
}
// linear filtering
if ( scaleMode & 2 )
vk.blitFilter = GL_LINEAR;
else
vk.blitFilter = GL_NEAREST;
}
vk.windowAdjusted = qtrue;
}
if ( r_fbo->integer && r_ext_supersample->integer && !r_renderScale->integer )
{
vk.blitFilter = GL_LINEAR;
}
}
void vk_initialize( void )
{
char buf[64], driver_version[64];
const char *vendor_name;
VkPhysicalDeviceProperties props;
uint32_t major;
uint32_t minor;
uint32_t patch;
uint32_t maxSize;
uint32_t i;
init_vulkan_library();
qvkGetDeviceQueue( vk.device, vk.queue_family_index, 0, &vk.queue );
qvkGetPhysicalDeviceProperties( vk.physical_device, &props );
vk.cmd = vk.tess + 0;
vk.uniform_alignment = props.limits.minUniformBufferOffsetAlignment;
vk.uniform_item_size = PAD( sizeof( vkUniform_t ), vk.uniform_alignment );
// for flare visibility tests
vk.storage_alignment = MAX( props.limits.minStorageBufferOffsetAlignment, sizeof( uint32_t ) );
vk.maxAnisotropy = props.limits.maxSamplerAnisotropy;
vk.blitFilter = GL_NEAREST;
vk.windowAdjusted = qfalse;
vk.blitX0 = vk.blitY0 = 0;
vk_set_render_scale();
if ( r_fbo->integer )
{
vk.fboActive = qtrue;
if ( r_ext_multisample->integer )
{
vk.msaaActive = qtrue;
}
}
// multisampling
vkMaxSamples = MIN( props.limits.sampledImageColorSampleCounts, props.limits.sampledImageDepthSampleCounts );
if ( /*vk.fboActive &&*/ vk.msaaActive ) {
VkSampleCountFlags mask = vkMaxSamples;
vkSamples = MAX( log2pad( r_ext_multisample->integer, 1 ), VK_SAMPLE_COUNT_2_BIT );
while ( vkSamples > mask )
vkSamples >>= 1;
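// VkSampleCountFlagBits values are powers of two, so halving steps down to
// the next lower sample count until a supported one is reached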
ri.Printf( PRINT_ALL, "...using %ix MSAA\n", vkSamples );
} else {
vkSamples = VK_SAMPLE_COUNT_1_BIT;
}
vk.screenMapSamples = MIN( vkMaxSamples, VK_SAMPLE_COUNT_4_BIT );
vk.screenMapWidth = (float) glConfig.vidWidth / 16.0;
if ( vk.screenMapWidth < 4 )
vk.screenMapWidth = 4;
vk.screenMapHeight = (float) glConfig.vidHeight / 16.0;
if ( vk.screenMapHeight < 4 )
vk.screenMapHeight = 4;
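// the screenmap is a 1/16-scale (minimum 4x4) render target of the main view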
// fill glConfig information
// maxTextureSize must not exceed IMAGE_CHUNK_SIZE
maxSize = sqrtf( IMAGE_CHUNK_SIZE / 4 );
// round down to next power of 2
glConfig.maxTextureSize = MIN( props.limits.maxImageDimension2D, log2pad( maxSize, 0 ) );
if ( glConfig.maxTextureSize > MAX_TEXTURE_SIZE )
glConfig.maxTextureSize = MAX_TEXTURE_SIZE; // ResampleTexture() relies on that maximum
// default chunk size, may be doubled on demand
vk.image_chunk_size = IMAGE_CHUNK_SIZE;
vk.maxLod = 1 + Q_log2( glConfig.maxTextureSize );
if ( props.limits.maxPerStageDescriptorSamplers != 0xFFFFFFFF )
glConfig.numTextureUnits = props.limits.maxPerStageDescriptorSamplers;
else
glConfig.numTextureUnits = props.limits.maxBoundDescriptorSets;
if ( glConfig.numTextureUnits > MAX_TEXTURE_UNITS )
glConfig.numTextureUnits = MAX_TEXTURE_UNITS;
vk.maxBoundDescriptorSets = props.limits.maxBoundDescriptorSets;
glConfig.textureEnvAddAvailable = qtrue;
glConfig.textureCompression = TC_NONE;
major = VK_VERSION_MAJOR(props.apiVersion);
minor = VK_VERSION_MINOR(props.apiVersion);
patch = VK_VERSION_PATCH(props.apiVersion);
// decode driver version
switch ( props.vendorID ) {
case 0x10DE: // NVidia
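// NVIDIA packs driverVersion as 10.8.8.6 bits: major.minor.patch.build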
Com_sprintf( driver_version, sizeof( driver_version ), "%i.%i.%i.%i",
(props.driverVersion >> 22) & 0x3FF,
(props.driverVersion >> 14) & 0x0FF,
(props.driverVersion >> 6) & 0x0FF,
(props.driverVersion >> 0) & 0x03F );
break;
#ifdef _WIN32
case 0x8086: // Intel
Com_sprintf( driver_version, sizeof( driver_version ), "%i.%i",
(props.driverVersion >> 14),
(props.driverVersion >> 0) & 0x3FFF );
break;
#endif
default:
Com_sprintf( driver_version, sizeof( driver_version ), "%i.%i.%i",
(props.driverVersion >> 22),
(props.driverVersion >> 12) & 0x3FF,
(props.driverVersion >> 0) & 0xFFF );
}
Com_sprintf( glConfig.version_string, sizeof( glConfig.version_string ), "API: %i.%i.%i, Driver: %s",
major, minor, patch, driver_version );
vk.offscreenRender = qtrue;
if ( props.vendorID == 0x1002 ) {
vendor_name = "Advanced Micro Devices, Inc.";
} else if ( props.vendorID == 0x106B ) {
vendor_name = "Apple Inc.";
} else if ( props.vendorID == 0x10DE ) {
// https://github.com/SaschaWillems/Vulkan/issues/493
// we can't render to offscreen presentation surfaces on nvidia
vk.offscreenRender = qfalse;
vendor_name = "NVIDIA";
} else if ( props.vendorID == 0x14E4 ) {
vendor_name = "Broadcom Inc.";
} else if ( props.vendorID == 0x1AE0 ) {
vendor_name = "Google Inc.";
} else if ( props.vendorID == 0x8086 ) {
vendor_name = "Intel Corporation";
} else if ( props.vendorID == VK_VENDOR_ID_MESA ) {
vendor_name = "MESA";
} else {
Com_sprintf( buf, sizeof( buf ), "VendorID: %04x", props.vendorID );
vendor_name = buf;
}
Q_strncpyz( glConfig.vendor_string, vendor_name, sizeof( glConfig.vendor_string ) );
Q_strncpyz( glConfig.renderer_string, renderer_name( &props ), sizeof( glConfig.renderer_string ) );
SET_OBJECT_NAME( (intptr_t)vk.device, glConfig.renderer_string, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_EXT );
//
// Sync primitives.
//
vk_create_sync_primitives();
//
// Command pool.
//
{
VkCommandPoolCreateInfo desc;
desc.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
desc.pNext = NULL;
desc.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT | VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
desc.queueFamilyIndex = vk.queue_family_index;
VK_CHECK(qvkCreateCommandPool(vk.device, &desc, NULL, &vk.command_pool));
SET_OBJECT_NAME( vk.command_pool, "command pool", VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_POOL_EXT );
}
//
// Command buffers and color attachments.
//
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ )
{
VkCommandBufferAllocateInfo alloc_info;
alloc_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.commandPool = vk.command_pool;
alloc_info.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
alloc_info.commandBufferCount = 1;
VK_CHECK( qvkAllocateCommandBuffers( vk.device, &alloc_info, &vk.tess[i].command_buffer ) );
//SET_OBJECT_NAME( vk.tess[i].command_buffer, va( "command buffer %i", i ), VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT );
}
//
// Descriptor pool.
//
{
VkDescriptorPoolSize pool_size[3];
VkDescriptorPoolCreateInfo desc;
uint32_t i, maxSets;
pool_size[0].type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
pool_size[0].descriptorCount = MAX_DRAWIMAGES + 1 + 1 + 1 + VK_NUM_BLOOM_PASSES * 2; // color, screenmap, bloom descriptors
pool_size[1].type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
pool_size[1].descriptorCount = NUM_COMMAND_BUFFERS;
//pool_size[2].type = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
//pool_size[2].descriptorCount = NUM_COMMAND_BUFFERS;
pool_size[2].type = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
pool_size[2].descriptorCount = 1;
for ( i = 0, maxSets = 0; i < ARRAY_LEN( pool_size ); i++ ) {
maxSets += pool_size[i].descriptorCount;
}
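// every descriptor set layout used here contains a single descriptor,
// so the set count can simply be the sum of all pool descriptor counts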
desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.maxSets = maxSets;
desc.poolSizeCount = ARRAY_LEN( pool_size );
desc.pPoolSizes = pool_size;
VK_CHECK( qvkCreateDescriptorPool( vk.device, &desc, NULL, &vk.descriptor_pool ) );
}
//
// Descriptor set layout.
//
vk_create_layout_binding( 0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_SHADER_STAGE_FRAGMENT_BIT, &vk.set_layout_sampler );
vk_create_layout_binding( 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT, &vk.set_layout_uniform );
vk_create_layout_binding( 0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, VK_SHADER_STAGE_FRAGMENT_BIT, &vk.set_layout_storage );
//vk_create_layout_binding( 0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, VK_SHADER_STAGE_FRAGMENT_BIT, &vk.set_layout_input );
//
// Pipeline layouts.
//
{
VkDescriptorSetLayout set_layouts[6];
VkPipelineLayoutCreateInfo desc;
VkPushConstantRange push_range;
push_range.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
push_range.offset = 0;
push_range.size = 64; // 16 floats
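// 64 bytes = one 4x4 float matrix pushed to the vertex stage (the per-draw transform)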
// standard pipelines
set_layouts[0] = vk.set_layout_storage; // storage for testing flare visibility
set_layouts[1] = vk.set_layout_uniform; // fog/dlight parameters
set_layouts[2] = vk.set_layout_sampler; // diffuse
set_layouts[3] = vk.set_layout_sampler; // lightmap / fog-only
set_layouts[4] = vk.set_layout_sampler; // blend
set_layouts[5] = vk.set_layout_sampler; // collapsed fog texture
desc.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.setLayoutCount = (vk.maxBoundDescriptorSets >= 6) ? 6 : 4;
desc.pSetLayouts = set_layouts;
desc.pushConstantRangeCount = 1;
desc.pPushConstantRanges = &push_range;
VK_CHECK(qvkCreatePipelineLayout(vk.device, &desc, NULL, &vk.pipeline_layout));
// flare test pipeline
#if 0
set_layouts[0] = vk.set_layout_storage; // dynamic storage buffer
desc.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.setLayoutCount = 1;
desc.pSetLayouts = set_layouts;
desc.pushConstantRangeCount = 1;
desc.pPushConstantRanges = &push_range;
VK_CHECK(qvkCreatePipelineLayout(vk.device, &desc, NULL, &vk.pipeline_layout_storage));
#endif
// post-processing pipeline
set_layouts[0] = vk.set_layout_sampler; // sampler
set_layouts[1] = vk.set_layout_sampler; // sampler
set_layouts[2] = vk.set_layout_sampler; // sampler
set_layouts[3] = vk.set_layout_sampler; // sampler
desc.sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.setLayoutCount = 1;
desc.pSetLayouts = set_layouts;
desc.pushConstantRangeCount = 0;
desc.pPushConstantRanges = NULL;
VK_CHECK( qvkCreatePipelineLayout( vk.device, &desc, NULL, &vk.pipeline_layout_post_process ) );
desc.setLayoutCount = VK_NUM_BLOOM_PASSES;
VK_CHECK( qvkCreatePipelineLayout( vk.device, &desc, NULL, &vk.pipeline_layout_blend ) );
SET_OBJECT_NAME( vk.pipeline_layout, "pipeline layout - main", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT );
SET_OBJECT_NAME( vk.pipeline_layout_post_process, "pipeline layout - post-processing", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT );
SET_OBJECT_NAME( vk.pipeline_layout_blend, "pipeline layout - blend", VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_LAYOUT_EXT );
}
vk.geometry_buffer_size_new = VERTEX_BUFFER_SIZE;
vk_create_geometry_buffers( vk.geometry_buffer_size_new );
vk.geometry_buffer_size_new = 0;
vk_create_storage_buffer( MAX_FLARES * vk.storage_alignment );
vk_create_shader_modules();
{
VkPipelineCacheCreateInfo ci;
Com_Memset( &ci, 0, sizeof( ci ) );
ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
VK_CHECK( qvkCreatePipelineCache( vk.device, &ci, NULL, &vk.pipelineCache ) );
}
vk.renderPassIndex = RENDER_PASS_MAIN; // default render pass
// swapchain
vk.initSwapchainLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
//vk.initSwapchainLayout = VK_IMAGE_LAYOUT_UNDEFINED;
vk_create_swapchain( vk.physical_device, vk.device, vk.surface, vk.present_format, &vk.swapchain );
// color/depth attachments
vk_create_attachments();
// renderpasses
vk_create_render_passes();
// framebuffers for each swapchain image
vk_create_framebuffers();
vk.active = qtrue;
}
void vk_create_pipelines( void )
{
vk_alloc_persistent_pipelines();
vk.pipelines_world_base = vk.pipelines_count;
vk_create_bloom_pipelines();
}
static void vk_destroy_attachments( void )
{
uint32_t i;
if ( vk.bloom_image[0] ) {
for ( i = 0; i < ARRAY_LEN( vk.bloom_image ); i++ ) {
qvkDestroyImage( vk.device, vk.bloom_image[i], NULL );
qvkDestroyImageView( vk.device, vk.bloom_image_view[i], NULL );
vk.bloom_image[i] = VK_NULL_HANDLE;
vk.bloom_image_view[i] = VK_NULL_HANDLE;
}
}
if ( vk.color_image ) {
qvkDestroyImage( vk.device, vk.color_image, NULL );
qvkDestroyImageView( vk.device, vk.color_image_view, NULL );
vk.color_image = VK_NULL_HANDLE;
vk.color_image_view = VK_NULL_HANDLE;
}
if ( vk.msaa_image ) {
qvkDestroyImage( vk.device, vk.msaa_image, NULL );
qvkDestroyImageView( vk.device, vk.msaa_image_view, NULL );
vk.msaa_image = VK_NULL_HANDLE;
vk.msaa_image_view = VK_NULL_HANDLE;
}
qvkDestroyImage( vk.device, vk.depth_image, NULL );
qvkDestroyImageView( vk.device, vk.depth_image_view, NULL );
vk.depth_image = VK_NULL_HANDLE;
vk.depth_image_view = VK_NULL_HANDLE;
if ( vk.screenMap.color_image ) {
qvkDestroyImage( vk.device, vk.screenMap.color_image, NULL );
qvkDestroyImageView( vk.device, vk.screenMap.color_image_view, NULL );
vk.screenMap.color_image = VK_NULL_HANDLE;
vk.screenMap.color_image_view = VK_NULL_HANDLE;
}
if ( vk.screenMap.color_image_msaa ) {
qvkDestroyImage( vk.device, vk.screenMap.color_image_msaa, NULL );
qvkDestroyImageView( vk.device, vk.screenMap.color_image_view_msaa, NULL );
vk.screenMap.color_image_msaa = VK_NULL_HANDLE;
vk.screenMap.color_image_view_msaa = VK_NULL_HANDLE;
}
if ( vk.screenMap.depth_image ) {
qvkDestroyImage( vk.device, vk.screenMap.depth_image, NULL );
qvkDestroyImageView( vk.device, vk.screenMap.depth_image_view, NULL );
vk.screenMap.depth_image = VK_NULL_HANDLE;
vk.screenMap.depth_image_view = VK_NULL_HANDLE;
}
if ( vk.capture.image ) {
qvkDestroyImage( vk.device, vk.capture.image, NULL );
qvkDestroyImageView( vk.device, vk.capture.image_view, NULL );
vk.capture.image = VK_NULL_HANDLE;
vk.capture.image_view = VK_NULL_HANDLE;
}
for ( i = 0; i < vk.image_memory_count; i++ ) {
qvkFreeMemory( vk.device, vk.image_memory[i], NULL );
}
vk.image_memory_count = 0;
}
static void vk_destroy_render_passes( void )
{
uint32_t i;
if ( vk.render_pass.main != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.main, NULL );
vk.render_pass.main = VK_NULL_HANDLE;
}
if ( vk.render_pass.bloom_extract != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.bloom_extract, NULL );
vk.render_pass.bloom_extract = VK_NULL_HANDLE;
}
for ( i = 0; i < ARRAY_LEN( vk.render_pass.blur ); i++ ) {
if ( vk.render_pass.blur[i] != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.blur[i], NULL );
vk.render_pass.blur[i] = VK_NULL_HANDLE;
}
}
if ( vk.render_pass.post_bloom != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.post_bloom, NULL );
vk.render_pass.post_bloom = VK_NULL_HANDLE;
}
if ( vk.render_pass.screenmap != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.screenmap, NULL );
vk.render_pass.screenmap = VK_NULL_HANDLE;
}
if ( vk.render_pass.gamma != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.gamma, NULL );
vk.render_pass.gamma = VK_NULL_HANDLE;
}
if ( vk.render_pass.capture != VK_NULL_HANDLE ) {
qvkDestroyRenderPass( vk.device, vk.render_pass.capture, NULL );
vk.render_pass.capture = VK_NULL_HANDLE;
}
}
static void vk_destroy_pipelines( qboolean reset )
{
uint32_t i, j;
for ( i = 0; i < vk.pipelines_count; i++ ) {
for ( j = 0; j < RENDER_PASS_COUNT; j++ ) {
if ( vk.pipelines[i].handle[j] != VK_NULL_HANDLE ) {
qvkDestroyPipeline( vk.device, vk.pipelines[i].handle[j], NULL );
vk.pipelines[i].handle[j] = VK_NULL_HANDLE;
vk.pipeline_create_count--;
}
}
}
if ( reset ) {
Com_Memset( &vk.pipelines, 0, sizeof( vk.pipelines ) );
vk.pipelines_count = 0;
}
if ( vk.gamma_pipeline ) {
qvkDestroyPipeline( vk.device, vk.gamma_pipeline, NULL );
vk.gamma_pipeline = VK_NULL_HANDLE;
}
if ( vk.capture_pipeline ) {
qvkDestroyPipeline( vk.device, vk.capture_pipeline, NULL );
vk.capture_pipeline = VK_NULL_HANDLE;
}
if ( vk.bloom_extract_pipeline != VK_NULL_HANDLE ) {
qvkDestroyPipeline( vk.device, vk.bloom_extract_pipeline, NULL );
vk.bloom_extract_pipeline = VK_NULL_HANDLE;
}
if ( vk.bloom_blend_pipeline != VK_NULL_HANDLE ) {
qvkDestroyPipeline( vk.device, vk.bloom_blend_pipeline, NULL );
vk.bloom_blend_pipeline = VK_NULL_HANDLE;
}
for ( i = 0; i < ARRAY_LEN( vk.blur_pipeline ); i++ ) {
if ( vk.blur_pipeline[i] != VK_NULL_HANDLE ) {
qvkDestroyPipeline( vk.device, vk.blur_pipeline[i], NULL );
vk.blur_pipeline[i] = VK_NULL_HANDLE;
}
}
}
void vk_shutdown( void )
{
int i, j, k, l;
if ( !qvkQueuePresentKHR ) { // not fully initialized
goto __cleanup;
}
vk_destroy_framebuffers();
vk_destroy_pipelines( qtrue ); // reset counter
vk_destroy_render_passes();
vk_destroy_attachments();
vk_destroy_swapchain();
if ( vk.pipelineCache != VK_NULL_HANDLE ) {
qvkDestroyPipelineCache( vk.device, vk.pipelineCache, NULL );
vk.pipelineCache = VK_NULL_HANDLE;
}
qvkDestroyCommandPool( vk.device, vk.command_pool, NULL );
qvkDestroyDescriptorPool(vk.device, vk.descriptor_pool, NULL);
qvkDestroyDescriptorSetLayout(vk.device, vk.set_layout_sampler, NULL);
qvkDestroyDescriptorSetLayout(vk.device, vk.set_layout_uniform, NULL);
qvkDestroyDescriptorSetLayout(vk.device, vk.set_layout_storage, NULL);
qvkDestroyPipelineLayout(vk.device, vk.pipeline_layout, NULL);
//qvkDestroyPipelineLayout(vk.device, vk.pipeline_layout_storage, NULL);
qvkDestroyPipelineLayout(vk.device, vk.pipeline_layout_post_process, NULL);
qvkDestroyPipelineLayout(vk.device, vk.pipeline_layout_blend, NULL);
#ifdef USE_VBO
vk_release_vbo();
#endif
vk_release_geometry_buffers();
vk_destroy_sync_primitives();
qvkDestroyBuffer( vk.device, vk.storage.buffer, NULL );
qvkFreeMemory( vk.device, vk.storage.memory, NULL );
for ( i = 0; i < 3; i++ ) {
for ( j = 0; j < 2; j++ ) {
for ( k = 0; k < 2; k++ ) {
for ( l = 0; l < 2; l++ ) {
if ( vk.modules.vert.gen[i][j][k][l] != VK_NULL_HANDLE ) {
qvkDestroyShaderModule( vk.device, vk.modules.vert.gen[i][j][k][l], NULL );
vk.modules.vert.gen[i][j][k][l] = VK_NULL_HANDLE;
}
}
}
}
}
for ( i = 0; i < 3; i++ ) {
for ( j = 0; j < 2; j++ ) {
for ( k = 0; k < 2; k++ ) {
if ( vk.modules.frag.gen[i][j][k] != VK_NULL_HANDLE ) {
qvkDestroyShaderModule( vk.device, vk.modules.frag.gen[i][j][k], NULL );
vk.modules.frag.gen[i][j][k] = VK_NULL_HANDLE;
}
}
}
}
for ( i = 0; i < 2; i++ ) {
if ( vk.modules.vert.light[i] != VK_NULL_HANDLE ) {
qvkDestroyShaderModule( vk.device, vk.modules.vert.light[i], NULL );
vk.modules.vert.light[i] = VK_NULL_HANDLE;
}
for ( j = 0; j < 2; j++ ) {
if ( vk.modules.frag.light[i][j] != VK_NULL_HANDLE ) {
qvkDestroyShaderModule( vk.device, vk.modules.frag.light[i][j], NULL );
vk.modules.frag.light[i][j] = VK_NULL_HANDLE;
}
}
}
qvkDestroyShaderModule( vk.device, vk.modules.vert.gen0_ident, NULL );
qvkDestroyShaderModule( vk.device, vk.modules.frag.gen0_ident, NULL );
qvkDestroyShaderModule( vk.device, vk.modules.frag.gen0_df, NULL );
qvkDestroyShaderModule( vk.device, vk.modules.color_fs, NULL );
qvkDestroyShaderModule( vk.device, vk.modules.color_vs, NULL );
qvkDestroyShaderModule(vk.device, vk.modules.fog_vs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.fog_fs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.dot_vs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.dot_fs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.bloom_fs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.blur_fs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.blend_fs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.gamma_vs, NULL);
qvkDestroyShaderModule(vk.device, vk.modules.gamma_fs, NULL);
__cleanup:
if ( vk.device != VK_NULL_HANDLE )
qvkDestroyDevice( vk.device, NULL );
if ( vk.surface != VK_NULL_HANDLE )
qvkDestroySurfaceKHR( vk.instance, vk.surface, NULL );
#ifdef USE_VK_VALIDATION
if ( qvkDestroyDebugReportCallbackEXT && vk.debug_callback )
qvkDestroyDebugReportCallbackEXT( vk.instance, vk.debug_callback, NULL );
#endif
if ( vk.instance != VK_NULL_HANDLE )
qvkDestroyInstance( vk.instance, NULL );
Com_Memset( &vk, 0, sizeof( vk ) );
Com_Memset( &vk_world, 0, sizeof( vk_world ) );
deinit_vulkan_library();
}
void vk_wait_idle( void )
{
VK_CHECK( qvkDeviceWaitIdle( vk.device ) );
}
void vk_release_resources( void ) {
int i, j;
vk_wait_idle();
for (i = 0; i < vk_world.num_image_chunks; i++)
qvkFreeMemory(vk.device, vk_world.image_chunks[i].memory, NULL);
if (vk_world.staging_buffer != VK_NULL_HANDLE)
qvkDestroyBuffer(vk.device, vk_world.staging_buffer, NULL);
if (vk_world.staging_buffer_memory != VK_NULL_HANDLE)
qvkFreeMemory(vk.device, vk_world.staging_buffer_memory, NULL);
for (i = 0; i < vk_world.num_samplers; i++)
qvkDestroySampler(vk.device, vk_world.samplers[i], NULL);
for ( i = vk.pipelines_world_base; i < vk.pipelines_count; i++ ) {
for ( j = 0; j < RENDER_PASS_COUNT; j++ ) {
if ( vk.pipelines[i].handle[j] != VK_NULL_HANDLE ) {
qvkDestroyPipeline( vk.device, vk.pipelines[i].handle[j], NULL );
vk.pipelines[i].handle[j] = VK_NULL_HANDLE;
vk.pipeline_create_count--;
}
}
Com_Memset( &vk.pipelines[i], 0, sizeof( vk.pipelines[0] ) );
}
vk.pipelines_count = vk.pipelines_world_base;
VK_CHECK( qvkResetDescriptorPool( vk.device, vk.descriptor_pool, 0 ) );
if ( vk_world.num_image_chunks > 1 ) {
// more than one image chunk was needed - double the default size for the next load
vk.image_chunk_size = (IMAGE_CHUNK_SIZE * 2);
} else if ( vk_world.num_image_chunks == 1 ) {
// otherwise revert to the default size if the single chunk was less than ~90% used
if ( vk_world.image_chunks[0].used < ( IMAGE_CHUNK_SIZE - (IMAGE_CHUNK_SIZE / 10) ) ) {
vk.image_chunk_size = IMAGE_CHUNK_SIZE;
}
}
Com_Memset( &vk_world, 0, sizeof( vk_world ) );
// Reset geometry buffers offsets
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ ) {
vk.tess[i].uniform_read_offset = 0;
vk.tess[i].vertex_buffer_offset = 0;
}
Com_Memset( vk.cmd->buf_offset, 0, sizeof( vk.cmd->buf_offset ) );
Com_Memset( vk.cmd->vbo_offset, 0, sizeof( vk.cmd->vbo_offset ) );
Com_Memset( &vk.stats, 0, sizeof( vk.stats ) );
}
static void record_buffer_memory_barrier(VkCommandBuffer cb, VkBuffer buffer, VkDeviceSize size,
VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages,
VkAccessFlags src_access, VkAccessFlags dst_access) {
VkBufferMemoryBarrier barrier;
barrier.sType = VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER;
barrier.pNext = NULL;
barrier.srcAccessMask = src_access;
barrier.dstAccessMask = dst_access;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.buffer = buffer;
barrier.offset = 0;
barrier.size = size;
qvkCmdPipelineBarrier( cb, src_stages, dst_stages, 0, 0, NULL, 1, &barrier, 0, NULL );
}
void vk_create_image( image_t *image, int width, int height, int mip_levels ) {
VkFormat format = image->internalFormat;
if ( image->handle ) {
qvkDestroyImage( vk.device, image->handle, NULL );
}
if ( image->view ) {
qvkDestroyImageView( vk.device, image->view, NULL );
}
// create image
{
VkImageCreateInfo desc;
desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.imageType = VK_IMAGE_TYPE_2D;
desc.format = format;
desc.extent.width = width;
desc.extent.height = height;
desc.extent.depth = 1;
desc.mipLevels = mip_levels;
desc.arrayLayers = 1;
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.tiling = VK_IMAGE_TILING_OPTIMAL;
desc.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT;
desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VK_CHECK( qvkCreateImage( vk.device, &desc, NULL, &image->handle ) );
allocate_and_bind_image_memory( image->handle );
}
// create image view
{
VkImageViewCreateInfo desc;
desc.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.image = image->handle;
desc.viewType = VK_IMAGE_VIEW_TYPE_2D;
desc.format = format;
desc.components.r = VK_COMPONENT_SWIZZLE_IDENTITY;
desc.components.g = VK_COMPONENT_SWIZZLE_IDENTITY;
desc.components.b = VK_COMPONENT_SWIZZLE_IDENTITY;
desc.components.a = VK_COMPONENT_SWIZZLE_IDENTITY;
desc.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
desc.subresourceRange.baseMipLevel = 0;
desc.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
desc.subresourceRange.baseArrayLayer = 0;
desc.subresourceRange.layerCount = 1;
VK_CHECK( qvkCreateImageView( vk.device, &desc, NULL, &image->view ) );
}
// create associated descriptor set
if ( image->descriptor == VK_NULL_HANDLE )
{
VkDescriptorSetAllocateInfo desc;
desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
desc.pNext = NULL;
desc.descriptorPool = vk.descriptor_pool;
desc.descriptorSetCount = 1;
desc.pSetLayouts = &vk.set_layout_sampler;
VK_CHECK( qvkAllocateDescriptorSets( vk.device, &desc, &image->descriptor ) );
}
vk_update_descriptor_set( image, mip_levels > 1 ? qtrue : qfalse );
SET_OBJECT_NAME( image->handle, image->imgName, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT );
SET_OBJECT_NAME( image->view, image->imgName, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_VIEW_EXT );
SET_OBJECT_NAME( image->descriptor, image->imgName, VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT );
}
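// resample_image_data(): converts RGBA8 source pixels into the image's
// internal format when they differ. Returns either a freshly allocated
// temporary buffer (caller must Hunk_FreeTempMemory it) or the input pointer
// unchanged; *bytes_per_pixel receives the destination pixel size.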
static byte *resample_image_data( const int target_format, byte *data, const int data_size, int *bytes_per_pixel )
{
byte* buffer;
uint16_t* p;
int i, n;
switch ( target_format ) {
case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
buffer = (byte*)ri.Hunk_AllocateTempMemory( data_size / 2 );
p = (uint16_t*)buffer;
for ( i = 0; i < data_size; i += 4, p++ ) {
byte r = data[i + 0];
byte g = data[i + 1];
byte b = data[i + 2];
byte a = data[i + 3];
*p = (uint32_t)((a / 255.0) * 15.0 + 0.5) |
((uint32_t)((r / 255.0) * 15.0 + 0.5) << 4) |
((uint32_t)((g / 255.0) * 15.0 + 0.5) << 8) |
((uint32_t)((b / 255.0) * 15.0 + 0.5) << 12);
}
*bytes_per_pixel = 2;
return buffer; // must be freed after upload!
case VK_FORMAT_A1R5G5B5_UNORM_PACK16:
buffer = (byte*)ri.Hunk_AllocateTempMemory( data_size / 2 );
p = (uint16_t*)buffer;
for ( i = 0; i < data_size; i += 4, p++ ) {
byte r = data[i + 0];
byte g = data[i + 1];
byte b = data[i + 2];
*p = (uint32_t)((b / 255.0) * 31.0 + 0.5) |
((uint32_t)((g / 255.0) * 31.0 + 0.5) << 5) |
((uint32_t)((r / 255.0) * 31.0 + 0.5) << 10) |
(1 << 15);
}
*bytes_per_pixel = 2;
return buffer; // must be freed after upload!
case VK_FORMAT_B8G8R8A8_UNORM:
buffer = (byte*)ri.Hunk_AllocateTempMemory( data_size );
for ( i = 0; i < data_size; i += 4 ) {
buffer[i + 0] = data[i + 2];
buffer[i + 1] = data[i + 1];
buffer[i + 2] = data[i + 0];
buffer[i + 3] = data[i + 3];
}
*bytes_per_pixel = 4;
return buffer;
case VK_FORMAT_R8G8B8_UNORM: {
buffer = (byte*)ri.Hunk_AllocateTempMemory( (data_size * 3) / 4 );
for ( i = 0, n = 0; i < data_size; i += 4, n += 3 ) {
buffer[n + 0] = data[i + 0];
buffer[n + 1] = data[i + 1];
buffer[n + 2] = data[i + 2];
}
*bytes_per_pixel = 3;
return buffer;
}
default:
*bytes_per_pixel = 4;
return data;
}
}
void vk_upload_image_data( image_t *image, int x, int y, int width, int height, int mipmaps, byte *pixels, int size ) {
VkCommandBuffer command_buffer;
VkBufferImageCopy regions[16];
VkBufferImageCopy region;
byte *buf;
int bpp;
int num_regions = 0;
int buffer_size = 0;
buf = resample_image_data( image->internalFormat, pixels, size, &bpp );
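// build one VkBufferImageCopy per mip level: each iteration halves the
// dimensions (clamped to 1) and advances the staging-buffer offset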
while (qtrue) {
Com_Memset(&region, 0, sizeof(region));
region.bufferOffset = buffer_size;
region.bufferRowLength = 0;
region.bufferImageHeight = 0;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.mipLevel = num_regions;
region.imageSubresource.baseArrayLayer = 0;
region.imageSubresource.layerCount = 1;
region.imageOffset.x = x;
region.imageOffset.y = y;
region.imageOffset.z = 0;
region.imageExtent.width = width;
region.imageExtent.height = height;
region.imageExtent.depth = 1;
regions[num_regions] = region;
num_regions++;
buffer_size += width * height * bpp;
if ( num_regions >= mipmaps || (width == 1 && height == 1) || num_regions >= ARRAY_LEN( regions ) )
break;
x >>= 1;
y >>= 1;
width >>= 1;
if (width < 1) width = 1;
height >>= 1;
if (height < 1) height = 1;
}
ensure_staging_buffer_allocation(buffer_size);
Com_Memcpy( vk_world.staging_buffer_ptr, buf, buffer_size );
command_buffer = begin_command_buffer();
record_buffer_memory_barrier( command_buffer, vk_world.staging_buffer, VK_WHOLE_SIZE, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT );
record_image_layout_transition( command_buffer, image->handle, VK_IMAGE_ASPECT_COLOR_BIT, 0, VK_IMAGE_LAYOUT_UNDEFINED, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL );
qvkCmdCopyBufferToImage( command_buffer, vk_world.staging_buffer, image->handle, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, num_regions, regions );
record_image_layout_transition( command_buffer, image->handle, VK_IMAGE_ASPECT_COLOR_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL );
end_command_buffer( command_buffer );
if ( buf != pixels ) {
ri.Hunk_FreeTempMemory( buf );
}
}
void vk_update_descriptor_set( image_t *image, qboolean mipmap ) {
Vk_Sampler_Def sampler_def;
VkDescriptorImageInfo image_info;
VkWriteDescriptorSet descriptor_write;
Com_Memset( &sampler_def, 0, sizeof( sampler_def ) );
sampler_def.address_mode = image->wrapClampMode;
if ( mipmap ) {
sampler_def.gl_mag_filter = gl_filter_max;
sampler_def.gl_min_filter = gl_filter_min;
} else {
sampler_def.gl_mag_filter = GL_LINEAR;
sampler_def.gl_min_filter = GL_LINEAR;
// no anisotropy without mipmaps
sampler_def.noAnisotropy = qtrue;
}
image_info.sampler = vk_find_sampler( &sampler_def );
image_info.imageView = image->view;
image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
descriptor_write.dstSet = image->descriptor;
descriptor_write.dstBinding = 0;
descriptor_write.dstArrayElement = 0;
descriptor_write.descriptorCount = 1;
descriptor_write.pNext = NULL;
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
descriptor_write.pImageInfo = &image_info;
descriptor_write.pBufferInfo = NULL;
descriptor_write.pTexelBufferView = NULL;
qvkUpdateDescriptorSets( vk.device, 1, &descriptor_write, 0, NULL );
}
void vk_destroy_image_resources( VkImage *image, VkImageView *imageView )
{
if ( image != NULL ) {
if ( *image != VK_NULL_HANDLE ) {
qvkDestroyImage( vk.device, *image, NULL );
*image = VK_NULL_HANDLE;
}
}
if ( imageView != NULL ) {
if ( *imageView != VK_NULL_HANDLE ) {
qvkDestroyImageView( vk.device, *imageView, NULL );
*imageView = VK_NULL_HANDLE;
}
}
}
static void set_shader_stage_desc(VkPipelineShaderStageCreateInfo *desc, VkShaderStageFlagBits stage, VkShaderModule shader_module, const char *entry) {
desc->sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO;
desc->pNext = NULL;
desc->flags = 0;
desc->stage = stage;
desc->module = shader_module;
desc->pName = entry;
desc->pSpecializationInfo = NULL;
}
#define FORMAT_DEPTH(format, r_bits, g_bits, b_bits) case(VK_FORMAT_##format): *r = r_bits; *b = b_bits; *g = g_bits; return qtrue;
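// Reports the maximum integer value of each color channel for a surface
// format, e.g. B5G6R5 yields r=31, g=63, b=31; the dithering specialization
// constants below use these to quantize to the real bit depth. Unrecognized
// formats fall back to 8 bits per channel and return qfalse.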
static qboolean vk_surface_format_color_depth( VkFormat format, int *r, int *g, int *b ) {
switch (format) {
// Common formats from https://vulkan.gpuinfo.org/listsurfaceformats.php
FORMAT_DEPTH(B8G8R8A8_UNORM, 255, 255, 255)
FORMAT_DEPTH(B8G8R8A8_SRGB, 255, 255, 255)
FORMAT_DEPTH(A2B10G10R10_UNORM_PACK32, 1023, 1023, 1023)
FORMAT_DEPTH(R8G8B8A8_UNORM, 255, 255, 255)
FORMAT_DEPTH(R8G8B8A8_SRGB, 255, 255, 255)
FORMAT_DEPTH(A2R10G10B10_UNORM_PACK32, 1023, 1023, 1023)
FORMAT_DEPTH(R5G6B5_UNORM_PACK16, 31, 63, 31)
FORMAT_DEPTH(R8G8B8A8_SNORM, 255, 255, 255)
FORMAT_DEPTH(A8B8G8R8_UNORM_PACK32, 255, 255, 255)
FORMAT_DEPTH(A8B8G8R8_SNORM_PACK32, 255, 255, 255)
FORMAT_DEPTH(A8B8G8R8_SRGB_PACK32, 255, 255, 255)
FORMAT_DEPTH(R16G16B16A16_UNORM, 65535, 65535, 65535)
FORMAT_DEPTH(R16G16B16A16_SNORM, 65535, 65535, 65535)
FORMAT_DEPTH(B5G6R5_UNORM_PACK16, 31, 63, 31)
FORMAT_DEPTH(B8G8R8A8_SNORM, 255, 255, 255)
FORMAT_DEPTH(R4G4B4A4_UNORM_PACK16, 15, 15, 15)
FORMAT_DEPTH(B4G4R4A4_UNORM_PACK16, 15, 15, 15)
FORMAT_DEPTH(A1R5G5B5_UNORM_PACK16, 31, 31, 31)
FORMAT_DEPTH(R5G5B5A1_UNORM_PACK16, 31, 31, 31)
FORMAT_DEPTH(B5G5R5A1_UNORM_PACK16, 31, 31, 31)
default:
*r = 255; *g = 255; *b = 255; return qfalse;
}
}
void vk_create_post_process_pipeline( int program_index, uint32_t width, uint32_t height )
{
VkPipelineShaderStageCreateInfo shader_stages[2];
VkPipelineVertexInputStateCreateInfo vertex_input_state;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state;
VkPipelineRasterizationStateCreateInfo rasterization_state;
VkPipelineDepthStencilStateCreateInfo depth_stencil_state;
VkPipelineViewportStateCreateInfo viewport_state;
VkPipelineMultisampleStateCreateInfo multisample_state;
VkPipelineColorBlendStateCreateInfo blend_state;
VkPipelineColorBlendAttachmentState attachment_blend_state;
VkGraphicsPipelineCreateInfo create_info;
VkViewport viewport;
VkRect2D scissor;
VkSpecializationMapEntry spec_entries[9];
VkSpecializationInfo frag_spec_info;
VkPipeline *pipeline;
VkShaderModule fsmodule;
VkRenderPass renderpass;
VkPipelineLayout layout;
VkSampleCountFlagBits samples;
const char *pipeline_name;
qboolean blend;
struct FragSpecData {
float gamma;
float overbright;
float greyscale;
float bloom_threshold;
float bloom_intensity;
int dither;
int depth_r;
int depth_g;
int depth_b;
} frag_spec_data;
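// field order must stay in sync with specialization constant IDs 0..8
// mapped below and with the constant_id declarations in the fragment shaders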
switch ( program_index ) {
case 1: // bloom extraction
pipeline = &vk.bloom_extract_pipeline;
fsmodule = vk.modules.bloom_fs;
renderpass = vk.render_pass.bloom_extract;
layout = vk.pipeline_layout_post_process;
samples = VK_SAMPLE_COUNT_1_BIT;
pipeline_name = "bloom extraction pipeline";
blend = qfalse;
break;
case 2: // final bloom blend
pipeline = &vk.bloom_blend_pipeline;
fsmodule = vk.modules.blend_fs;
renderpass = vk.render_pass.post_bloom;
layout = vk.pipeline_layout_blend;
samples = vkSamples;
pipeline_name = "bloom blend pipeline";
blend = qtrue;
break;
case 3: // capture buffer extraction
pipeline = &vk.capture_pipeline;
fsmodule = vk.modules.gamma_fs;
renderpass = vk.render_pass.capture;
layout = vk.pipeline_layout_post_process;
samples = VK_SAMPLE_COUNT_1_BIT;
pipeline_name = "capture buffer pipeline";
blend = qfalse;
break;
default: // gamma correction
pipeline = &vk.gamma_pipeline;
fsmodule = vk.modules.gamma_fs;
renderpass = vk.render_pass.gamma;
layout = vk.pipeline_layout_post_process;
samples = VK_SAMPLE_COUNT_1_BIT;
pipeline_name = "gamma-correction pipeline";
blend = qfalse;
break;
}
if ( *pipeline != VK_NULL_HANDLE ) {
vk_wait_idle();
qvkDestroyPipeline( vk.device, *pipeline, NULL );
*pipeline = VK_NULL_HANDLE;
}
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = NULL;
vertex_input_state.flags = 0;
vertex_input_state.vertexBindingDescriptionCount = 0;
vertex_input_state.pVertexBindingDescriptions = NULL;
vertex_input_state.vertexAttributeDescriptionCount = 0;
vertex_input_state.pVertexAttributeDescriptions = NULL;
// shaders
set_shader_stage_desc( shader_stages+0, VK_SHADER_STAGE_VERTEX_BIT, vk.modules.gamma_vs, "main" );
set_shader_stage_desc( shader_stages+1, VK_SHADER_STAGE_FRAGMENT_BIT, fsmodule, "main" );
frag_spec_data.gamma = 1.0 / (r_gamma->value);
frag_spec_data.overbright = (float)(1 << tr.overbrightBits);
frag_spec_data.greyscale = r_greyscale->value;
frag_spec_data.bloom_threshold = r_bloom_threshold->value;
frag_spec_data.bloom_intensity = r_bloom_intensity->value;
frag_spec_data.dither = r_dither->integer;
if ( !vk_surface_format_color_depth( vk.present_format.format, &frag_spec_data.depth_r, &frag_spec_data.depth_g, &frag_spec_data.depth_b ) )
ri.Printf( PRINT_ALL, "Format %s not recognized, dither to assume 8bpc\n", vk_format_string( vk.base_format.format ) );
spec_entries[0].constantID = 0;
spec_entries[0].offset = offsetof( struct FragSpecData, gamma );
spec_entries[0].size = sizeof( frag_spec_data.gamma );
spec_entries[1].constantID = 1;
spec_entries[1].offset = offsetof( struct FragSpecData, overbright );
spec_entries[1].size = sizeof( frag_spec_data.overbright );
spec_entries[2].constantID = 2;
spec_entries[2].offset = offsetof( struct FragSpecData, greyscale );
spec_entries[2].size = sizeof( frag_spec_data.greyscale );
spec_entries[3].constantID = 3;
spec_entries[3].offset = offsetof( struct FragSpecData, bloom_threshold );
spec_entries[3].size = sizeof( frag_spec_data.bloom_threshold );
spec_entries[4].constantID = 4;
spec_entries[4].offset = offsetof( struct FragSpecData, bloom_intensity );
spec_entries[4].size = sizeof( frag_spec_data.bloom_intensity );
spec_entries[5].constantID = 5;
spec_entries[5].offset = offsetof( struct FragSpecData, dither );
spec_entries[5].size = sizeof( frag_spec_data.dither );
spec_entries[6].constantID = 6;
spec_entries[6].offset = offsetof( struct FragSpecData, depth_r );
spec_entries[6].size = sizeof( frag_spec_data.depth_r );
spec_entries[7].constantID = 7;
spec_entries[7].offset = offsetof(struct FragSpecData, depth_g);
spec_entries[7].size = sizeof(frag_spec_data.depth_g);
spec_entries[8].constantID = 8;
spec_entries[8].offset = offsetof(struct FragSpecData, depth_b);
spec_entries[8].size = sizeof(frag_spec_data.depth_b);
frag_spec_info.mapEntryCount = 9;
frag_spec_info.pMapEntries = spec_entries;
frag_spec_info.dataSize = sizeof( frag_spec_data );
frag_spec_info.pData = &frag_spec_data;
shader_stages[1].pSpecializationInfo = &frag_spec_info;
//
// Primitive assembly.
//
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.pNext = NULL;
input_assembly_state.flags = 0;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
input_assembly_state.primitiveRestartEnable = VK_FALSE;
//
// Viewport.
//
if ( program_index == 0 ) {
// gamma correction
viewport.x = 0.0 + vk.blitX0;
viewport.y = 0.0 + vk.blitY0;
viewport.width = gls.windowWidth - vk.blitX0 * 2;
viewport.height = gls.windowHeight - vk.blitY0 * 2;
} else {
// other post-processing
viewport.x = 0.0;
viewport.y = 0.0;
viewport.width = width;
viewport.height = height;
}
viewport.minDepth = 0.0;
viewport.maxDepth = 1.0;
scissor.offset.x = viewport.x;
scissor.offset.y = viewport.y;
scissor.extent.width = viewport.width;
scissor.extent.height = viewport.height;
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.pNext = NULL;
viewport_state.flags = 0;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
//
// Rasterization.
//
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.pNext = NULL;
rasterization_state.flags = 0;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
//rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; // VK_CULL_MODE_NONE;
rasterization_state.cullMode = VK_CULL_MODE_NONE;
rasterization_state.frontFace = VK_FRONT_FACE_CLOCKWISE; // Q3 defaults to clockwise vertex order
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.depthBiasConstantFactor = 0.0f;
rasterization_state.depthBiasClamp = 0.0f;
rasterization_state.depthBiasSlopeFactor = 0.0f;
rasterization_state.lineWidth = 1.0f;
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = NULL;
multisample_state.flags = 0;
multisample_state.rasterizationSamples = samples;
multisample_state.sampleShadingEnable = VK_FALSE;
multisample_state.minSampleShading = 1.0f;
multisample_state.pSampleMask = NULL;
multisample_state.alphaToCoverageEnable = VK_FALSE;
multisample_state.alphaToOneEnable = VK_FALSE;
Com_Memset(&attachment_blend_state, 0, sizeof(attachment_blend_state));
attachment_blend_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
if ( blend ) {
attachment_blend_state.blendEnable = VK_TRUE;
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
} else {
attachment_blend_state.blendEnable = VK_FALSE;
}
blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
blend_state.pNext = NULL;
blend_state.flags = 0;
blend_state.logicOpEnable = VK_FALSE;
blend_state.logicOp = VK_LOGIC_OP_COPY;
blend_state.attachmentCount = 1;
blend_state.pAttachments = &attachment_blend_state;
blend_state.blendConstants[0] = 0.0f;
blend_state.blendConstants[1] = 0.0f;
blend_state.blendConstants[2] = 0.0f;
blend_state.blendConstants[3] = 0.0f;
Com_Memset( &depth_stencil_state, 0, sizeof( depth_stencil_state ) );
depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depth_stencil_state.pNext = NULL;
depth_stencil_state.flags = 0;
depth_stencil_state.depthTestEnable = VK_FALSE;
depth_stencil_state.depthWriteEnable = VK_FALSE;
depth_stencil_state.depthCompareOp = VK_COMPARE_OP_NEVER;
depth_stencil_state.depthBoundsTestEnable = VK_FALSE;
depth_stencil_state.stencilTestEnable = VK_FALSE;
depth_stencil_state.minDepthBounds = 0.0f;
depth_stencil_state.maxDepthBounds = 1.0f;
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.pNext = NULL;
create_info.flags = 0;
create_info.stageCount = 2;
create_info.pStages = shader_stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pTessellationState = NULL;
create_info.pViewportState = &viewport_state;
create_info.pRasterizationState = &rasterization_state;
create_info.pMultisampleState = &multisample_state;
create_info.pDepthStencilState = &depth_stencil_state; // depth/stencil tests are disabled above, safe for all post-process passes
create_info.pColorBlendState = &blend_state;
create_info.pDynamicState = NULL;
create_info.layout = layout;
create_info.renderPass = renderpass;
create_info.subpass = 0;
create_info.basePipelineHandle = VK_NULL_HANDLE;
create_info.basePipelineIndex = -1;
VK_CHECK( qvkCreateGraphicsPipelines( vk.device, VK_NULL_HANDLE, 1, &create_info, NULL, pipeline ) );
SET_OBJECT_NAME( *pipeline, pipeline_name, VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT );
}
void vk_create_blur_pipeline( uint32_t index, uint32_t width, uint32_t height, qboolean horizontal_pass )
{
VkPipelineShaderStageCreateInfo shader_stages[2];
VkPipelineVertexInputStateCreateInfo vertex_input_state;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state;
VkPipelineRasterizationStateCreateInfo rasterization_state;
VkPipelineViewportStateCreateInfo viewport_state;
VkPipelineMultisampleStateCreateInfo multisample_state;
VkPipelineColorBlendStateCreateInfo blend_state;
VkPipelineColorBlendAttachmentState attachment_blend_state;
VkGraphicsPipelineCreateInfo create_info;
VkViewport viewport;
VkRect2D scissor;
float frag_spec_data[3]; // x-offset, y-offset, correction
VkSpecializationMapEntry spec_entries[3];
VkSpecializationInfo frag_spec_info;
VkPipeline *pipeline;
pipeline = &vk.blur_pipeline[ index ];
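// blur pipelines come in horizontal/vertical pairs, one pair per downscaled
// bloom level; 'index' also selects the matching blur render pass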
if ( *pipeline != VK_NULL_HANDLE ) {
vk_wait_idle();
qvkDestroyPipeline( vk.device, *pipeline, NULL );
*pipeline = VK_NULL_HANDLE;
}
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = NULL;
vertex_input_state.flags = 0;
vertex_input_state.vertexBindingDescriptionCount = 0;
vertex_input_state.pVertexBindingDescriptions = NULL;
vertex_input_state.vertexAttributeDescriptionCount = 0;
vertex_input_state.pVertexAttributeDescriptions = NULL;
// shaders
set_shader_stage_desc( shader_stages+0, VK_SHADER_STAGE_VERTEX_BIT, vk.modules.gamma_vs, "main" );
set_shader_stage_desc( shader_stages+1, VK_SHADER_STAGE_FRAGMENT_BIT, vk.modules.blur_fs, "main" );
frag_spec_data[0] = 1.2 / (float) width; // x offset
frag_spec_data[1] = 1.2 / (float) height; // y offset
frag_spec_data[2] = 1.0; // correction factor
if ( horizontal_pass ) {
frag_spec_data[1] = 0.0;
} else {
frag_spec_data[0] = 0.0;
}
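// separable blur: zeroing one axis offset turns the same shader into either
// a horizontal or a vertical pass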
spec_entries[0].constantID = 0;
spec_entries[0].offset = 0 * sizeof( float );
spec_entries[0].size = sizeof( float );
spec_entries[1].constantID = 1;
spec_entries[1].offset = 1 * sizeof( float );
spec_entries[1].size = sizeof( float );
spec_entries[2].constantID = 2;
spec_entries[2].offset = 2 * sizeof( float );
spec_entries[2].size = sizeof( float );
frag_spec_info.mapEntryCount = 3;
frag_spec_info.pMapEntries = spec_entries;
frag_spec_info.dataSize = 3 * sizeof( float );
frag_spec_info.pData = &frag_spec_data[0];
shader_stages[1].pSpecializationInfo = &frag_spec_info;
//
// Primitive assembly.
//
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.pNext = NULL;
input_assembly_state.flags = 0;
input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP;
input_assembly_state.primitiveRestartEnable = VK_FALSE;
//
// Viewport.
//
viewport.x = 0.0;
viewport.y = 0.0;
viewport.width = width;
viewport.height = height;
viewport.minDepth = 0.0;
viewport.maxDepth = 1.0;
scissor.offset.x = viewport.x;
scissor.offset.y = viewport.y;
scissor.extent.width = viewport.width;
scissor.extent.height = viewport.height;
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.pNext = NULL;
viewport_state.flags = 0;
viewport_state.viewportCount = 1;
viewport_state.pViewports = &viewport;
viewport_state.scissorCount = 1;
viewport_state.pScissors = &scissor;
//
// Rasterization.
//
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.pNext = NULL;
rasterization_state.flags = 0;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
rasterization_state.polygonMode = VK_POLYGON_MODE_FILL;
//rasterization_state.cullMode = VK_CULL_MODE_BACK_BIT; // VK_CULL_MODE_NONE;
rasterization_state.cullMode = VK_CULL_MODE_NONE;
rasterization_state.frontFace = VK_FRONT_FACE_CLOCKWISE; // Q3 defaults to clockwise vertex order
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.depthBiasConstantFactor = 0.0f;
rasterization_state.depthBiasClamp = 0.0f;
rasterization_state.depthBiasSlopeFactor = 0.0f;
rasterization_state.lineWidth = 1.0f;
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = NULL;
multisample_state.flags = 0;
multisample_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
multisample_state.sampleShadingEnable = VK_FALSE;
multisample_state.minSampleShading = 1.0f;
multisample_state.pSampleMask = NULL;
multisample_state.alphaToCoverageEnable = VK_FALSE;
multisample_state.alphaToOneEnable = VK_FALSE;
Com_Memset(&attachment_blend_state, 0, sizeof(attachment_blend_state));
attachment_blend_state.blendEnable = VK_FALSE;
attachment_blend_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
blend_state.pNext = NULL;
blend_state.flags = 0;
blend_state.logicOpEnable = VK_FALSE;
blend_state.logicOp = VK_LOGIC_OP_COPY;
blend_state.attachmentCount = 1;
blend_state.pAttachments = &attachment_blend_state;
blend_state.blendConstants[0] = 0.0f;
blend_state.blendConstants[1] = 0.0f;
blend_state.blendConstants[2] = 0.0f;
blend_state.blendConstants[3] = 0.0f;
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.pNext = NULL;
create_info.flags = 0;
create_info.stageCount = 2;
create_info.pStages = shader_stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pTessellationState = NULL;
create_info.pViewportState = &viewport_state;
create_info.pRasterizationState = &rasterization_state;
create_info.pMultisampleState = &multisample_state;
create_info.pDepthStencilState = NULL;
create_info.pColorBlendState = &blend_state;
create_info.pDynamicState = NULL;
create_info.layout = vk.pipeline_layout_post_process; // one input attachment
create_info.renderPass = vk.render_pass.blur[ index ];
create_info.subpass = 0;
create_info.basePipelineHandle = VK_NULL_HANDLE;
create_info.basePipelineIndex = -1;
VK_CHECK( qvkCreateGraphicsPipelines( vk.device, VK_NULL_HANDLE, 1, &create_info, NULL, pipeline ) );
SET_OBJECT_NAME( *pipeline, va( "%s blur pipeline %i", horizontal_pass ? "horizontal" : "vertical", index/2 + 1 ), VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT );
}
static VkVertexInputBindingDescription bindings[8];
static VkVertexInputAttributeDescription attribs[8];
static uint32_t num_binds;
static uint32_t num_attrs;
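// Helpers used by create_pipeline() to accumulate vertex input state; each
// attribute reads from offset 0 of its own per-vertex binding, e.g.:
//   push_bind( 0, sizeof( vec4_t ) );
//   push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );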
static void push_bind( uint32_t binding, uint32_t stride )
{
bindings[ num_binds ].binding = binding;
bindings[ num_binds ].stride = stride;
bindings[ num_binds ].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
num_binds++;
}
static void push_attr( uint32_t location, uint32_t binding, VkFormat format )
{
attribs[ num_attrs ].location = location;
attribs[ num_attrs ].binding = binding;
attribs[ num_attrs ].format = format;
attribs[ num_attrs ].offset = 0;
num_attrs++;
}
VkPipeline create_pipeline( const Vk_Pipeline_Def *def, renderPass_t renderPassIndex ) {
VkShaderModule *vs_module = NULL;
VkShaderModule *fs_module = NULL;
int32_t vert_spec_data[1]; // clipping
floatint_t frag_spec_data[9]; // 0:alpha-test-func, 1:alpha-test-value, 2:depth-fragment, 3:alpha-to-coverage, 4:color_mode, 5:abs_light, 6:multitexture mode, 7:discard mode, 8:identity color
VkSpecializationMapEntry spec_entries[10];
VkSpecializationInfo vert_spec_info;
VkSpecializationInfo frag_spec_info;
VkPipelineVertexInputStateCreateInfo vertex_input_state;
VkPipelineInputAssemblyStateCreateInfo input_assembly_state;
VkPipelineRasterizationStateCreateInfo rasterization_state;
VkPipelineViewportStateCreateInfo viewport_state;
VkPipelineMultisampleStateCreateInfo multisample_state;
VkPipelineDepthStencilStateCreateInfo depth_stencil_state;
VkPipelineColorBlendStateCreateInfo blend_state;
VkPipelineColorBlendAttachmentState attachment_blend_state;
VkPipelineDynamicStateCreateInfo dynamic_state;
VkDynamicState dynamic_state_array[] = { VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR };
VkGraphicsPipelineCreateInfo create_info;
VkPipeline pipeline;
VkPipelineShaderStageCreateInfo shader_stages[2];
VkBool32 alphaToCoverage = VK_FALSE;
unsigned int atest_bits;
unsigned int state_bits = def->state_bits;
switch ( def->shader_type ) {
case TYPE_SIGNLE_TEXTURE_LIGHTING:
vs_module = &vk.modules.vert.light[0];
fs_module = &vk.modules.frag.light[0][0];
break;
case TYPE_SIGNLE_TEXTURE_LIGHTING_LINEAR:
vs_module = &vk.modules.vert.light[0];
fs_module = &vk.modules.frag.light[1][0];
break;
case TYPE_SIGNLE_TEXTURE_DF:
state_bits |= GLS_DEPTHMASK_TRUE;
vs_module = &vk.modules.vert.gen[0][0][0][0];
fs_module = &vk.modules.frag.gen0_df;
break;
case TYPE_SIGNLE_TEXTURE_IDENTITY:
vs_module = &vk.modules.vert.gen0_ident;
fs_module = &vk.modules.frag.gen0_ident;
break;
case TYPE_SIGNLE_TEXTURE:
vs_module = &vk.modules.vert.gen[0][0][0][0];
fs_module = &vk.modules.frag.gen[0][0][0];
break;
case TYPE_SIGNLE_TEXTURE_ENV:
vs_module = &vk.modules.vert.gen[0][0][1][0];
fs_module = &vk.modules.frag.gen[0][0][0];
break;
case TYPE_MULTI_TEXTURE_MUL2:
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD2:
vs_module = &vk.modules.vert.gen[1][0][0][0];
fs_module = &vk.modules.frag.gen[1][0][0];
break;
case TYPE_MULTI_TEXTURE_MUL2_ENV:
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY_ENV:
case TYPE_MULTI_TEXTURE_ADD2_ENV:
vs_module = &vk.modules.vert.gen[1][0][1][0];
fs_module = &vk.modules.frag.gen[1][0][0];
break;
case TYPE_MULTI_TEXTURE_MUL3:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD3:
vs_module = &vk.modules.vert.gen[2][0][0][0];
fs_module = &vk.modules.frag.gen[2][0][0];
break;
case TYPE_MULTI_TEXTURE_MUL3_ENV:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY_ENV:
case TYPE_MULTI_TEXTURE_ADD3_ENV:
vs_module = &vk.modules.vert.gen[2][0][1][0];
fs_module = &vk.modules.frag.gen[2][0][0];
break;
case TYPE_BLEND2_ADD:
case TYPE_BLEND2_MUL:
case TYPE_BLEND2_ALPHA:
case TYPE_BLEND2_ONE_MINUS_ALPHA:
case TYPE_BLEND2_MIX_ALPHA:
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA:
vs_module = &vk.modules.vert.gen[1][1][0][0];
fs_module = &vk.modules.frag.gen[1][1][0];
break;
case TYPE_BLEND2_ADD_ENV:
case TYPE_BLEND2_MUL_ENV:
case TYPE_BLEND2_ALPHA_ENV:
case TYPE_BLEND2_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND2_MIX_ALPHA_ENV:
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA_ENV:
vs_module = &vk.modules.vert.gen[1][1][1][0];
fs_module = &vk.modules.frag.gen[1][1][0];
break;
case TYPE_BLEND3_ADD:
case TYPE_BLEND3_MUL:
case TYPE_BLEND3_ALPHA:
case TYPE_BLEND3_ONE_MINUS_ALPHA:
case TYPE_BLEND3_MIX_ALPHA:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA:
vs_module = &vk.modules.vert.gen[2][1][0][0];
fs_module = &vk.modules.frag.gen[2][1][0];
break;
case TYPE_BLEND3_ADD_ENV:
case TYPE_BLEND3_MUL_ENV:
case TYPE_BLEND3_ALPHA_ENV:
case TYPE_BLEND3_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_MIX_ALPHA_ENV:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA_ENV:
vs_module = &vk.modules.vert.gen[2][1][1][0];
fs_module = &vk.modules.frag.gen[2][1][0];
break;
case TYPE_COLOR_WHITE:
case TYPE_COLOR_GREEN:
case TYPE_COLOR_RED:
vs_module = &vk.modules.color_vs;
fs_module = &vk.modules.color_fs;
break;
case TYPE_FOG_ONLY:
vs_module = &vk.modules.fog_vs;
fs_module = &vk.modules.fog_fs;
break;
case TYPE_DOT:
vs_module = &vk.modules.dot_vs;
fs_module = &vk.modules.dot_fs;
break;
default:
ri.Error(ERR_DROP, "create_pipeline: unknown shader type %i\n", def->shader_type);
return 0;
}
if ( def->fog_stage ) {
switch ( def->shader_type ) {
case TYPE_FOG_ONLY:
case TYPE_DOT:
case TYPE_SIGNLE_TEXTURE_DF:
case TYPE_SIGNLE_TEXTURE_IDENTITY:
case TYPE_COLOR_WHITE:
case TYPE_COLOR_GREEN:
case TYPE_COLOR_RED:
break;
default:
// switch to fogged modules
vs_module++;
fs_module++;
break;
}
}
set_shader_stage_desc(shader_stages+0, VK_SHADER_STAGE_VERTEX_BIT, *vs_module, "main");
set_shader_stage_desc(shader_stages+1, VK_SHADER_STAGE_FRAGMENT_BIT, *fs_module, "main");
Com_Memset( vert_spec_data, 0, sizeof( vert_spec_data ) );
Com_Memset( frag_spec_data, 0, sizeof( frag_spec_data ) );
//vert_spec_data[0] = def->clipping_plane ? 1 : 0;
// fragment shader specialization data
atest_bits = state_bits & GLS_ATEST_BITS;
switch ( atest_bits ) {
case GLS_ATEST_GT_0:
frag_spec_data[0].i = 1; // not equal to zero (alpha > 0)
frag_spec_data[1].f = 0.0f;
break;
case GLS_ATEST_LT_80:
frag_spec_data[0].i = 2; // less than
frag_spec_data[1].f = 0.5f;
break;
case GLS_ATEST_GE_80:
frag_spec_data[0].i = 3; // greater or equal
frag_spec_data[1].f = 0.5f;
break;
default:
frag_spec_data[0].i = 0;
frag_spec_data[1].f = 0.0f;
break;
}
// depth fragment threshold
frag_spec_data[2].f = 0.85f;
if ( r_ext_alpha_to_coverage->integer && vkSamples != VK_SAMPLE_COUNT_1_BIT && frag_spec_data[0].i ) {
frag_spec_data[3].i = 1;
alphaToCoverage = VK_TRUE;
}
// constant color
switch ( def->shader_type ) {
default: frag_spec_data[4].i = 0; break;
case TYPE_COLOR_GREEN: frag_spec_data[4].i = 1; break;
case TYPE_COLOR_RED: frag_spec_data[4].i = 2; break;
}
// abs lighting
switch ( def->shader_type ) {
case TYPE_SIGNLE_TEXTURE_LIGHTING:
case TYPE_SIGNLE_TEXTURE_LIGHTING_LINEAR:
frag_spec_data[5].i = def->abs_light ? 1 : 0;
default:
break;
}
// multitexture mode
switch ( def->shader_type ) {
case TYPE_MULTI_TEXTURE_MUL2:
case TYPE_MULTI_TEXTURE_MUL2_ENV:
case TYPE_MULTI_TEXTURE_MUL3:
case TYPE_MULTI_TEXTURE_MUL3_ENV:
case TYPE_BLEND2_MUL:
case TYPE_BLEND2_MUL_ENV:
case TYPE_BLEND3_MUL:
case TYPE_BLEND3_MUL_ENV:
frag_spec_data[6].i = 0;
break;
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY_ENV:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY_ENV:
frag_spec_data[6].i = 1;
break;
case TYPE_MULTI_TEXTURE_ADD2:
case TYPE_MULTI_TEXTURE_ADD2_ENV:
case TYPE_MULTI_TEXTURE_ADD3:
case TYPE_MULTI_TEXTURE_ADD3_ENV:
case TYPE_BLEND2_ADD:
case TYPE_BLEND2_ADD_ENV:
case TYPE_BLEND3_ADD:
case TYPE_BLEND3_ADD_ENV:
frag_spec_data[6].i = 2;
break;
case TYPE_BLEND2_ALPHA:
case TYPE_BLEND2_ALPHA_ENV:
case TYPE_BLEND3_ALPHA:
case TYPE_BLEND3_ALPHA_ENV:
frag_spec_data[6].i = 3;
break;
case TYPE_BLEND2_ONE_MINUS_ALPHA:
case TYPE_BLEND2_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_ONE_MINUS_ALPHA:
case TYPE_BLEND3_ONE_MINUS_ALPHA_ENV:
frag_spec_data[6].i = 4;
break;
case TYPE_BLEND2_MIX_ALPHA:
case TYPE_BLEND2_MIX_ALPHA_ENV:
case TYPE_BLEND3_MIX_ALPHA:
case TYPE_BLEND3_MIX_ALPHA_ENV:
frag_spec_data[6].i = 5;
break;
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA_ENV:
frag_spec_data[6].i = 6;
break;
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA:
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA_ENV:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA_ENV:
frag_spec_data[6].i = 7;
break;
default:
break;
}
frag_spec_data[8].f = tr.identityLight;
//
// vertex module specialization data
//
spec_entries[0].constantID = 0; // clip_plane
spec_entries[0].offset = 0 * sizeof( int32_t );
spec_entries[0].size = sizeof( int32_t );
vert_spec_info.mapEntryCount = 1;
vert_spec_info.pMapEntries = spec_entries + 0;
vert_spec_info.dataSize = 1 * sizeof( int32_t );
vert_spec_info.pData = &vert_spec_data[0];
shader_stages[0].pSpecializationInfo = &vert_spec_info;
//
// fragment module specialization data
//
spec_entries[1].constantID = 0; // alpha-test-function
spec_entries[1].offset = 0 * sizeof( int32_t );
spec_entries[1].size = sizeof( int32_t );
spec_entries[2].constantID = 1; // alpha-test-value
spec_entries[2].offset = 1 * sizeof( int32_t );
spec_entries[2].size = sizeof( float );
spec_entries[3].constantID = 2; // depth-fragment
spec_entries[3].offset = 2 * sizeof( int32_t );
spec_entries[3].size = sizeof( float );
spec_entries[4].constantID = 3; // alpha-to-coverage
spec_entries[4].offset = 3 * sizeof( int32_t );
spec_entries[4].size = sizeof( int32_t );
spec_entries[5].constantID = 4; // color_mode
spec_entries[5].offset = 4 * sizeof( int32_t );
spec_entries[5].size = sizeof( int32_t );
spec_entries[6].constantID = 5; // abs_light
spec_entries[6].offset = 5 * sizeof( int32_t );
spec_entries[6].size = sizeof( int32_t );
spec_entries[7].constantID = 6; // multitexture mode
spec_entries[7].offset = 6 * sizeof( int32_t );
spec_entries[7].size = sizeof( int32_t );
spec_entries[8].constantID = 7; // discard mode
spec_entries[8].offset = 7 * sizeof( int32_t );
spec_entries[8].size = sizeof( int32_t );
spec_entries[9].constantID = 8; // identity color
spec_entries[9].offset = 8 * sizeof( int32_t );
spec_entries[9].size = sizeof( float );
frag_spec_info.mapEntryCount = 9;
frag_spec_info.pMapEntries = spec_entries + 1;
frag_spec_info.dataSize = sizeof( int32_t ) * 9;
frag_spec_info.pData = &frag_spec_data[0];
shader_stages[1].pSpecializationInfo = &frag_spec_info;
//
// Vertex input
//
num_binds = num_attrs = 0;
switch ( def->shader_type ) {
case TYPE_FOG_ONLY:
case TYPE_DOT:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_COLOR_WHITE:
case TYPE_COLOR_GREEN:
case TYPE_COLOR_RED:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_SIGNLE_TEXTURE_IDENTITY:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
break;
case TYPE_SIGNLE_TEXTURE:
case TYPE_SIGNLE_TEXTURE_DF:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
break;
case TYPE_SIGNLE_TEXTURE_ENV:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
//push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 5, sizeof( vec4_t ) ); // normals
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
//push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 5, 5, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_SIGNLE_TEXTURE_LIGHTING:
case TYPE_SIGNLE_TEXTURE_LIGHTING_LINEAR:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( vec2_t ) ); // st0 array
push_bind( 2, sizeof( vec4_t ) ); // normals array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R32G32_SFLOAT );
push_attr( 2, 2, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_MULTI_TEXTURE_MUL2:
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD2:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
break;
case TYPE_MULTI_TEXTURE_MUL2_ENV:
case TYPE_MULTI_TEXTURE_ADD2_IDENTITY_ENV:
case TYPE_MULTI_TEXTURE_ADD2_ENV:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
//push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 5, sizeof( vec4_t ) ); // normals
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
//push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 5, 5, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_MULTI_TEXTURE_MUL3:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY:
case TYPE_MULTI_TEXTURE_ADD3:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 4, sizeof( vec2_t ) ); // st2 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 4, 4, VK_FORMAT_R32G32_SFLOAT );
break;
case TYPE_MULTI_TEXTURE_MUL3_ENV:
case TYPE_MULTI_TEXTURE_ADD3_IDENTITY_ENV:
case TYPE_MULTI_TEXTURE_ADD3_ENV:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color array
//push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 4, sizeof( vec2_t ) ); // st2 array
push_bind( 5, sizeof( vec4_t ) ); // normals
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
//push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 4, 4, VK_FORMAT_R32G32_SFLOAT );
push_attr( 5, 5, VK_FORMAT_R32G32B32A32_SFLOAT );
break;
case TYPE_BLEND2_ADD:
case TYPE_BLEND2_MUL:
case TYPE_BLEND2_ALPHA:
case TYPE_BLEND2_ONE_MINUS_ALPHA:
case TYPE_BLEND2_MIX_ALPHA:
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color0 array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 6, sizeof( color4ub_t ) ); // color1 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 6, 6, VK_FORMAT_R8G8B8A8_UNORM );
break;
case TYPE_BLEND2_ADD_ENV:
case TYPE_BLEND2_MUL_ENV:
case TYPE_BLEND2_ALPHA_ENV:
case TYPE_BLEND2_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND2_MIX_ALPHA_ENV:
case TYPE_BLEND2_MIX_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND2_DST_COLOR_SRC_ALPHA_ENV:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color0 array
//push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 5, sizeof( vec4_t ) ); // normals
push_bind( 6, sizeof( color4ub_t ) ); // color1 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
//push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 5, 5, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 6, 6, VK_FORMAT_R8G8B8A8_UNORM );
break;
case TYPE_BLEND3_ADD:
case TYPE_BLEND3_MUL:
case TYPE_BLEND3_ALPHA:
case TYPE_BLEND3_ONE_MINUS_ALPHA:
case TYPE_BLEND3_MIX_ALPHA:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color0 array
push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 4, sizeof( vec2_t ) ); // st2 array
push_bind( 6, sizeof( color4ub_t ) ); // color1 array
push_bind( 7, sizeof( color4ub_t ) ); // color2 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 4, 4, VK_FORMAT_R32G32_SFLOAT );
push_attr( 6, 6, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 7, 7, VK_FORMAT_R8G8B8A8_UNORM );
break;
case TYPE_BLEND3_ADD_ENV:
case TYPE_BLEND3_MUL_ENV:
case TYPE_BLEND3_ALPHA_ENV:
case TYPE_BLEND3_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_MIX_ALPHA_ENV:
case TYPE_BLEND3_MIX_ONE_MINUS_ALPHA_ENV:
case TYPE_BLEND3_DST_COLOR_SRC_ALPHA_ENV:
push_bind( 0, sizeof( vec4_t ) ); // xyz array
push_bind( 1, sizeof( color4ub_t ) ); // color0 array
//push_bind( 2, sizeof( vec2_t ) ); // st0 array
push_bind( 3, sizeof( vec2_t ) ); // st1 array
push_bind( 4, sizeof( vec2_t ) ); // st2 array
push_bind( 5, sizeof( vec4_t ) ); // normals
push_bind( 6, sizeof( color4ub_t ) ); // color1 array
push_bind( 7, sizeof( color4ub_t ) ); // color2 array
push_attr( 0, 0, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 1, 1, VK_FORMAT_R8G8B8A8_UNORM );
//push_attr( 2, 2, VK_FORMAT_R32G32_SFLOAT );
push_attr( 3, 3, VK_FORMAT_R32G32_SFLOAT );
push_attr( 4, 4, VK_FORMAT_R32G32_SFLOAT );
push_attr( 5, 5, VK_FORMAT_R32G32B32A32_SFLOAT );
push_attr( 6, 6, VK_FORMAT_R8G8B8A8_UNORM );
push_attr( 7, 7, VK_FORMAT_R8G8B8A8_UNORM );
break;
default:
ri.Error( ERR_DROP, "%s: invalid shader type - %i", __func__, def->shader_type );
break;
}
vertex_input_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO;
vertex_input_state.pNext = NULL;
vertex_input_state.flags = 0;
vertex_input_state.pVertexBindingDescriptions = bindings;
vertex_input_state.pVertexAttributeDescriptions = attribs;
vertex_input_state.vertexBindingDescriptionCount = num_binds;
vertex_input_state.vertexAttributeDescriptionCount = num_attrs;
//
// Primitive assembly.
//
input_assembly_state.sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO;
input_assembly_state.pNext = NULL;
input_assembly_state.flags = 0;
input_assembly_state.primitiveRestartEnable = VK_FALSE;
switch ( def->primitives ) {
case LINE_LIST: input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST; break;
case POINT_LIST: input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST; break;
case TRIANGLE_STRIP: input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; break;
default: input_assembly_state.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST; break;
}
//
// Viewport.
//
viewport_state.sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO;
viewport_state.pNext = NULL;
viewport_state.flags = 0;
viewport_state.viewportCount = 1;
viewport_state.pViewports = NULL; // dynamic viewport state
viewport_state.scissorCount = 1;
viewport_state.pScissors = NULL; // dynamic scissor state
//
// Rasterization.
//
rasterization_state.sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO;
rasterization_state.pNext = NULL;
rasterization_state.flags = 0;
rasterization_state.depthClampEnable = VK_FALSE;
rasterization_state.rasterizerDiscardEnable = VK_FALSE;
if ( def->shader_type == TYPE_DOT ) {
rasterization_state.polygonMode = VK_POLYGON_MODE_POINT;
} else {
rasterization_state.polygonMode = (state_bits & GLS_POLYMODE_LINE) ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL;
}
switch ( def->face_culling ) {
case CT_TWO_SIDED:
rasterization_state.cullMode = VK_CULL_MODE_NONE;
break;
case CT_FRONT_SIDED:
rasterization_state.cullMode = (def->mirror ? VK_CULL_MODE_FRONT_BIT : VK_CULL_MODE_BACK_BIT);
break;
case CT_BACK_SIDED:
rasterization_state.cullMode = (def->mirror ? VK_CULL_MODE_BACK_BIT : VK_CULL_MODE_FRONT_BIT);
break;
default:
ri.Error( ERR_DROP, "create_pipeline: invalid face culling mode %i\n", def->face_culling );
break;
}
rasterization_state.frontFace = VK_FRONT_FACE_CLOCKWISE; // Q3 defaults to clockwise vertex order
// depth bias state
if ( def->polygon_offset ) {
rasterization_state.depthBiasEnable = VK_TRUE;
rasterization_state.depthBiasClamp = 0.0f;
#ifdef USE_REVERSED_DEPTH
rasterization_state.depthBiasConstantFactor = -r_offsetUnits->value;
rasterization_state.depthBiasSlopeFactor = -r_offsetFactor->value;
#else
rasterization_state.depthBiasConstantFactor = r_offsetUnits->value;
rasterization_state.depthBiasSlopeFactor = r_offsetFactor->value;
#endif
} else {
rasterization_state.depthBiasEnable = VK_FALSE;
rasterization_state.depthBiasClamp = 0.0f;
rasterization_state.depthBiasConstantFactor = 0.0f;
rasterization_state.depthBiasSlopeFactor = 0.0f;
}
if ( def->line_width )
rasterization_state.lineWidth = (float)def->line_width;
else
rasterization_state.lineWidth = 1.0f;
multisample_state.sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO;
multisample_state.pNext = NULL;
multisample_state.flags = 0;
multisample_state.rasterizationSamples = (vk.renderPassIndex == RENDER_PASS_SCREENMAP) ? vk.screenMapSamples : vkSamples;
multisample_state.sampleShadingEnable = VK_FALSE;
multisample_state.minSampleShading = 1.0f;
multisample_state.pSampleMask = NULL;
multisample_state.alphaToCoverageEnable = alphaToCoverage;
multisample_state.alphaToOneEnable = VK_FALSE;
Com_Memset( &depth_stencil_state, 0, sizeof( depth_stencil_state ) );
depth_stencil_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO;
depth_stencil_state.pNext = NULL;
depth_stencil_state.flags = 0;
depth_stencil_state.depthTestEnable = (state_bits & GLS_DEPTHTEST_DISABLE) ? VK_FALSE : VK_TRUE;
depth_stencil_state.depthWriteEnable = (state_bits & GLS_DEPTHMASK_TRUE) ? VK_TRUE : VK_FALSE;
#ifdef USE_REVERSED_DEPTH
depth_stencil_state.depthCompareOp = (state_bits & GLS_DEPTHFUNC_EQUAL) ? VK_COMPARE_OP_EQUAL : VK_COMPARE_OP_GREATER_OR_EQUAL;
#else
depth_stencil_state.depthCompareOp = (state_bits & GLS_DEPTHFUNC_EQUAL) ? VK_COMPARE_OP_EQUAL : VK_COMPARE_OP_LESS_OR_EQUAL;
#endif
depth_stencil_state.depthBoundsTestEnable = VK_FALSE;
depth_stencil_state.stencilTestEnable = (def->shadow_phase != SHADOW_DISABLED) ? VK_TRUE : VK_FALSE;
if (def->shadow_phase == SHADOW_EDGES) {
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = (def->face_culling == CT_FRONT_SIDED) ? VK_STENCIL_OP_INCREMENT_AND_CLAMP : VK_STENCIL_OP_DECREMENT_AND_CLAMP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.compareOp = VK_COMPARE_OP_ALWAYS;
depth_stencil_state.front.compareMask = 255;
depth_stencil_state.front.writeMask = 255;
depth_stencil_state.front.reference = 0;
depth_stencil_state.back = depth_stencil_state.front;
} else if (def->shadow_phase == SHADOW_FS_QUAD) {
depth_stencil_state.front.failOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.passOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.depthFailOp = VK_STENCIL_OP_KEEP;
depth_stencil_state.front.compareOp = VK_COMPARE_OP_NOT_EQUAL;
depth_stencil_state.front.compareMask = 255;
depth_stencil_state.front.writeMask = 255;
depth_stencil_state.front.reference = 0;
depth_stencil_state.back = depth_stencil_state.front;
}
depth_stencil_state.minDepthBounds = 0.0f;
depth_stencil_state.maxDepthBounds = 1.0f;
Com_Memset(&attachment_blend_state, 0, sizeof(attachment_blend_state));
attachment_blend_state.blendEnable = (state_bits & (GLS_SRCBLEND_BITS | GLS_DSTBLEND_BITS)) ? VK_TRUE : VK_FALSE;
if (def->shadow_phase == SHADOW_EDGES || def->shader_type == TYPE_SIGNLE_TEXTURE_DF)
attachment_blend_state.colorWriteMask = 0;
else
attachment_blend_state.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT;
if (attachment_blend_state.blendEnable) {
switch (state_bits & GLS_SRCBLEND_BITS) {
case GLS_SRCBLEND_ZERO:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ZERO;
break;
case GLS_SRCBLEND_ONE:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GLS_SRCBLEND_DST_COLOR:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_COLOR;
break;
case GLS_SRCBLEND_ONE_MINUS_DST_COLOR:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR;
break;
case GLS_SRCBLEND_SRC_ALPHA:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
break;
case GLS_SRCBLEND_ONE_MINUS_SRC_ALPHA:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GLS_SRCBLEND_DST_ALPHA:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_DST_ALPHA;
break;
case GLS_SRCBLEND_ONE_MINUS_DST_ALPHA:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
break;
case GLS_SRCBLEND_ALPHA_SATURATE:
attachment_blend_state.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA_SATURATE;
break;
default:
ri.Error( ERR_DROP, "create_pipeline: invalid src blend state bits\n" );
break;
}
switch (state_bits & GLS_DSTBLEND_BITS) {
case GLS_DSTBLEND_ZERO:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ZERO;
break;
case GLS_DSTBLEND_ONE:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE;
break;
case GLS_DSTBLEND_SRC_COLOR:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
break;
case GLS_DSTBLEND_ONE_MINUS_SRC_COLOR:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
break;
case GLS_DSTBLEND_SRC_ALPHA:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
break;
case GLS_DSTBLEND_ONE_MINUS_SRC_ALPHA:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
break;
case GLS_DSTBLEND_DST_ALPHA:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_DST_ALPHA;
break;
case GLS_DSTBLEND_ONE_MINUS_DST_ALPHA:
attachment_blend_state.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA;
break;
default:
ri.Error( ERR_DROP, "create_pipeline: invalid dst blend state bits\n" );
break;
}
attachment_blend_state.srcAlphaBlendFactor = attachment_blend_state.srcColorBlendFactor;
attachment_blend_state.dstAlphaBlendFactor = attachment_blend_state.dstColorBlendFactor;
attachment_blend_state.colorBlendOp = VK_BLEND_OP_ADD;
attachment_blend_state.alphaBlendOp = VK_BLEND_OP_ADD;
if ( def->allow_discard && vkSamples != VK_SAMPLE_COUNT_1_BIT ) {
// try to reduce pixel fillrate for transparent surfaces; this yields a 1..10% fps increase when multisampling is enabled
if ( attachment_blend_state.srcColorBlendFactor == VK_BLEND_FACTOR_SRC_ALPHA && attachment_blend_state.dstColorBlendFactor == VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA ) {
frag_spec_data[7].i = 1;
} else if ( attachment_blend_state.srcColorBlendFactor == VK_BLEND_FACTOR_ONE && attachment_blend_state.dstColorBlendFactor == VK_BLEND_FACTOR_ONE ) {
frag_spec_data[7].i = 2;
}
}
}
blend_state.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO;
blend_state.pNext = NULL;
blend_state.flags = 0;
blend_state.logicOpEnable = VK_FALSE;
blend_state.logicOp = VK_LOGIC_OP_COPY;
blend_state.attachmentCount = 1;
blend_state.pAttachments = &attachment_blend_state;
blend_state.blendConstants[0] = 0.0f;
blend_state.blendConstants[1] = 0.0f;
blend_state.blendConstants[2] = 0.0f;
blend_state.blendConstants[3] = 0.0f;
dynamic_state.sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO;
dynamic_state.pNext = NULL;
dynamic_state.flags = 0;
dynamic_state.dynamicStateCount = ARRAY_LEN( dynamic_state_array );
dynamic_state.pDynamicStates = dynamic_state_array;
create_info.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO;
create_info.pNext = NULL;
create_info.flags = 0;
create_info.stageCount = ARRAY_LEN(shader_stages);
create_info.pStages = shader_stages;
create_info.pVertexInputState = &vertex_input_state;
create_info.pInputAssemblyState = &input_assembly_state;
create_info.pTessellationState = NULL;
create_info.pViewportState = &viewport_state;
create_info.pRasterizationState = &rasterization_state;
create_info.pMultisampleState = &multisample_state;
create_info.pDepthStencilState = &depth_stencil_state;
create_info.pColorBlendState = &blend_state;
create_info.pDynamicState = &dynamic_state;
//if ( def->shader_type == TYPE_DOT )
// create_info.layout = vk.pipeline_layout_storage;
//else
create_info.layout = vk.pipeline_layout;
if ( renderPassIndex == RENDER_PASS_SCREENMAP )
create_info.renderPass = vk.render_pass.screenmap;
else
create_info.renderPass = vk.render_pass.main;
create_info.subpass = 0;
create_info.basePipelineHandle = VK_NULL_HANDLE;
create_info.basePipelineIndex = -1;
VK_CHECK( qvkCreateGraphicsPipelines( vk.device, vk.pipelineCache, 1, &create_info, NULL, &pipeline ) );
vk.pipeline_create_count++;
return pipeline;
}
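// Reserves a slot in vk.pipelines[] for the given definition; the
// actual VkPipeline objects are created lazily, per render pass, by
// vk_gen_pipeline().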
uint32_t vk_alloc_pipeline( const Vk_Pipeline_Def *def ) {
VK_Pipeline_t *pipeline;
if ( vk.pipelines_count >= MAX_VK_PIPELINES ) {
ri.Error( ERR_DROP, "alloc_pipeline: MAX_VK_PIPELINES reached" );
return 0;
} else {
int j;
pipeline = &vk.pipelines[ vk.pipelines_count ];
pipeline->def = *def;
for ( j = 0; j < RENDER_PASS_COUNT; j++ ) {
pipeline->handle[j] = VK_NULL_HANDLE;
}
return vk.pipelines_count++;
}
}
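// Returns the VkPipeline for the given slot and the currently active
// render pass, creating it on first use.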
VkPipeline vk_gen_pipeline( uint32_t index ) {
if ( index < vk.pipelines_count ) {
VK_Pipeline_t *pipeline = vk.pipelines + index;
if ( pipeline->handle[ vk.renderPassIndex ] == VK_NULL_HANDLE )
pipeline->handle[ vk.renderPassIndex ] = create_pipeline( &pipeline->def, vk.renderPassIndex );
return pipeline->handle[ vk.renderPassIndex ];
} else {
return VK_NULL_HANDLE;
}
}
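// Searches the pipeline slots starting from 'base' for a matching
// definition and allocates a new slot if none is found; when 'use' is
// set, the pipeline is also instantiated for the current render pass.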
uint32_t vk_find_pipeline_ext( uint32_t base, const Vk_Pipeline_Def *def, qboolean use ) {
const Vk_Pipeline_Def *cur_def;
uint32_t index;
for ( index = base; index < vk.pipelines_count; index++ ) {
cur_def = &vk.pipelines[ index ].def;
if ( memcmp( cur_def, def, sizeof( *def ) ) == 0 ) {
goto found;
}
}
index = vk_alloc_pipeline( def );
found:
if ( use )
vk_gen_pipeline( index );
return index;
}
void vk_get_pipeline_def( uint32_t pipeline, Vk_Pipeline_Def *def ) {
if ( pipeline >= vk.pipelines_count ) {
Com_Memset( def, 0, sizeof( *def ) );
} else {
Com_Memcpy( def, &vk.pipelines[ pipeline ].def, sizeof( *def ) );
}
}
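// Computes the render-target rectangle for the current view: the whole
// target in 2D mode, otherwise the scaled 3D viewport with the Y origin
// flipped from OpenGL (bottom-left) to Vulkan (top-left) conventions.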
static void get_viewport_rect(VkRect2D *r)
{
if ( backEnd.projection2D )
{
r->offset.x = 0;
r->offset.y = 0;
r->extent.width = vk.renderWidth;
r->extent.height = vk.renderHeight;
}
else
{
r->offset.x = backEnd.viewParms.viewportX * vk.renderScaleX;
r->offset.y = vk.renderHeight - (backEnd.viewParms.viewportY + backEnd.viewParms.viewportHeight) * vk.renderScaleY;
r->extent.width = (float)backEnd.viewParms.viewportWidth * vk.renderScaleX;
r->extent.height = (float)backEnd.viewParms.viewportHeight * vk.renderScaleY;
}
}
static void get_viewport(VkViewport *viewport, Vk_Depth_Range depth_range) {
VkRect2D r;
get_viewport_rect( &r );
viewport->x = (float)r.offset.x;
viewport->y = (float)r.offset.y;
viewport->width = (float)r.extent.width;
viewport->height = (float)r.extent.height;
switch ( depth_range ) {
default:
#ifdef USE_REVERSED_DEPTH
//case DEPTH_RANGE_NORMAL:
viewport->minDepth = 0.0f;
viewport->maxDepth = 1.0f;
break;
case DEPTH_RANGE_ZERO:
viewport->minDepth = 1.0f;
viewport->maxDepth = 1.0f;
break;
case DEPTH_RANGE_ONE:
viewport->minDepth = 0.0f;
viewport->maxDepth = 0.0f;
break;
case DEPTH_RANGE_WEAPON:
viewport->minDepth = 0.6f;
viewport->maxDepth = 1.0f;
break;
#else
//case DEPTH_RANGE_NORMAL:
viewport->minDepth = 0.0f;
viewport->maxDepth = 1.0f;
break;
case DEPTH_RANGE_ZERO:
viewport->minDepth = 0.0f;
viewport->maxDepth = 0.0f;
break;
case DEPTH_RANGE_ONE:
viewport->minDepth = 1.0f;
viewport->maxDepth = 1.0f;
break;
case DEPTH_RANGE_WEAPON:
viewport->minDepth = 0.0f;
viewport->maxDepth = 0.3f;
break;
#endif
}
}
static void get_scissor_rect(VkRect2D *r) {
if ( backEnd.viewParms.portalView != PV_NONE )
{
r->offset.x = backEnd.viewParms.scissorX;
r->offset.y = glConfig.vidHeight - backEnd.viewParms.scissorY - backEnd.viewParms.scissorHeight;
r->extent.width = backEnd.viewParms.scissorWidth;
r->extent.height = backEnd.viewParms.scissorHeight;
}
else
{
get_viewport_rect(r);
if (r->offset.x < 0)
r->offset.x = 0;
if (r->offset.y < 0)
r->offset.y = 0;
if (r->offset.x + r->extent.width > glConfig.vidWidth)
r->extent.width = glConfig.vidWidth - r->offset.x;
if (r->offset.y + r->extent.height > glConfig.vidHeight)
r->extent.height = glConfig.vidHeight - r->offset.y;
}
}
static void get_mvp_transform( float *mvp )
{
if ( backEnd.projection2D )
{
float mvp0 = 2.0f / glConfig.vidWidth;
float mvp5 = 2.0f / glConfig.vidHeight;
mvp[0] = mvp0; mvp[1] = 0.0f; mvp[2] = 0.0f; mvp[3] = 0.0f;
mvp[4] = 0.0f; mvp[5] = mvp5; mvp[6] = 0.0f; mvp[7] = 0.0f;
#ifdef USE_REVERSED_DEPTH
mvp[8] = 0.0f; mvp[9] = 0.0f; mvp[10] = 0.0f; mvp[11] = 0.0f;
mvp[12] = -1.0f; mvp[13] = -1.0f; mvp[14] = 1.0f; mvp[15] = 1.0f;
#else
mvp[8] = 0.0f; mvp[9] = 0.0f; mvp[10] = 1.0f; mvp[11] = 0.0f;
mvp[12] = -1.0f; mvp[13] = -1.0f; mvp[14] = 0.0f; mvp[15] = 1.0f;
#endif
}
else
{
const float *p = backEnd.viewParms.projectionMatrix;
float proj[16];
Com_Memcpy( proj, p, sizeof( proj ) );
// update q3's proj matrix (opengl) to vulkan conventions: z - [0, 1] instead of [-1, 1] and invert y direction
proj[5] = -p[5];
//proj[10] = ( p[10] - 1.0f ) / 2.0f;
//proj[14] = p[14] / 2.0f;
myGlMultMatrix( vk_world.modelview_transform, proj, mvp );
}
}
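// Clears the color attachment of the active render pass within the
// current scissor rectangle via vkCmdClearAttachments().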
void vk_clear_color( const vec4_t color ) {
VkClearAttachment attachment;
VkClearRect clear_rect[2];
uint32_t rect_count;
if ( !vk.active )
return;
attachment.colorAttachment = 0;
attachment.clearValue.color.float32[0] = color[0];
attachment.clearValue.color.float32[1] = color[1];
attachment.clearValue.color.float32[2] = color[2];
attachment.clearValue.color.float32[3] = color[3];
attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
get_scissor_rect( &clear_rect[0].rect );
clear_rect[0].baseArrayLayer = 0;
clear_rect[0].layerCount = 1;
rect_count = 1;
#ifdef _DEBUG
// Split viewport rectangle into two non-overlapping rectangles.
// It's a HACK to prevent Vulkan validation layer's performance warning:
// "vkCmdClearAttachments() issued on command buffer object XXX prior to any Draw Cmds.
// It is recommended you use RenderPass LOAD_OP_CLEAR on Attachments prior to any Draw."
//
// NOTE: we don't use LOAD_OP_CLEAR for color attachment when we begin renderpass
// since at that point we don't know whether we need a color buffer clear (usually we don't).
{
uint32_t h = clear_rect[0].rect.extent.height / 2;
clear_rect[0].rect.extent.height = h;
clear_rect[1] = clear_rect[0];
clear_rect[1].rect.offset.y += h;
rect_count = 2;
}
#endif
qvkCmdClearAttachments( vk.cmd->command_buffer, 1, &attachment, rect_count, clear_rect );
}
void vk_clear_depth( qboolean clear_stencil ) {
VkClearAttachment attachment;
VkClearRect clear_rect[1];
if ( !vk.active )
return;
if ( vk_world.dirty_depth_attachment == 0 )
return;
attachment.colorAttachment = 0;
#ifdef USE_REVERSED_DEPTH
attachment.clearValue.depthStencil.depth = 0.0f;
#else
attachment.clearValue.depthStencil.depth = 1.0f;
#endif
attachment.clearValue.depthStencil.stencil = 0;
if ( clear_stencil && r_stencilbits->integer ) {
attachment.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
} else {
attachment.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
}
get_scissor_rect( &clear_rect[0].rect );
clear_rect[0].baseArrayLayer = 0;
clear_rect[0].layerCount = 1;
qvkCmdClearAttachments( vk.cmd->command_buffer, 1, &attachment, 1, clear_rect );
}
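// Uploads the 16-float modelview-projection matrix to the vertex stage
// via push constants; a NULL argument means "derive it from the current
// backEnd state".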
void vk_update_mvp( const float *m ) {
float push_constants[16]; // mvp transform
//
// Specify push constants.
//
if ( m )
Com_Memcpy( push_constants, m, sizeof( push_constants ) );
else
get_mvp_transform( push_constants );
qvkCmdPushConstants( vk.cmd->command_buffer, vk.pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof( push_constants ), push_constants );
vk.stats.push_size += sizeof( push_constants );
}
static VkBuffer shade_bufs[8];
static int bind_base;
static int bind_count;
static void vk_bind_index_attr( int index )
{
if ( bind_base == -1 ) {
bind_base = index;
bind_count = 1;
} else {
bind_count = index - bind_base + 1;
}
}
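// Copies per-vertex data into the current command buffer's geometry
// buffer at a 32-byte aligned offset and records the attribute binding;
// on overflow a buffer resize is scheduled instead and this frame's
// draws are skipped.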
static void vk_bind_attr( int index, unsigned int item_size, const void *src ) {
const uint32_t offset = PAD( vk.cmd->vertex_buffer_offset, 32 );
const uint32_t size = tess.numVertexes * item_size;
if ( offset + size > vk.geometry_buffer_size ) {
// schedule geometry buffer resize
vk.geometry_buffer_size_new = log2pad( offset + size, 1 );
} else {
vk.cmd->buf_offset[ index ] = offset;
Com_Memcpy( vk.cmd->vertex_buffer_ptr + offset, src, size );
vk.cmd->vertex_buffer_offset = (VkDeviceSize)offset + size;
}
vk_bind_index_attr( index );
}
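// Uploads index data into the geometry buffer and returns its offset,
// or ~0U if the buffer overflowed and a resize was scheduled.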
uint32_t vk_tess_index( uint32_t numIndexes, const void *src ) {
const uint32_t offset = vk.cmd->vertex_buffer_offset;
const uint32_t size = numIndexes * sizeof( tess.indexes[0] );
if ( offset + size > vk.geometry_buffer_size ) {
// schedule geometry buffer resize
vk.geometry_buffer_size_new = log2pad( offset + size, 1 );
return ~0U;
} else {
Com_Memcpy( vk.cmd->vertex_buffer_ptr + offset, src, size );
vk.cmd->vertex_buffer_offset = (VkDeviceSize)offset + size;
return offset;
}
}
void vk_bind_index_buffer( VkBuffer buffer, uint32_t offset )
{
if ( vk.cmd->curr_index_buffer != buffer || vk.cmd->curr_index_offset != offset )
qvkCmdBindIndexBuffer( vk.cmd->command_buffer, buffer, offset, VK_INDEX_TYPE_UINT32 );
vk.cmd->curr_index_buffer = buffer;
vk.cmd->curr_index_offset = offset;
}
void vk_draw_indexed( uint32_t indexCount, uint32_t firstIndex )
{
qvkCmdDrawIndexed( vk.cmd->command_buffer, indexCount, 1, firstIndex, 0, 0 );
}
void vk_bind_index( void )
{
#ifdef USE_VBO
if ( tess.vboIndex ) {
vk.cmd->num_indexes = 0;
//qvkCmdBindIndexBuffer( vk.cmd->command_buffer, vk.vbo.index_buffer, tess.shader->iboOffset, VK_INDEX_TYPE_UINT32 );
return;
}
#endif
vk_bind_index_ext( tess.numIndexes, tess.indexes );
}
void vk_bind_index_ext( const int numIndexes, const uint32_t *indexes )
{
uint32_t offset = vk_tess_index( numIndexes, indexes );
if ( offset != ~0U ) {
vk_bind_index_buffer( vk.cmd->vertex_buffer, offset );
vk.cmd->num_indexes = numIndexes;
} else {
// overflowed
vk.cmd->num_indexes = 0;
}
}
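// Binds the vertex attribute streams selected by 'flags', either as
// offsets into the static VBO or by uploading tess data into the
// per-frame geometry buffer; all streams are bound with a single
// vkCmdBindVertexBuffers() call over the contiguous range tracked by
// vk_bind_index_attr().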
void vk_bind_geometry( uint32_t flags )
{
//unsigned int size;
bind_base = -1;
bind_count = 0;
if ( ( flags & ( TESS_XYZ | TESS_RGBA0 | TESS_ST0 | TESS_ST1 | TESS_ST2 | TESS_NNN | TESS_RGBA1 | TESS_RGBA2 ) ) == 0 )
return;
#ifdef USE_VBO
if ( tess.vboIndex ) {
shade_bufs[0] = shade_bufs[1] = shade_bufs[2] = shade_bufs[3] = shade_bufs[4] = shade_bufs[5] = shade_bufs[6] = shade_bufs[7] = vk.vbo.vertex_buffer;
if ( flags & TESS_XYZ ) { // 0
vk.cmd->vbo_offset[0] = tess.shader->vboOffset + 0;
vk_bind_index_attr( 0 );
}
if ( flags & TESS_RGBA0 ) { // 1
vk.cmd->vbo_offset[1] = tess.shader->stages[ tess.vboStage ]->rgb_offset[0];
vk_bind_index_attr( 1 );
}
if ( flags & TESS_ST0 ) { // 2
vk.cmd->vbo_offset[2] = tess.shader->stages[ tess.vboStage ]->tex_offset[0];
vk_bind_index_attr( 2 );
}
if ( flags & TESS_ST1 ) { // 3
vk.cmd->vbo_offset[3] = tess.shader->stages[ tess.vboStage ]->tex_offset[1];
vk_bind_index_attr( 3 );
}
if ( flags & TESS_ST2 ) { // 4
vk.cmd->vbo_offset[4] = tess.shader->stages[ tess.vboStage ]->tex_offset[2];
vk_bind_index_attr( 4 );
}
if ( flags & TESS_NNN ) { // 5
vk.cmd->vbo_offset[5] = tess.shader->normalOffset;
vk_bind_index_attr( 5 );
}
if ( flags & TESS_RGBA1 ) { // 6
vk.cmd->vbo_offset[6] = tess.shader->stages[ tess.vboStage ]->rgb_offset[1];
vk_bind_index_attr( 6 );
}
if ( flags & TESS_RGBA2 ) { // 7
vk.cmd->vbo_offset[7] = tess.shader->stages[ tess.vboStage ]->rgb_offset[2];
vk_bind_index_attr( 7 );
}
qvkCmdBindVertexBuffers( vk.cmd->command_buffer, bind_base, bind_count, shade_bufs, vk.cmd->vbo_offset + bind_base );
} else
#endif // USE_VBO
{
shade_bufs[0] = shade_bufs[1] = shade_bufs[2] = shade_bufs[3] = shade_bufs[4] = shade_bufs[5] = shade_bufs[6] = shade_bufs[7] = vk.cmd->vertex_buffer;
if ( flags & TESS_XYZ ) {
vk_bind_attr(0, sizeof(tess.xyz[0]), &tess.xyz[0]);
}
if ( flags & TESS_RGBA0 ) {
vk_bind_attr(1, sizeof( color4ub_t ), tess.svars.colors[0][0].rgba);
}
if ( flags & TESS_ST0 ) {
vk_bind_attr(2, sizeof( vec2_t ), tess.svars.texcoordPtr[0]);
}
if ( flags & TESS_ST1 ) {
vk_bind_attr(3, sizeof( vec2_t ), tess.svars.texcoordPtr[1]);
}
if ( flags & TESS_ST2 ) {
vk_bind_attr(4, sizeof( vec2_t ), tess.svars.texcoordPtr[2]);
}
if ( flags & TESS_NNN ) {
vk_bind_attr(5, sizeof(tess.normal[0]), tess.normal);
}
if ( flags & TESS_RGBA1 ) {
vk_bind_attr(6, sizeof( color4ub_t ), tess.svars.colors[1][0].rgba);
}
if ( flags & TESS_RGBA2 ) {
vk_bind_attr(7, sizeof( color4ub_t ), tess.svars.colors[2][0].rgba);
}
qvkCmdBindVertexBuffers( vk.cmd->command_buffer, bind_base, bind_count, shade_bufs, vk.cmd->buf_offset + bind_base );
}
}
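// Binds the position/texcoord/normal streams used by the dynamic
// lighting shaders.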
void vk_bind_lighting( int stage, int bundle )
{
bind_base = -1;
bind_count = 0;
#ifdef USE_VBO
if ( tess.vboIndex ) {
shade_bufs[0] = shade_bufs[1] = shade_bufs[2] = vk.vbo.vertex_buffer;
vk.cmd->vbo_offset[0] = tess.shader->vboOffset + 0;
vk.cmd->vbo_offset[1] = tess.shader->stages[ stage ]->tex_offset[ bundle ];
vk.cmd->vbo_offset[2] = tess.shader->normalOffset;
qvkCmdBindVertexBuffers( vk.cmd->command_buffer, 0, 3, shade_bufs, vk.cmd->vbo_offset + 0 );
}
else
#endif // USE_VBO
{
shade_bufs[0] = shade_bufs[1] = shade_bufs[2] = vk.cmd->vertex_buffer;
vk_bind_attr( 0, sizeof( tess.xyz[0] ), &tess.xyz[0] );
vk_bind_attr( 1, sizeof( vec2_t ), tess.svars.texcoordPtr[ bundle ] );
vk_bind_attr( 2, sizeof( tess.normal[0] ), tess.normal );
qvkCmdBindVertexBuffers( vk.cmd->command_buffer, bind_base, bind_count, shade_bufs, vk.cmd->buf_offset + bind_base );
}
}
void vk_reset_descriptor( int index )
{
vk.cmd->descriptor_set.current[ index ] = VK_NULL_HANDLE;
}
void vk_update_descriptor( int index, VkDescriptorSet descriptor )
{
if ( vk.cmd->descriptor_set.current[ index ] != descriptor ) {
vk.cmd->descriptor_set.start = ( index < vk.cmd->descriptor_set.start ) ? index : vk.cmd->descriptor_set.start;
vk.cmd->descriptor_set.end = ( index > vk.cmd->descriptor_set.end ) ? index : vk.cmd->descriptor_set.end;
}
vk.cmd->descriptor_set.current[ index ] = descriptor;
}
void vk_update_descriptor_offset( int index, uint32_t offset )
{
vk.cmd->descriptor_set.offset[ index ] = offset;
}
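// Flushes pending descriptor set updates with one vkCmdBindDescriptorSets()
// call over the modified range; sets 0 and 1 additionally carry a dynamic
// uniform/storage buffer offset.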
void vk_bind_descriptor_sets( void )
{
uint32_t offsets[2], offset_count;
uint32_t start, end, count;
start = vk.cmd->descriptor_set.start;
if ( start == ~0U )
return;
end = vk.cmd->descriptor_set.end;
offset_count = 0;
if ( start <= 1 ) { // uniform offset or storage offset
offsets[ offset_count++ ] = vk.cmd->descriptor_set.offset[ start ];
}
count = end - start + 1;
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout, start, count, vk.cmd->descriptor_set.current + start, offset_count, offsets );
vk.cmd->descriptor_set.end = 0;
vk.cmd->descriptor_set.start = ~0U;
}
void vk_bind_pipeline( uint32_t pipeline ) {
VkPipeline vkpipe;
vkpipe = vk_gen_pipeline( pipeline );
if ( vkpipe != vk.cmd->last_pipeline ) {
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vkpipe );
vk.cmd->last_pipeline = vkpipe;
}
vk_world.dirty_depth_attachment |= ( vk.pipelines[ pipeline ].def.state_bits & GLS_DEPTHMASK_TRUE );
}
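// Updates the dynamic viewport/scissor state when the depth range
// changes, then issues the actual draw call (VBO, indexed or non-indexed).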
void vk_draw_geometry( Vk_Depth_Range depth_range, qboolean indexed ) {
VkRect2D scissor_rect;
VkViewport viewport;
if ( vk.geometry_buffer_size_new ) {
// geometry buffer overflow happened this frame
return;
}
vk_bind_descriptor_sets();
// configure pipeline's dynamic state
if ( vk.cmd->depth_range != depth_range ) {
vk.cmd->depth_range = depth_range;
get_scissor_rect( &scissor_rect );
if ( memcmp( &vk.cmd->scissor_rect, &scissor_rect, sizeof( scissor_rect ) ) != 0 ) {
qvkCmdSetScissor( vk.cmd->command_buffer, 0, 1, &scissor_rect );
vk.cmd->scissor_rect = scissor_rect;
}
get_viewport( &viewport, depth_range );
qvkCmdSetViewport( vk.cmd->command_buffer, 0, 1, &viewport );
}
// issue draw call(s)
#ifdef USE_VBO
if ( tess.vboIndex )
VBO_RenderIBOItems();
else
#endif
if ( indexed ) {
qvkCmdDrawIndexed( vk.cmd->command_buffer, vk.cmd->num_indexes, 1, 0, 0, 0 );
} else {
qvkCmdDraw( vk.cmd->command_buffer, tess.numVertexes, 1, 0, 0 );
}
}
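// Records vkCmdBeginRenderPass() for the given pass/framebuffer pair,
// optionally supplying clear values for the color and depth/stencil
// attachments.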
static void vk_begin_render_pass( VkRenderPass renderPass, VkFramebuffer frameBuffer, qboolean clearValues, uint32_t width, uint32_t height )
{
VkRenderPassBeginInfo render_pass_begin_info;
VkClearValue clear_values[3];
// Begin render pass.
render_pass_begin_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
render_pass_begin_info.pNext = NULL;
render_pass_begin_info.renderPass = renderPass;
render_pass_begin_info.framebuffer = frameBuffer;
render_pass_begin_info.renderArea.offset.x = 0;
render_pass_begin_info.renderArea.offset.y = 0;
render_pass_begin_info.renderArea.extent.width = width;
render_pass_begin_info.renderArea.extent.height = height;
if ( clearValues ) {
// attachments layout:
// [0] - resolve/color/presentation
// [1] - depth/stencil
// [2] - multisampled color, optional
Com_Memset( clear_values, 0, sizeof( clear_values ) );
#ifndef USE_REVERSED_DEPTH
clear_values[1].depthStencil.depth = 1.0;
#endif
render_pass_begin_info.clearValueCount = vk.msaaActive ? 3 : 2;
render_pass_begin_info.pClearValues = clear_values;
vk_world.dirty_depth_attachment = 0;
} else {
render_pass_begin_info.clearValueCount = 0;
render_pass_begin_info.pClearValues = NULL;
}
qvkCmdBeginRenderPass( vk.cmd->command_buffer, &render_pass_begin_info, VK_SUBPASS_CONTENTS_INLINE );
}
void vk_begin_main_render_pass( void )
{
VkFramebuffer frameBuffer = vk.framebuffers.main[ vk.swapchain_image_index ];
vk.renderPassIndex = RENDER_PASS_MAIN;
vk.renderWidth = glConfig.vidWidth;
vk.renderHeight = glConfig.vidHeight;
//vk.renderScaleX = (float)vk.renderWidth / (float)glConfig.vidWidth;
//vk.renderScaleY = (float)vk.renderHeight / (float)glConfig.vidHeight;
vk.renderScaleX = vk.renderScaleY = 1.0f;
vk_begin_render_pass( vk.render_pass.main, frameBuffer, qtrue, vk.renderWidth, vk.renderHeight );
}
void vk_begin_post_bloom_render_pass( void )
{
VkFramebuffer frameBuffer = vk.framebuffers.main[ vk.swapchain_image_index ];
vk.renderPassIndex = RENDER_PASS_POST_BLOOM;
vk.renderWidth = glConfig.vidWidth;
vk.renderHeight = glConfig.vidHeight;
//vk.renderScaleX = (float)vk.renderWidth / (float)glConfig.vidWidth;
//vk.renderScaleY = (float)vk.renderHeight / (float)glConfig.vidHeight;
vk.renderScaleX = vk.renderScaleY = 1.0f;
vk_begin_render_pass( vk.render_pass.post_bloom, frameBuffer, qfalse, vk.renderWidth, vk.renderHeight );
}
void vk_begin_bloom_extract_render_pass( void )
{
VkFramebuffer frameBuffer = vk.framebuffers.bloom_extract;
//vk.renderPassIndex = RENDER_PASS_BLOOM_EXTRACT; // doesn't matter, we will use dedicated pipelines
vk.renderWidth = gls.captureWidth;
vk.renderHeight = gls.captureHeight;
//vk.renderScaleX = (float)vk.renderWidth / (float)glConfig.vidWidth;
//vk.renderScaleY = (float)vk.renderHeight / (float)glConfig.vidHeight;
vk.renderScaleX = vk.renderScaleY = 1.0f;
vk_begin_render_pass( vk.render_pass.bloom_extract, frameBuffer, qfalse, vk.renderWidth, vk.renderHeight );
}
void vk_begin_blur_render_pass( uint32_t index )
{
VkFramebuffer frameBuffer = vk.framebuffers.blur[ index ];
//vk.renderPassIndex = RENDER_PASS_BLOOM_EXTRACT; // doesn't matter, we will use dedicated pipelines
vk.renderWidth = gls.captureWidth / ( 2 << ( index / 2 ) );
vk.renderHeight = gls.captureHeight / ( 2 << ( index / 2 ) );
//vk.renderScaleX = (float)vk.renderWidth / (float)glConfig.vidWidth;
//vk.renderScaleY = (float)vk.renderHeight / (float)glConfig.vidHeight;
vk.renderScaleX = vk.renderScaleY = 1.0f;
vk_begin_render_pass( vk.render_pass.blur[ index ], frameBuffer, qfalse, vk.renderWidth, vk.renderHeight );
}
static void vk_begin_screenmap_render_pass( void )
{
VkFramebuffer frameBuffer = vk.framebuffers.screenmap;
record_image_layout_transition( vk.cmd->command_buffer, vk.screenMap.color_image, VK_IMAGE_ASPECT_COLOR_BIT,
0, VK_IMAGE_LAYOUT_UNDEFINED, VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL );
vk.renderPassIndex = RENDER_PASS_SCREENMAP;
vk.renderWidth = vk.screenMapWidth;
vk.renderHeight = vk.screenMapHeight;
vk.renderScaleX = (float)vk.renderWidth / (float)glConfig.vidWidth;
vk.renderScaleY = (float)vk.renderHeight / (float)glConfig.vidHeight;
vk_begin_render_pass( vk.render_pass.screenmap, frameBuffer, qtrue, vk.renderWidth, vk.renderHeight );
}
void vk_end_render_pass( void )
{
qvkCmdEndRenderPass( vk.cmd->command_buffer );
// vk.renderPassIndex = RENDER_PASS_MAIN;
}
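// Scans the pending backend command queue to determine whether any
// RC_DRAW_SURFS command needs the screenmap this frame.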
static qboolean vk_find_screenmap_drawsurfs( void )
{
const void *curCmd = &backEndData->commands.cmds;
const drawBufferCommand_t *db_cmd;
const drawSurfsCommand_t *ds_cmd;
for ( ;; ) {
curCmd = PADP( curCmd, sizeof(void *) );
switch ( *(const int *)curCmd ) {
case RC_DRAW_BUFFER:
db_cmd = (const drawBufferCommand_t *)curCmd;
curCmd = (const void *)(db_cmd + 1);
break;
case RC_DRAW_SURFS:
ds_cmd = (const drawSurfsCommand_t *)curCmd;
return ds_cmd->refdef.needScreenMap;
default:
return qfalse;
}
}
}
#ifndef UINT64_MAX
#define UINT64_MAX 0xFFFFFFFFFFFFFFFFULL
#endif
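// Starts a new frame: acquires the next swapchain image, waits on the
// command buffer's fence, begins command buffer recording and the first
// render pass, and resets all per-frame dynamic state.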
void vk_begin_frame( void )
{
VkCommandBufferBeginInfo begin_info;
//VkFramebuffer frameBuffer;
VkResult res;
if ( vk.frame_count++ ) // might happen during stereo rendering
return;
if ( vk.cmd->waitForFence ) {
vk.cmd = &vk.tess[ vk.cmd_index++ ];
vk.cmd_index %= NUM_COMMAND_BUFFERS;
if ( !ri.CL_IsMinimized() ) {
res = qvkAcquireNextImageKHR( vk.device, vk.swapchain, 5 * 1000000000LLU, vk.cmd->image_acquired, VK_NULL_HANDLE, &vk.swapchain_image_index );
// when running via RDP: "Application has already acquired the maximum number of images (0x2)"
// probably caused by "device lost" errors
if ( res < 0 ) {
if ( res == VK_ERROR_OUT_OF_DATE_KHR ) {
// swapchain re-creation needed
vk_restart_swapchain( __func__ );
} else {
ri.Error( ERR_FATAL, "vkAcquireNextImageKHR returned %s", vk_result_string( res ) );
}
}
} else {
vk.swapchain_image_index++;
vk.swapchain_image_index %= vk.swapchain_image_count;
}
//vk.cmd = &vk.tess[ vk.cmd_index++ ];
//vk.cmd_index %= NUM_COMMAND_BUFFERS;
vk.cmd->waitForFence = qfalse;
res = qvkWaitForFences( vk.device, 1, &vk.cmd->rendering_finished_fence, VK_FALSE, 1e10 );
if ( res != VK_SUCCESS ) {
if ( res == VK_ERROR_DEVICE_LOST ) {
// silently discard previous command buffer
ri.Printf( PRINT_WARNING, "Vulkan: %s returned %s\n", "vkWaitForFences", vk_result_string( res ) );
} else {
ri.Error( ERR_FATAL, "Vulkan: %s returned %s", "vkWaitForFences", vk_result_string( res ) );
}
}
} else {
// current command buffer has been reset due to geometry buffer overflow/update
// so we will reuse it with current swapchain image as well
}
VK_CHECK( qvkResetFences( vk.device, 1, &vk.cmd->rendering_finished_fence ) );
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
begin_info.pNext = NULL;
begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
begin_info.pInheritanceInfo = NULL;
VK_CHECK( qvkBeginCommandBuffer( vk.cmd->command_buffer, &begin_info ) );
// Ensure visibility of geometry buffers writes.
//record_buffer_memory_barrier( vk.cmd->command_buffer, vk.cmd->vertex_buffer, vk.cmd->vertex_buffer_offset, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT );
#if 0
// add explicit layout transition dependency
if ( vk.fboActive ) {
record_image_layout_transition( vk.cmd->command_buffer,
vk.color_image, VK_IMAGE_ASPECT_COLOR_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_ACCESS_SHADER_READ_BIT, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL );
} else {
record_image_layout_transition( vk.cmd->command_buffer,
vk.swapchain_images[ vk.swapchain_image_index ], VK_IMAGE_ASPECT_COLOR_BIT,
VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, VK_IMAGE_LAYOUT_UNDEFINED,
VK_ACCESS_MEMORY_READ_BIT, VK_IMAGE_LAYOUT_PRESENT_SRC_KHR );
}
#endif
if ( vk.cmd->vertex_buffer_offset > vk.stats.vertex_buffer_max ) {
vk.stats.vertex_buffer_max = vk.cmd->vertex_buffer_offset;
}
if ( vk.stats.push_size > vk.stats.push_size_max ) {
vk.stats.push_size_max = vk.stats.push_size;
}
vk.cmd->last_pipeline = VK_NULL_HANDLE;
backEnd.screenMapDone = qfalse;
if ( vk_find_screenmap_drawsurfs() ) {
vk_begin_screenmap_render_pass();
} else {
vk_begin_main_render_pass();
}
// dynamic vertex buffer layout
vk.cmd->uniform_read_offset = 0;
vk.cmd->vertex_buffer_offset = 0;
Com_Memset( vk.cmd->buf_offset, 0, sizeof( vk.cmd->buf_offset ) );
Com_Memset( vk.cmd->vbo_offset, 0, sizeof( vk.cmd->vbo_offset ) );
vk.cmd->curr_index_buffer = VK_NULL_HANDLE;
vk.cmd->curr_index_offset = 0;
Com_Memset( &vk.cmd->descriptor_set, 0, sizeof( vk.cmd->descriptor_set ) );
vk.cmd->descriptor_set.start = ~0U;
//vk.cmd->descriptor_set.end = 0;
Com_Memset( &vk.cmd->scissor_rect, 0, sizeof( vk.cmd->scissor_rect ) );
vk_update_descriptor( 2, tr.whiteImage->descriptor );
vk_update_descriptor( 3, tr.whiteImage->descriptor );
if ( vk.maxBoundDescriptorSets >= 6 ) {
vk_update_descriptor( 4, tr.whiteImage->descriptor );
}
// other stats
vk.stats.push_size = 0;
}
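// Handles geometry buffer overflow: abandons the current command buffer,
// waits for the GPU to go idle, reallocates the geometry buffers at the
// scheduled size and refreshes the dependent uniform descriptors.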
static void vk_resize_geometry_buffer( void )
{
int i;
vk_end_render_pass();
VK_CHECK( qvkEndCommandBuffer( vk.cmd->command_buffer ) );
qvkResetCommandBuffer( vk.cmd->command_buffer, 0 );
vk_wait_idle();
vk_release_geometry_buffers();
vk_create_geometry_buffers( vk.geometry_buffer_size_new );
vk.geometry_buffer_size_new = 0;
for ( i = 0; i < NUM_COMMAND_BUFFERS; i++ )
vk_update_uniform_descriptor( vk.tess[ i ].uniform_descriptor, vk.tess[ i ].vertex_buffer );
ri.Printf( PRINT_DEVELOPER, "...geometry buffer resized to %iK\n", (int)( vk.geometry_buffer_size / 1024 ) );
}
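// Finishes the frame: runs the post-processing passes (bloom, capture,
// gamma) when rendering via FBO, submits the command buffer and queues
// the swapchain image for presentation.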
void vk_end_frame( void )
{
const VkPipelineStageFlags wait_dst_stage_mask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT;
VkPresentInfoKHR present_info;
VkSubmitInfo submit_info;
VkResult res;
if ( vk.frame_count == 0 )
return;
vk.frame_count = 0;
if ( vk.geometry_buffer_size_new )
{
vk_resize_geometry_buffer();
return;
}
if ( vk.fboActive )
{
vk.cmd->last_pipeline = VK_NULL_HANDLE; // do not restore clobbered descriptors in vk_bloom()
if ( r_bloom->integer )
{
vk_bloom();
}
if ( backEnd.screenshotMask && vk.capture.image )
{
vk_end_render_pass();
// render to capture FBO
vk_begin_render_pass( vk.render_pass.capture, vk.framebuffers.capture, qfalse, gls.captureWidth, gls.captureHeight );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.capture_pipeline );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.color_descriptor, 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
}
if ( !ri.CL_IsMinimized() )
{
vk_end_render_pass();
vk.renderWidth = gls.windowWidth;
vk.renderHeight = gls.windowHeight;
vk.renderScaleX = 1.0;
vk.renderScaleY = 1.0;
vk_begin_render_pass( vk.render_pass.gamma, vk.framebuffers.gamma[ vk.swapchain_image_index ], qfalse, vk.renderWidth, vk.renderHeight );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.gamma_pipeline );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.color_descriptor, 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
}
}
vk_end_render_pass();
VK_CHECK( qvkEndCommandBuffer( vk.cmd->command_buffer ) );
submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submit_info.pNext = NULL;
submit_info.commandBufferCount = 1;
submit_info.pCommandBuffers = &vk.cmd->command_buffer;
if ( !ri.CL_IsMinimized() ) {
submit_info.waitSemaphoreCount = 1;
submit_info.pWaitSemaphores = &vk.cmd->image_acquired;
submit_info.pWaitDstStageMask = &wait_dst_stage_mask;
submit_info.signalSemaphoreCount = 1;
submit_info.pSignalSemaphores = &vk.cmd->rendering_finished;
} else {
submit_info.waitSemaphoreCount = 0;
submit_info.pWaitSemaphores = NULL;
submit_info.pWaitDstStageMask = NULL;
submit_info.signalSemaphoreCount = 0;
submit_info.pSignalSemaphores = NULL;
}
VK_CHECK( qvkQueueSubmit( vk.queue, 1, &submit_info, vk.cmd->rendering_finished_fence ) );
vk.cmd->waitForFence = qtrue;
// presentation may take an undefined amount of time to complete, so we can't measure it reliably
backEnd.pc.msec = ri.Milliseconds() - backEnd.pc.msec;
if ( ri.CL_IsMinimized() )
return;
present_info.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
present_info.pNext = NULL;
present_info.waitSemaphoreCount = 1;
present_info.pWaitSemaphores = &vk.cmd->rendering_finished;
present_info.swapchainCount = 1;
present_info.pSwapchains = &vk.swapchain;
present_info.pImageIndices = &vk.swapchain_image_index;
present_info.pResults = NULL;
res = qvkQueuePresentKHR( vk.queue, &present_info );
if ( res < 0 ) {
if ( res == VK_ERROR_DEVICE_LOST ) {
// we can ignore that
ri.Printf( PRINT_DEVELOPER, "vkQueuePresentKHR: device lost\n" );
} else if ( res == VK_ERROR_OUT_OF_DATE_KHR ) {
// swapchain re-creation needed
vk_restart_swapchain( __func__ );
} else {
// or we don't
ri.Error( ERR_FATAL, "vkQueuePresentKHR returned %s", vk_result_string( res ) );
}
}
}
static qboolean is_bgr( VkFormat format ) {
switch ( format ) {
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_UINT:
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_B4G4R4A4_UNORM_PACK16:
return qtrue;
default:
return qfalse;
}
}
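// Reads back the rendered image: copies the source (FBO color, capture
// or swapchain image) into a temporary host-visible linear image, then
// converts the mapped pixels into a tightly packed RGB buffer with rows
// written bottom-up for the screenshot layout.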
void vk_read_pixels( byte *buffer, uint32_t width, uint32_t height )
{
VkCommandBuffer command_buffer;
VkDeviceMemory memory;
VkMemoryRequirements memory_requirements;
VkMemoryPropertyFlags memory_reqs;
VkMemoryPropertyFlags memory_flags;
VkMemoryAllocateInfo alloc_info;
VkImageSubresource subresource;
VkSubresourceLayout layout;
VkImageCreateInfo desc;
VkImage srcImage;
VkImageLayout srcImageLayout;
VkAccessFlagBits srcImageAccess;
VkImage dstImage;
byte *buffer_ptr;
byte *data;
uint32_t pixel_width;
uint32_t i, n;
qboolean invalidate_ptr;
VK_CHECK( qvkWaitForFences( vk.device, 1, &vk.cmd->rendering_finished_fence, VK_FALSE, 1e12 ) );
if ( vk.fboActive ) {
srcImageAccess = VK_ACCESS_SHADER_READ_BIT;
if ( vk.capture.image ) {
// dedicated capture buffer
srcImageLayout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
srcImage = vk.capture.image;
} else {
srcImageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
srcImage = vk.color_image;
}
} else {
srcImageAccess = VK_ACCESS_MEMORY_READ_BIT;
srcImageLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR;
srcImage = vk.swapchain_images[ vk.swapchain_image_index ];
}
Com_Memset( &desc, 0, sizeof( desc ) );
// Create image in host visible memory to serve as a destination for framebuffer pixels.
desc.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
desc.pNext = NULL;
desc.flags = 0;
desc.imageType = VK_IMAGE_TYPE_2D;
desc.format = vk.capture_format;
desc.extent.width = width;
desc.extent.height = height;
desc.extent.depth = 1;
desc.mipLevels = 1;
desc.arrayLayers = 1;
desc.samples = VK_SAMPLE_COUNT_1_BIT;
desc.tiling = VK_IMAGE_TILING_LINEAR;
desc.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
desc.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
desc.queueFamilyIndexCount = 0;
desc.pQueueFamilyIndices = NULL;
desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
VK_CHECK( qvkCreateImage( vk.device, &desc, NULL, &dstImage ) );
qvkGetImageMemoryRequirements( vk.device, dstImage, &memory_requirements );
alloc_info.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
alloc_info.pNext = NULL;
alloc_info.allocationSize = memory_requirements.size;
// host_cached bit is desirable for fast reads
memory_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
alloc_info.memoryTypeIndex = find_memory_type2( memory_requirements.memoryTypeBits, memory_reqs, &memory_flags );
if ( alloc_info.memoryTypeIndex == ~0U ) {
// try less explicit flags, without host_coherent
memory_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
alloc_info.memoryTypeIndex = find_memory_type2( memory_requirements.memoryTypeBits, memory_reqs, &memory_flags );
if ( alloc_info.memoryTypeIndex == ~0U ) {
// slowest case
memory_reqs = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
alloc_info.memoryTypeIndex = find_memory_type2( memory_requirements.memoryTypeBits, memory_reqs, &memory_flags );
if ( alloc_info.memoryTypeIndex == ~0U ) {
ri.Error( ERR_FATAL, "%s(): failed to find matching memory type for image capture", __func__ );
}
}
}
if ( memory_flags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT ) {
invalidate_ptr = qfalse;
} else {
		// the Vulkan spec requires invalidating non-coherent mapped memory before the host reads it
invalidate_ptr = qtrue;
}
	VK_CHECK( qvkAllocateMemory( vk.device, &alloc_info, NULL, &memory ) );
	VK_CHECK( qvkBindImageMemory( vk.device, dstImage, memory, 0 ) );
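	// record a one-shot command buffer: transition the source image into a
	// transfer-src layout (unless it is already there) and the staging image
	// into transfer-dst, then copy the pixels across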
command_buffer = begin_command_buffer();
if ( srcImageLayout != VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL ) {
record_image_layout_transition( command_buffer, srcImage,
VK_IMAGE_ASPECT_COLOR_BIT,
srcImageAccess, srcImageLayout,
VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL );
}
record_image_layout_transition( command_buffer, dstImage,
VK_IMAGE_ASPECT_COLOR_BIT,
0, VK_IMAGE_LAYOUT_UNDEFINED,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL );
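	// vkCmdBlitImage may convert between formats during the copy, whereas
	// vkCmdCopyImage requires compatible formats; vk.blitEnabled is presumably
	// set at startup only when the driver can blit from the source format,
	// leaving the plain copy as the strict fallback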
if ( vk.blitEnabled ) {
VkImageBlit region;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.mipLevel = 0;
region.srcSubresource.baseArrayLayer = 0;
region.srcSubresource.layerCount = 1;
region.srcOffsets[0].x = 0;
region.srcOffsets[0].y = 0;
region.srcOffsets[0].z = 0;
region.srcOffsets[1].x = width;
region.srcOffsets[1].y = height;
region.srcOffsets[1].z = 1;
region.dstSubresource = region.srcSubresource;
region.dstOffsets[0] = region.srcOffsets[0];
region.dstOffsets[1] = region.srcOffsets[1];
qvkCmdBlitImage( command_buffer, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region, VK_FILTER_NEAREST );
} else {
VkImageCopy region;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.mipLevel = 0;
region.srcSubresource.baseArrayLayer = 0;
region.srcSubresource.layerCount = 1;
region.srcOffset.x = 0;
region.srcOffset.y = 0;
region.srcOffset.z = 0;
region.dstSubresource = region.srcSubresource;
region.dstOffset = region.srcOffset;
region.extent.width = width;
region.extent.height = height;
region.extent.depth = 1;
qvkCmdCopyImage( command_buffer, srcImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, dstImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region );
}
end_command_buffer( command_buffer );
// Copy data from destination image to memory buffer.
subresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
subresource.mipLevel = 0;
subresource.arrayLayer = 0;
qvkGetImageSubresourceLayout( vk.device, dstImage, &subresource, &layout );
VK_CHECK( qvkMapMemory( vk.device, memory, 0, VK_WHOLE_SIZE, 0, (void**)&data ) );
if ( invalidate_ptr )
{
VkMappedMemoryRange range;
range.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
range.pNext = NULL;
range.memory = memory;
range.size = VK_WHOLE_SIZE;
range.offset = 0;
qvkInvalidateMappedMemoryRanges( vk.device, 1, &range );
}
data += layout.offset;
switch ( vk.capture_format ) {
case VK_FORMAT_B4G4R4A4_UNORM_PACK16: pixel_width = 2; break;
case VK_FORMAT_R16G16B16A16_UNORM: pixel_width = 8; break;
default: pixel_width = 4; break;
}
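	// walk the mapped image row by row, honoring the driver-reported rowPitch,
	// and emit tightly packed RGB rows bottom-up: 4-bit channels are shifted
	// into the high nibble, 16-bit channels keep only their high byte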
buffer_ptr = buffer + width * (height - 1) * 3;
for ( i = 0; i < height; i++ ) {
switch ( pixel_width ) {
case 2: {
uint16_t *src = (uint16_t*)data;
for ( n = 0; n < width; n++ ) {
buffer_ptr[n*3+0] = ((src[n]>>12)&0xF)<<4;
buffer_ptr[n*3+1] = ((src[n]>>8)&0xF)<<4;
buffer_ptr[n*3+2] = ((src[n]>>4)&0xF)<<4;
}
} break;
case 4: {
for ( n = 0; n < width; n++ ) {
Com_Memcpy( &buffer_ptr[n*3], &data[n*4], 3 );
//buffer_ptr[n*3+0] = data[n*4+0];
//buffer_ptr[n*3+1] = data[n*4+1];
//buffer_ptr[n*3+2] = data[n*4+2];
}
} break;
case 8: {
const uint16_t *src = (uint16_t*)data;
for ( n = 0; n < width; n++ ) {
buffer_ptr[n*3+0] = src[n*4+0]>>8;
buffer_ptr[n*3+1] = src[n*4+1]>>8;
buffer_ptr[n*3+2] = src[n*4+2]>>8;
}
} break;
}
buffer_ptr -= width * 3;
data += layout.rowPitch;
}
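	// the staging image may use a blue-first format; swap R and B in place
	// so the caller always receives RGB byte order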
if ( is_bgr( vk.capture_format ) ) {
buffer_ptr = buffer;
for ( i = 0; i < width * height; i++ ) {
byte tmp = buffer_ptr[0];
buffer_ptr[0] = buffer_ptr[2];
buffer_ptr[2] = tmp;
buffer_ptr += 3;
}
}
qvkDestroyImage( vk.device, dstImage, NULL );
qvkFreeMemory( vk.device, memory, NULL );
// restore previous layout
if ( srcImage == vk.color_image ) {
command_buffer = begin_command_buffer();
record_image_layout_transition( command_buffer, srcImage,
VK_IMAGE_ASPECT_COLOR_BIT,
VK_ACCESS_TRANSFER_READ_BIT, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
srcImageAccess, srcImageLayout );
end_command_buffer( command_buffer );
}
}
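
/*
	vk_bloom

	Post-processing bloom: ends the main render pass, extracts bright regions
	into a downscaled buffer, runs VK_NUM_BLOOM_PASSES ping-pong blur passes
	(horizontal, then vertical), blends the blurred buffers back onto the main
	FBO and rebinds whatever pipeline state the extra passes clobbered.
	Returns qtrue if the bloom passes were recorded. A hypothetical per-frame
	call site (the cvar name is an assumption):

		if ( r_bloom->integer ) {
			vk_bloom();
		}
*/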
qboolean vk_bloom( void )
{
uint32_t i;
if ( vk.renderPassIndex == RENDER_PASS_SCREENMAP )
{
return qfalse;
}
if ( backEnd.doneBloom || !backEnd.doneSurfaces || !vk.fboActive )
{
return qfalse;
}
vk_end_render_pass(); // end main
// bloom extraction
vk_begin_bloom_extract_render_pass();
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.bloom_extract_pipeline );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.color_descriptor, 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
vk_end_render_pass();
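	// ping-pong between the blur targets: even pipeline/descriptor indices
	// run the horizontal blur, odd indices the vertical one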
for ( i = 0; i < VK_NUM_BLOOM_PASSES*2; i+=2 ) {
// horizontal blur
vk_begin_blur_render_pass( i+0 );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.blur_pipeline[i+0] );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.bloom_image_descriptor[i+0], 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
vk_end_render_pass();
	// vertical blur
vk_begin_blur_render_pass( i+1 );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.blur_pipeline[i+1] );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.bloom_image_descriptor[i+1], 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
vk_end_render_pass();
#if 0
// horizontal blur
vk_begin_blur_render_pass( i+0 );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.blur_pipeline[i+0] );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.bloom_image_descriptor[i+2], 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
vk_end_render_pass();
	// vertical blur
vk_begin_blur_render_pass( i+1 );
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.blur_pipeline[i+1] );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_post_process, 0, 1, &vk.bloom_image_descriptor[i+1], 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
vk_end_render_pass();
#endif
}
vk_begin_post_bloom_render_pass(); // begin post-bloom
{
VkDescriptorSet dset[VK_NUM_BLOOM_PASSES];
for ( i = 0; i < VK_NUM_BLOOM_PASSES; i++ )
{
dset[i] = vk.bloom_image_descriptor[(i+1)*2];
}
// blend downscaled buffers to main fbo
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.bloom_blend_pipeline );
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout_blend, 0, ARRAY_LEN(dset), dset, 0, NULL );
qvkCmdDraw( vk.cmd->command_buffer, 4, 1, 0, 0 );
}
	// restore the pipeline state clobbered by the bloom passes
	//vk.cmd->last_pipeline = VK_NULL_HANDLE; // alternative: just invalidate the cache
if ( vk.cmd->last_pipeline != VK_NULL_HANDLE )
{
// restore last pipeline
qvkCmdBindPipeline( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.cmd->last_pipeline );
vk_update_mvp( NULL );
// force depth range and viewport/scissor updates
vk.cmd->depth_range = DEPTH_RANGE_COUNT;
// restore clobbered descriptor sets
for ( i = 0; i < VK_NUM_BLOOM_PASSES; i++ ) {
if ( vk.cmd->descriptor_set.current[i] != VK_NULL_HANDLE ) {
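				// the first two sets are bound with a per-draw dynamic offset,
				// the remaining sets carry none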
if ( i == 0 || i == 1 )
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout, i, 1, &vk.cmd->descriptor_set.current[i], 1, &vk.cmd->descriptor_set.offset[i] );
else
qvkCmdBindDescriptorSets( vk.cmd->command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, vk.pipeline_layout, i, 1, &vk.cmd->descriptor_set.current[i], 0, NULL );
}
}
}
backEnd.doneBloom = qtrue;
return qtrue;
}