zdoom-macos-deps/deps/vulkan-headers/include/vulkan/vulkan_beta.h

#ifndef VULKAN_BETA_H_
#define VULKAN_BETA_H_ 1
/*
** Copyright 2015-2024 The Khronos Group Inc.
**
** SPDX-License-Identifier: Apache-2.0
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#ifdef __cplusplus
extern "C" {
#endif
// VK_KHR_portability_subset is a preprocessor guard. Do not pass it to API calls.
#define VK_KHR_portability_subset 1
#define VK_KHR_PORTABILITY_SUBSET_SPEC_VERSION 1
#define VK_KHR_PORTABILITY_SUBSET_EXTENSION_NAME "VK_KHR_portability_subset"
typedef struct VkPhysicalDevicePortabilitySubsetFeaturesKHR {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           constantAlphaColorBlendFactors;
    VkBool32           events;
    VkBool32           imageViewFormatReinterpretation;
    VkBool32           imageViewFormatSwizzle;
    VkBool32           imageView2DOn3DImage;
    VkBool32           multisampleArrayImage;
    VkBool32           mutableComparisonSamplers;
    VkBool32           pointPolygons;
    VkBool32           samplerMipLodBias;
    VkBool32           separateStencilMaskRef;
    VkBool32           shaderSampleRateInterpolationFunctions;
    VkBool32           tessellationIsolines;
    VkBool32           tessellationPointMode;
    VkBool32           triangleFans;
    VkBool32           vertexAttributeAccessBeyondStride;
} VkPhysicalDevicePortabilitySubsetFeaturesKHR;
typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           minVertexInputBindingStrideAlignment;
} VkPhysicalDevicePortabilitySubsetPropertiesKHR;
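/*
** Illustrative usage sketch, not part of the generated header; it is disabled
** with #if 0 so it never affects a build that includes this file. It shows
** one way to query the portability subset features of a device that exposes
** VK_KHR_portability_subset (e.g. MoltenVK on macOS), assuming
** vkGetPhysicalDeviceFeatures2 is available (Vulkan 1.1 or
** VK_KHR_get_physical_device_properties2) and that the structure type enums
** come from vulkan_core.h under the same VK_ENABLE_BETA_EXTENSIONS guard.
*/
#if 0
static VkBool32 deviceSupportsTriangleFans(VkPhysicalDevice physicalDevice)
{
    /* Chain the portability subset feature struct into the pNext chain. */
    VkPhysicalDevicePortabilitySubsetFeaturesKHR portabilityFeatures = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PORTABILITY_SUBSET_FEATURES_KHR
    };
    VkPhysicalDeviceFeatures2 features2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &portabilityFeatures
    };

    vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);

    /* On a portability device each VkBool32 above may legitimately be false;
       a renderer would fall back (e.g. to indexed triangle lists) when so. */
    return portabilityFeatures.triangleFans;
}
#endif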
// VK_AMDX_shader_enqueue is a preprocessor guard. Do not pass it to API calls.
#define VK_AMDX_shader_enqueue 1
#define VK_AMDX_SHADER_ENQUEUE_SPEC_VERSION 1
#define VK_AMDX_SHADER_ENQUEUE_EXTENSION_NAME "VK_AMDX_shader_enqueue"
#define VK_SHADER_INDEX_UNUSED_AMDX (~0U)
typedef struct VkPhysicalDeviceShaderEnqueueFeaturesAMDX {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           shaderEnqueue;
} VkPhysicalDeviceShaderEnqueueFeaturesAMDX;
typedef struct VkPhysicalDeviceShaderEnqueuePropertiesAMDX {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxExecutionGraphDepth;
    uint32_t           maxExecutionGraphShaderOutputNodes;
    uint32_t           maxExecutionGraphShaderPayloadSize;
    uint32_t           maxExecutionGraphShaderPayloadCount;
    uint32_t           executionGraphDispatchAddressAlignment;
} VkPhysicalDeviceShaderEnqueuePropertiesAMDX;
typedef struct VkExecutionGraphPipelineScratchSizeAMDX {
    VkStructureType    sType;
    void*              pNext;
    VkDeviceSize       size;
} VkExecutionGraphPipelineScratchSizeAMDX;
typedef struct VkExecutionGraphPipelineCreateInfoAMDX {
    VkStructureType                           sType;
    const void*                               pNext;
    VkPipelineCreateFlags                     flags;
    uint32_t                                  stageCount;
    const VkPipelineShaderStageCreateInfo*    pStages;
    const VkPipelineLibraryCreateInfoKHR*     pLibraryInfo;
    VkPipelineLayout                          layout;
    VkPipeline                                basePipelineHandle;
    int32_t                                   basePipelineIndex;
} VkExecutionGraphPipelineCreateInfoAMDX;
typedef union VkDeviceOrHostAddressConstAMDX {
    VkDeviceAddress    deviceAddress;
    const void*        hostAddress;
} VkDeviceOrHostAddressConstAMDX;
typedef struct VkDispatchGraphInfoAMDX {
    uint32_t                          nodeIndex;
    uint32_t                          payloadCount;
    VkDeviceOrHostAddressConstAMDX    payloads;
    uint64_t                          payloadStride;
} VkDispatchGraphInfoAMDX;
typedef struct VkDispatchGraphCountInfoAMDX {
    uint32_t                          count;
    VkDeviceOrHostAddressConstAMDX    infos;
    uint64_t                          stride;
} VkDispatchGraphCountInfoAMDX;
typedef struct VkPipelineShaderStageNodeCreateInfoAMDX {
    VkStructureType    sType;
    const void*        pNext;
    const char*        pName;
    uint32_t           index;
} VkPipelineShaderStageNodeCreateInfoAMDX;
typedef VkResult (VKAPI_PTR *PFN_vkCreateExecutionGraphPipelinesAMDX)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkExecutionGraphPipelineCreateInfoAMDX* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
typedef VkResult (VKAPI_PTR *PFN_vkGetExecutionGraphPipelineScratchSizeAMDX)(VkDevice device, VkPipeline executionGraph, VkExecutionGraphPipelineScratchSizeAMDX* pSizeInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetExecutionGraphPipelineNodeIndexAMDX)(VkDevice device, VkPipeline executionGraph, const VkPipelineShaderStageNodeCreateInfoAMDX* pNodeInfo, uint32_t* pNodeIndex);
typedef void (VKAPI_PTR *PFN_vkCmdInitializeGraphScratchMemoryAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch);
typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, const VkDispatchGraphCountInfoAMDX* pCountInfo);
typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphIndirectAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, const VkDispatchGraphCountInfoAMDX* pCountInfo);
typedef void (VKAPI_PTR *PFN_vkCmdDispatchGraphIndirectCountAMDX)(VkCommandBuffer commandBuffer, VkDeviceAddress scratch, VkDeviceAddress countInfo);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateExecutionGraphPipelinesAMDX(
    VkDevice                                        device,
    VkPipelineCache                                 pipelineCache,
    uint32_t                                        createInfoCount,
    const VkExecutionGraphPipelineCreateInfoAMDX*   pCreateInfos,
    const VkAllocationCallbacks*                    pAllocator,
    VkPipeline*                                     pPipelines);

VKAPI_ATTR VkResult VKAPI_CALL vkGetExecutionGraphPipelineScratchSizeAMDX(
    VkDevice                                        device,
    VkPipeline                                      executionGraph,
    VkExecutionGraphPipelineScratchSizeAMDX*        pSizeInfo);

VKAPI_ATTR VkResult VKAPI_CALL vkGetExecutionGraphPipelineNodeIndexAMDX(
    VkDevice                                        device,
    VkPipeline                                      executionGraph,
    const VkPipelineShaderStageNodeCreateInfoAMDX*  pNodeInfo,
    uint32_t*                                       pNodeIndex);

VKAPI_ATTR void VKAPI_CALL vkCmdInitializeGraphScratchMemoryAMDX(
    VkCommandBuffer                                 commandBuffer,
    VkDeviceAddress                                 scratch);

VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphAMDX(
    VkCommandBuffer                                 commandBuffer,
    VkDeviceAddress                                 scratch,
    const VkDispatchGraphCountInfoAMDX*             pCountInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphIndirectAMDX(
    VkCommandBuffer                                 commandBuffer,
    VkDeviceAddress                                 scratch,
    const VkDispatchGraphCountInfoAMDX*             pCountInfo);

VKAPI_ATTR void VKAPI_CALL vkCmdDispatchGraphIndirectCountAMDX(
    VkCommandBuffer                                 commandBuffer,
    VkDeviceAddress                                 scratch,
    VkDeviceAddress                                 countInfo);
#endif
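/*
** Illustrative usage sketch, not part of the generated header; disabled with
** #if 0 so it never affects builds. It shows one way to record a host-side
** dispatch of a single execution graph node. Assumptions: `device` was
** created with VK_AMDX_SHADER_ENQUEUE_EXTENSION_NAME enabled, `graphPipeline`
** was built with vkCreateExecutionGraphPipelinesAMDX, `nodeIndex` came from
** vkGetExecutionGraphPipelineNodeIndexAMDX, `scratch` points to device memory
** sized per vkGetExecutionGraphPipelineScratchSizeAMDX, MyPayload is a
** placeholder for the shader-specific payload layout, and the execution graph
** bind point enum comes from vulkan_core.h under the same beta guard.
*/
#if 0
static void recordGraphDispatch(VkDevice device, VkCommandBuffer commandBuffer,
                                VkPipeline graphPipeline, uint32_t nodeIndex,
                                VkDeviceAddress scratch)
{
    /* Beta entry points are typically fetched at runtime rather than linked. */
    PFN_vkCmdInitializeGraphScratchMemoryAMDX pfnInitScratch =
        (PFN_vkCmdInitializeGraphScratchMemoryAMDX)vkGetDeviceProcAddr(
            device, "vkCmdInitializeGraphScratchMemoryAMDX");
    PFN_vkCmdDispatchGraphAMDX pfnDispatchGraph =
        (PFN_vkCmdDispatchGraphAMDX)vkGetDeviceProcAddr(
            device, "vkCmdDispatchGraphAMDX");

    struct MyPayload { uint32_t value; } payload = { 42u }; /* placeholder payload */

    /* One dispatch record for one node, with the payload read from host memory. */
    VkDispatchGraphInfoAMDX nodeDispatch = {
        .nodeIndex = nodeIndex,
        .payloadCount = 1,
        .payloads = { .hostAddress = &payload },
        .payloadStride = sizeof(payload)
    };
    VkDispatchGraphCountInfoAMDX countInfo = {
        .count = 1,
        .infos = { .hostAddress = &nodeDispatch },
        .stride = sizeof(nodeDispatch)
    };

    vkCmdBindPipeline(commandBuffer, VK_PIPELINE_BIND_POINT_EXECUTION_GRAPH_AMDX,
                      graphPipeline);
    pfnInitScratch(commandBuffer, scratch);
    pfnDispatchGraph(commandBuffer, scratch, &countInfo);
}
#endif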
// VK_NV_displacement_micromap is a preprocessor guard. Do not pass it to API calls.
#define VK_NV_displacement_micromap 1
#define VK_NV_DISPLACEMENT_MICROMAP_SPEC_VERSION 2
#define VK_NV_DISPLACEMENT_MICROMAP_EXTENSION_NAME "VK_NV_displacement_micromap"
typedef enum VkDisplacementMicromapFormatNV {
    VK_DISPLACEMENT_MICROMAP_FORMAT_64_TRIANGLES_64_BYTES_NV = 1,
    VK_DISPLACEMENT_MICROMAP_FORMAT_256_TRIANGLES_128_BYTES_NV = 2,
    VK_DISPLACEMENT_MICROMAP_FORMAT_1024_TRIANGLES_128_BYTES_NV = 3,
    VK_DISPLACEMENT_MICROMAP_FORMAT_MAX_ENUM_NV = 0x7FFFFFFF
} VkDisplacementMicromapFormatNV;
typedef struct VkPhysicalDeviceDisplacementMicromapFeaturesNV {
    VkStructureType    sType;
    void*              pNext;
    VkBool32           displacementMicromap;
} VkPhysicalDeviceDisplacementMicromapFeaturesNV;
typedef struct VkPhysicalDeviceDisplacementMicromapPropertiesNV {
    VkStructureType    sType;
    void*              pNext;
    uint32_t           maxDisplacementMicromapSubdivisionLevel;
} VkPhysicalDeviceDisplacementMicromapPropertiesNV;
typedef struct VkAccelerationStructureTrianglesDisplacementMicromapNV {
    VkStructureType                     sType;
    void*                               pNext;
    VkFormat                            displacementBiasAndScaleFormat;
    VkFormat                            displacementVectorFormat;
    VkDeviceOrHostAddressConstKHR       displacementBiasAndScaleBuffer;
    VkDeviceSize                        displacementBiasAndScaleStride;
    VkDeviceOrHostAddressConstKHR       displacementVectorBuffer;
    VkDeviceSize                        displacementVectorStride;
    VkDeviceOrHostAddressConstKHR       displacedMicromapPrimitiveFlags;
    VkDeviceSize                        displacedMicromapPrimitiveFlagsStride;
    VkIndexType                         indexType;
    VkDeviceOrHostAddressConstKHR       indexBuffer;
    VkDeviceSize                        indexStride;
    uint32_t                            baseTriangle;
    uint32_t                            usageCountsCount;
    const VkMicromapUsageEXT*           pUsageCounts;
    const VkMicromapUsageEXT* const*    ppUsageCounts;
    VkMicromapEXT                       micromap;
} VkAccelerationStructureTrianglesDisplacementMicromapNV;
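/*
** Illustrative usage sketch, not part of the generated header; disabled with
** #if 0 so it never affects builds. It shows one way to check whether
** displacement micromaps can be used before chaining
** VkAccelerationStructureTrianglesDisplacementMicromapNV into an acceleration
** structure build. Assumes vkGetPhysicalDeviceFeatures2 is available and the
** structure type enums come from vulkan_core.h under the same
** VK_ENABLE_BETA_EXTENSIONS guard.
*/
#if 0
static VkBool32 deviceSupportsDisplacementMicromap(VkPhysicalDevice physicalDevice)
{
    /* Chain the feature struct and read the result back from the driver. */
    VkPhysicalDeviceDisplacementMicromapFeaturesNV micromapFeatures = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISPLACEMENT_MICROMAP_FEATURES_NV
    };
    VkPhysicalDeviceFeatures2 features2 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
        .pNext = &micromapFeatures
    };

    vkGetPhysicalDeviceFeatures2(physicalDevice, &features2);
    return micromapFeatures.displacementMicromap;
}
#endif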
#ifdef __cplusplus
}
#endif
#endif