Update MoltenVK to 1.1.1

alexey.lysiuk 2020-12-10 13:43:35 +02:00
parent ac60a12ace
commit 9b49dc8f1d
7 changed files with 12577 additions and 10126 deletions


@@ -292,7 +292,7 @@ VkExtent3D mvkMipmapBaseSizeFromLevelSize3D(VkExtent3D levelSize, uint32_t level
*/
MTLSamplerAddressMode mvkMTLSamplerAddressModeFromVkSamplerAddressMode(VkSamplerAddressMode vkMode);
#ifdef __MAC_OS_X_VERSION_MAX_ALLOWED
#if MVK_MACOS_OR_IOS
/**
* Returns the Metal MTLSamplerBorderColor corresponding to the specified Vulkan VkBorderColor,
* or returns MTLSamplerBorderColorTransparentBlack if no corresponding MTLSamplerBorderColor exists.
@@ -426,6 +426,14 @@ static inline VkExtent2D mvkVkExtent2DFromCGSize(CGSize cgSize) {
return vkExt;
}
/** Returns a CGSize that corresponds to the specified VkExtent2D. */
static inline CGSize mvkCGSizeFromVkExtent2D(VkExtent2D vkExtent) {
CGSize cgSize;
cgSize.width = vkExtent.width;
cgSize.height = vkExtent.height;
return cgSize;
}
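As a usage illustration, a minimal sketch that round-trips a drawable size through the existing mvkVkExtent2DFromCGSize and the mvkCGSizeFromVkExtent2D helper added here; the include path (mvk_datatypes.h) and linkage against CoreGraphics are assumptions, not shown in this diff:

#include <CoreGraphics/CoreGraphics.h>   // CGSize
#include <MoltenVK/mvk_datatypes.h>      // assumed header declaring these inline conversion helpers

// Convert a layer drawable size to a Vulkan extent and back again.
static VkExtent2D extentForDrawableSize(CGSize drawableSize) {
    VkExtent2D extent = mvkVkExtent2DFromCGSize(drawableSize);  // CGSize -> VkExtent2D (existing helper)
    CGSize roundTrip  = mvkCGSizeFromVkExtent2D(extent);        // VkExtent2D -> CGSize (added above)
    (void)roundTrip;  // same values, modulo the CGFloat <-> uint32_t conversion
    return extent;
}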
/** Returns a Metal MTLOrigin constructed from a VkOffset3D. */
static inline MTLOrigin mvkMTLOriginFromVkOffset3D(VkOffset3D vkOffset) {
return MTLOriginMake(vkOffset.x, vkOffset.y, vkOffset.z);


@@ -50,12 +50,12 @@ typedef unsigned long MTLLanguageVersion;
*/
#define MVK_VERSION_MAJOR 1
#define MVK_VERSION_MINOR 1
#define MVK_VERSION_PATCH 0
#define MVK_VERSION_PATCH 1
#define MVK_MAKE_VERSION(major, minor, patch) (((major) * 10000) + ((minor) * 100) + (patch))
#define MVK_VERSION MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH)
#define VK_MVK_MOLTENVK_SPEC_VERSION 28
#define VK_MVK_MOLTENVK_SPEC_VERSION 29
#define VK_MVK_MOLTENVK_EXTENSION_NAME "VK_MVK_moltenvk"
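For reference, a short compile-time check of what the bumped version packs to under the MVK_MAKE_VERSION scheme above; the header name is an assumption about where these macros live:

#include <MoltenVK/vk_mvk_moltenvk.h>   // assumed header providing MVK_VERSION / MVK_MAKE_VERSION

// 1.1.1 packs to (1 * 10000) + (1 * 100) + 1 = 10101.
_Static_assert(MVK_MAKE_VERSION(1, 1, 1) == 10101, "MoltenVK 1.1.1 packed version");
_Static_assert(MVK_VERSION == MVK_MAKE_VERSION(MVK_VERSION_MAJOR, MVK_VERSION_MINOR, MVK_VERSION_PATCH),
               "MVK_VERSION is built from the three components");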
/**
@@ -120,12 +120,12 @@ typedef unsigned long MTLLanguageVersion;
* 4. Setting the MVK_ALLOW_METAL_FENCES or MVK_ALLOW_METAL_EVENTS runtime environment variable
* or MoltenVK compile-time build setting to 1 will cause MoltenVK to use MTLFence or MTLEvent,
* respectively, if it is available on the device, for VkSemaphore synchronization behaviour.
* If both variables are set, MVK_ALLOW_METAL_FENCES takes priority over MVK_ALLOW_METAL_EVENTS.
* If both variables are set, MVK_ALLOW_METAL_EVENTS takes priority over MVK_ALLOW_METAL_FENCES.
* If both are disabled, or if MTLFence or MTLEvent is not available on the device, MoltenVK
* will use CPU synchronization to control VkSemaphore synchronization behaviour.
* By default, MVK_ALLOW_METAL_FENCES is enabled and MVK_ALLOW_METAL_EVENTS is disabled,
* meaning MoltenVK will use MTLFences, if they are available, to control VkSemaphore
* synchronization behaviour, by default.
* By default, both MVK_ALLOW_METAL_FENCES and MVK_ALLOW_METAL_EVENTS are enabled, meaning
* MoltenVK will preferentially use MTLEvents if they are available, followed by MTLFences
* if they are available, to control VkSemaphore synchronization behaviour, by default.
*
* 5. The MVK_CONFIG_AUTO_GPU_CAPTURE_SCOPE runtime environment variable or MoltenVK compile-time
* build setting controls whether Metal should run an automatic GPU capture without the user
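A minimal sketch of pinning the semaphore behaviour described in item 4, using the runtime environment variables named above; setting them via POSIX setenv before instance creation is an assumption about when MoltenVK samples them, and the mechanism actually used still depends on MTLFence/MTLEvent availability on the device:

#include <stdlib.h>

// Prefer MTLFence over MTLEvent for VkSemaphore synchronization, overriding the
// 1.1.1 default in which MVK_ALLOW_METAL_EVENTS wins when both are enabled.
static void preferMetalFencesForSemaphores(void) {
    setenv("MVK_ALLOW_METAL_EVENTS", "0", 1);  // 1 = overwrite any existing value
    setenv("MVK_ALLOW_METAL_FENCES", "1", 1);
    // Call this before creating the VkInstance so the configuration is read with these values.
}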
@@ -271,11 +271,16 @@ typedef struct {
* command buffer. Depending on the number of command buffers that you use, you may also need to
* change the value of the maxActiveMetalCommandBuffersPerQueue setting.
*
* In addition, if this feature is enabled, be aware that if you have recorded commands to a
* Vulkan command buffer, and then choose to reset that command buffer instead of submitting it,
* the corresponding prefilled Metal command buffer will still be submitted. This is because Metal
* command buffers do not support the concept of being reset after being filled. Depending on when
* and how often you do this, it may cause unexpected visual artifacts and unnecessary GPU load.
* If this feature is enabled, be aware that if you have recorded commands to a Vulkan command buffer,
* and then choose to reset that command buffer instead of submitting it, the corresponding prefilled
* Metal command buffer will still be submitted. This is because Metal command buffers do not support
* the concept of being reset after being filled. Depending on when and how often you do this,
* it may cause unexpected visual artifacts and unnecessary GPU load.
*
* This feature is incompatible with updating descriptors after binding. If any of the
* *UpdateAfterBind feature flags of VkPhysicalDeviceDescriptorIndexingFeaturesEXT or
* VkPhysicalDeviceInlineUniformBlockFeaturesEXT have been enabled, the value of this
* setting will be ignored and treated as if it is false.
*
* The value of this parameter may be changed at any time during application runtime,
* and the changed value will immediately effect subsequent MoltenVK behaviour.
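As a sketch of toggling this setting at runtime, the snippet below assumes it is the prefilledMetalCommandBuffers member of MVKConfiguration and uses the vkGetMoltenVKConfigurationMVK / vkSetMoltenVKConfigurationMVK entry points resolved through vkGetInstanceProcAddr:

#include <MoltenVK/vk_mvk_moltenvk.h>   // assumed public MoltenVK API header

// Enable prefilled Metal command buffers; per the note above, MoltenVK ignores
// this when any *UpdateAfterBind descriptor feature has been enabled.
static void enablePrefilledMetalCommandBuffers(VkInstance instance) {
    PFN_vkGetMoltenVKConfigurationMVK getCfg = (PFN_vkGetMoltenVKConfigurationMVK)
        vkGetInstanceProcAddr(instance, "vkGetMoltenVKConfigurationMVK");
    PFN_vkSetMoltenVKConfigurationMVK setCfg = (PFN_vkSetMoltenVKConfigurationMVK)
        vkGetInstanceProcAddr(instance, "vkSetMoltenVKConfigurationMVK");
    if (!getCfg || !setCfg) { return; }            // not running on MoltenVK

    MVKConfiguration cfg;
    size_t cfgSize = sizeof(cfg);
    getCfg(instance, &cfg, &cfgSize);              // read the current configuration
    cfg.prefilledMetalCommandBuffers = VK_TRUE;    // affects subsequently recorded command buffers
    setCfg(instance, &cfg, &cfgSize);
}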
@@ -611,7 +616,7 @@ typedef struct {
VkBool32 placementHeaps; /**< If true, MTLHeap objects support placement of resources. */
VkDeviceSize pushConstantSizeAlignment; /**< The alignment used internally when allocating memory for push constants. Must be PoT. */
uint32_t maxTextureLayers; /**< The maximum number of layers in an array texture. */
uint32_t subgroupSize; /**< The number of threads in a SIMD-group. */
uint32_t maxSubgroupSize; /**< The maximum number of threads in a SIMD-group. */
VkDeviceSize vertexStrideAlignment; /**< The alignment used for the stride of vertex attribute bindings. */
VkBool32 indirectTessellationDrawing; /**< If true, tessellation draw calls support parameters held in a GPU buffer. */
VkBool32 nonUniformThreadgroups; /**< If true, the device supports arbitrary-sized grids in compute workloads. */
@@ -621,6 +626,15 @@ typedef struct {
VkBool32 depthResolve; /**< If true, resolving depth textures with filters other than Sample0 is supported. */
VkBool32 stencilResolve; /**< If true, resolving stencil textures with filters other than Sample0 is supported. */
uint32_t maxPerStageDynamicMTLBufferCount; /**< The maximum number of inline buffers that can be set on a command buffer. */
uint32_t maxPerStageStorageTextureCount; /**< The total number of per-stage Metal textures with read-write access available for writing to from a shader. */
VkBool32 astcHDRTextures; /**< If true, ASTC HDR pixel formats are supported. */
VkBool32 renderLinearTextures; /**< If true, linear textures are renderable. */
VkBool32 pullModelInterpolation; /**< If true, explicit interpolation functions are supported. */
VkBool32 samplerMirrorClampToEdge; /**< If true, the mirrored clamp to edge address mode is supported in samplers. */
VkBool32 quadPermute; /**< If true, quadgroup permutation functions (vote, ballot, shuffle) are supported in shaders. */
VkBool32 simdPermute; /**< If true, SIMD-group permutation functions (vote, ballot, shuffle) are supported in shaders. */
VkBool32 simdReduction; /**< If true, SIMD-group reduction functions (arithmetic) are supported in shaders. */
uint32_t minSubgroupSize; /**< The minimum number of threads in a SIMD-group. */
} MVKPhysicalDeviceMetalFeatures;
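A small sketch of reading the renamed maxSubgroupSize and the new minSubgroupSize fields through the Metal-features query; the vkGetPhysicalDeviceMetalFeaturesMVK entry point and its size-in/size-out convention are assumptions based on this header's existing API:

#include <MoltenVK/vk_mvk_moltenvk.h>   // assumed public MoltenVK API header
#include <stdio.h>

// Print the SIMD-group (subgroup) size range exposed by the device.
static void printSubgroupSizeRange(VkInstance instance, VkPhysicalDevice physDev) {
    PFN_vkGetPhysicalDeviceMetalFeaturesMVK getMtlFeats = (PFN_vkGetPhysicalDeviceMetalFeaturesMVK)
        vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceMetalFeaturesMVK");
    if (!getMtlFeats) { return; }   // not running on MoltenVK

    MVKPhysicalDeviceMetalFeatures mtlFeats;
    size_t featsSize = sizeof(mtlFeats);
    getMtlFeats(physDev, &mtlFeats, &featsSize);
    printf("SIMD-group size: min %u, max %u\n", mtlFeats.minSubgroupSize, mtlFeats.maxSubgroupSize);
}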
/** MoltenVK performance of a particular type of activity. */


@@ -121,6 +121,7 @@ typedef enum {
VK_ICD_WSI_PLATFORM_METAL,
VK_ICD_WSI_PLATFORM_DIRECTFB,
VK_ICD_WSI_PLATFORM_VI,
VK_ICD_WSI_PLATFORM_GGP,
} VkIcdWsiPlatform;
typedef struct {
@@ -196,6 +197,13 @@ typedef struct {
} VkIcdSurfaceIOS;
#endif // VK_USE_PLATFORM_IOS_MVK
#ifdef VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
GgpStreamDescriptor streamDescriptor;
} VkIcdSurfaceGgp;
#endif // VK_USE_PLATFORM_GGP
typedef struct {
VkIcdSurfaceBase base;
VkDisplayModeKHR displayMode;

File diff suppressed because it is too large.


@@ -49,409 +49,6 @@ typedef struct VkPhysicalDevicePortabilitySubsetPropertiesKHR {
} VkPhysicalDevicePortabilitySubsetPropertiesKHR;
#define VK_KHR_deferred_host_operations 1
VK_DEFINE_NON_DISPATCHABLE_HANDLE(VkDeferredOperationKHR)
#define VK_KHR_DEFERRED_HOST_OPERATIONS_SPEC_VERSION 3
#define VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME "VK_KHR_deferred_host_operations"
typedef struct VkDeferredOperationInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeferredOperationKHR operationHandle;
} VkDeferredOperationInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateDeferredOperationKHR)(VkDevice device, const VkAllocationCallbacks* pAllocator, VkDeferredOperationKHR* pDeferredOperation);
typedef void (VKAPI_PTR *PFN_vkDestroyDeferredOperationKHR)(VkDevice device, VkDeferredOperationKHR operation, const VkAllocationCallbacks* pAllocator);
typedef uint32_t (VKAPI_PTR *PFN_vkGetDeferredOperationMaxConcurrencyKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeferredOperationResultKHR)(VkDevice device, VkDeferredOperationKHR operation);
typedef VkResult (VKAPI_PTR *PFN_vkDeferredOperationJoinKHR)(VkDevice device, VkDeferredOperationKHR operation);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateDeferredOperationKHR(
VkDevice device,
const VkAllocationCallbacks* pAllocator,
VkDeferredOperationKHR* pDeferredOperation);
VKAPI_ATTR void VKAPI_CALL vkDestroyDeferredOperationKHR(
VkDevice device,
VkDeferredOperationKHR operation,
const VkAllocationCallbacks* pAllocator);
VKAPI_ATTR uint32_t VKAPI_CALL vkGetDeferredOperationMaxConcurrencyKHR(
VkDevice device,
VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeferredOperationResultKHR(
VkDevice device,
VkDeferredOperationKHR operation);
VKAPI_ATTR VkResult VKAPI_CALL vkDeferredOperationJoinKHR(
VkDevice device,
VkDeferredOperationKHR operation);
#endif
#define VK_KHR_pipeline_library 1
#define VK_KHR_PIPELINE_LIBRARY_SPEC_VERSION 1
#define VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME "VK_KHR_pipeline_library"
typedef struct VkPipelineLibraryCreateInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t libraryCount;
const VkPipeline* pLibraries;
} VkPipelineLibraryCreateInfoKHR;
#define VK_KHR_ray_tracing 1
#define VK_KHR_RAY_TRACING_SPEC_VERSION 8
#define VK_KHR_RAY_TRACING_EXTENSION_NAME "VK_KHR_ray_tracing"
typedef enum VkAccelerationStructureBuildTypeKHR {
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_KHR = 0,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_DEVICE_KHR = 1,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_HOST_OR_DEVICE_KHR = 2,
VK_ACCELERATION_STRUCTURE_BUILD_TYPE_MAX_ENUM_KHR = 0x7FFFFFFF
} VkAccelerationStructureBuildTypeKHR;
typedef union VkDeviceOrHostAddressKHR {
VkDeviceAddress deviceAddress;
void* hostAddress;
} VkDeviceOrHostAddressKHR;
typedef union VkDeviceOrHostAddressConstKHR {
VkDeviceAddress deviceAddress;
const void* hostAddress;
} VkDeviceOrHostAddressConstKHR;
typedef struct VkAccelerationStructureBuildOffsetInfoKHR {
uint32_t primitiveCount;
uint32_t primitiveOffset;
uint32_t firstVertex;
uint32_t transformOffset;
} VkAccelerationStructureBuildOffsetInfoKHR;
typedef struct VkRayTracingShaderGroupCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkRayTracingShaderGroupTypeKHR type;
uint32_t generalShader;
uint32_t closestHitShader;
uint32_t anyHitShader;
uint32_t intersectionShader;
const void* pShaderGroupCaptureReplayHandle;
} VkRayTracingShaderGroupCreateInfoKHR;
typedef struct VkRayTracingPipelineInterfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
uint32_t maxPayloadSize;
uint32_t maxAttributeSize;
uint32_t maxCallableSize;
} VkRayTracingPipelineInterfaceCreateInfoKHR;
typedef struct VkRayTracingPipelineCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkPipelineCreateFlags flags;
uint32_t stageCount;
const VkPipelineShaderStageCreateInfo* pStages;
uint32_t groupCount;
const VkRayTracingShaderGroupCreateInfoKHR* pGroups;
uint32_t maxRecursionDepth;
VkPipelineLibraryCreateInfoKHR libraries;
const VkRayTracingPipelineInterfaceCreateInfoKHR* pLibraryInterface;
VkPipelineLayout layout;
VkPipeline basePipelineHandle;
int32_t basePipelineIndex;
} VkRayTracingPipelineCreateInfoKHR;
typedef struct VkAccelerationStructureGeometryTrianglesDataKHR {
VkStructureType sType;
const void* pNext;
VkFormat vertexFormat;
VkDeviceOrHostAddressConstKHR vertexData;
VkDeviceSize vertexStride;
VkIndexType indexType;
VkDeviceOrHostAddressConstKHR indexData;
VkDeviceOrHostAddressConstKHR transformData;
} VkAccelerationStructureGeometryTrianglesDataKHR;
typedef struct VkAccelerationStructureGeometryAabbsDataKHR {
VkStructureType sType;
const void* pNext;
VkDeviceOrHostAddressConstKHR data;
VkDeviceSize stride;
} VkAccelerationStructureGeometryAabbsDataKHR;
typedef struct VkAccelerationStructureGeometryInstancesDataKHR {
VkStructureType sType;
const void* pNext;
VkBool32 arrayOfPointers;
VkDeviceOrHostAddressConstKHR data;
} VkAccelerationStructureGeometryInstancesDataKHR;
typedef union VkAccelerationStructureGeometryDataKHR {
VkAccelerationStructureGeometryTrianglesDataKHR triangles;
VkAccelerationStructureGeometryAabbsDataKHR aabbs;
VkAccelerationStructureGeometryInstancesDataKHR instances;
} VkAccelerationStructureGeometryDataKHR;
typedef struct VkAccelerationStructureGeometryKHR {
VkStructureType sType;
const void* pNext;
VkGeometryTypeKHR geometryType;
VkAccelerationStructureGeometryDataKHR geometry;
VkGeometryFlagsKHR flags;
} VkAccelerationStructureGeometryKHR;
typedef struct VkAccelerationStructureBuildGeometryInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureTypeKHR type;
VkBuildAccelerationStructureFlagsKHR flags;
VkBool32 update;
VkAccelerationStructureKHR srcAccelerationStructure;
VkAccelerationStructureKHR dstAccelerationStructure;
VkBool32 geometryArrayOfPointers;
uint32_t geometryCount;
const VkAccelerationStructureGeometryKHR* const* ppGeometries;
VkDeviceOrHostAddressKHR scratchData;
} VkAccelerationStructureBuildGeometryInfoKHR;
typedef struct VkAccelerationStructureCreateGeometryTypeInfoKHR {
VkStructureType sType;
const void* pNext;
VkGeometryTypeKHR geometryType;
uint32_t maxPrimitiveCount;
VkIndexType indexType;
uint32_t maxVertexCount;
VkFormat vertexFormat;
VkBool32 allowsTransforms;
} VkAccelerationStructureCreateGeometryTypeInfoKHR;
typedef struct VkAccelerationStructureCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceSize compactedSize;
VkAccelerationStructureTypeKHR type;
VkBuildAccelerationStructureFlagsKHR flags;
uint32_t maxGeometryCount;
const VkAccelerationStructureCreateGeometryTypeInfoKHR* pGeometryInfos;
VkDeviceAddress deviceAddress;
} VkAccelerationStructureCreateInfoKHR;
typedef struct VkAccelerationStructureMemoryRequirementsInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureMemoryRequirementsTypeKHR type;
VkAccelerationStructureBuildTypeKHR buildType;
VkAccelerationStructureKHR accelerationStructure;
} VkAccelerationStructureMemoryRequirementsInfoKHR;
typedef struct VkPhysicalDeviceRayTracingFeaturesKHR {
VkStructureType sType;
void* pNext;
VkBool32 rayTracing;
VkBool32 rayTracingShaderGroupHandleCaptureReplay;
VkBool32 rayTracingShaderGroupHandleCaptureReplayMixed;
VkBool32 rayTracingAccelerationStructureCaptureReplay;
VkBool32 rayTracingIndirectTraceRays;
VkBool32 rayTracingIndirectAccelerationStructureBuild;
VkBool32 rayTracingHostAccelerationStructureCommands;
VkBool32 rayQuery;
VkBool32 rayTracingPrimitiveCulling;
} VkPhysicalDeviceRayTracingFeaturesKHR;
typedef struct VkPhysicalDeviceRayTracingPropertiesKHR {
VkStructureType sType;
void* pNext;
uint32_t shaderGroupHandleSize;
uint32_t maxRecursionDepth;
uint32_t maxShaderGroupStride;
uint32_t shaderGroupBaseAlignment;
uint64_t maxGeometryCount;
uint64_t maxInstanceCount;
uint64_t maxPrimitiveCount;
uint32_t maxDescriptorSetAccelerationStructures;
uint32_t shaderGroupHandleCaptureReplaySize;
} VkPhysicalDeviceRayTracingPropertiesKHR;
typedef struct VkAccelerationStructureDeviceAddressInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR accelerationStructure;
} VkAccelerationStructureDeviceAddressInfoKHR;
typedef struct VkAccelerationStructureVersionKHR {
VkStructureType sType;
const void* pNext;
const uint8_t* versionData;
} VkAccelerationStructureVersionKHR;
typedef struct VkStridedBufferRegionKHR {
VkBuffer buffer;
VkDeviceSize offset;
VkDeviceSize stride;
VkDeviceSize size;
} VkStridedBufferRegionKHR;
typedef struct VkTraceRaysIndirectCommandKHR {
uint32_t width;
uint32_t height;
uint32_t depth;
} VkTraceRaysIndirectCommandKHR;
typedef struct VkCopyAccelerationStructureToMemoryInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR src;
VkDeviceOrHostAddressKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyAccelerationStructureToMemoryInfoKHR;
typedef struct VkCopyMemoryToAccelerationStructureInfoKHR {
VkStructureType sType;
const void* pNext;
VkDeviceOrHostAddressConstKHR src;
VkAccelerationStructureKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyMemoryToAccelerationStructureInfoKHR;
typedef struct VkCopyAccelerationStructureInfoKHR {
VkStructureType sType;
const void* pNext;
VkAccelerationStructureKHR src;
VkAccelerationStructureKHR dst;
VkCopyAccelerationStructureModeKHR mode;
} VkCopyAccelerationStructureInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateAccelerationStructureKHR)(VkDevice device, const VkAccelerationStructureCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkAccelerationStructureKHR* pAccelerationStructure);
typedef void (VKAPI_PTR *PFN_vkGetAccelerationStructureMemoryRequirementsKHR)(VkDevice device, const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo, VkMemoryRequirements2* pMemoryRequirements);
typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureKHR)(VkCommandBuffer commandBuffer, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
typedef void (VKAPI_PTR *PFN_vkCmdBuildAccelerationStructureIndirectKHR)(VkCommandBuffer commandBuffer, const VkAccelerationStructureBuildGeometryInfoKHR* pInfo, VkBuffer indirectBuffer, VkDeviceSize indirectOffset, uint32_t indirectStride);
typedef VkResult (VKAPI_PTR *PFN_vkBuildAccelerationStructureKHR)(VkDevice device, uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureKHR)(VkDevice device, const VkCopyAccelerationStructureInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkCopyAccelerationStructureToMemoryKHR)(VkDevice device, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkCopyMemoryToAccelerationStructureKHR)(VkDevice device, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkWriteAccelerationStructuresPropertiesKHR)(VkDevice device, uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, size_t dataSize, void* pData, size_t stride);
typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdCopyAccelerationStructureToMemoryKHR)(VkCommandBuffer commandBuffer, const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdCopyMemoryToAccelerationStructureKHR)(VkCommandBuffer commandBuffer, const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth);
typedef VkResult (VKAPI_PTR *PFN_vkCreateRayTracingPipelinesKHR)(VkDevice device, VkPipelineCache pipelineCache, uint32_t createInfoCount, const VkRayTracingPipelineCreateInfoKHR* pCreateInfos, const VkAllocationCallbacks* pAllocator, VkPipeline* pPipelines);
typedef VkDeviceAddress (VKAPI_PTR *PFN_vkGetAccelerationStructureDeviceAddressKHR)(VkDevice device, const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);
typedef VkResult (VKAPI_PTR *PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)(VkDevice device, VkPipeline pipeline, uint32_t firstGroup, uint32_t groupCount, size_t dataSize, void* pData);
typedef void (VKAPI_PTR *PFN_vkCmdTraceRaysIndirectKHR)(VkCommandBuffer commandBuffer, const VkStridedBufferRegionKHR* pRaygenShaderBindingTable, const VkStridedBufferRegionKHR* pMissShaderBindingTable, const VkStridedBufferRegionKHR* pHitShaderBindingTable, const VkStridedBufferRegionKHR* pCallableShaderBindingTable, VkBuffer buffer, VkDeviceSize offset);
typedef VkResult (VKAPI_PTR *PFN_vkGetDeviceAccelerationStructureCompatibilityKHR)(VkDevice device, const VkAccelerationStructureVersionKHR* version);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateAccelerationStructureKHR(
VkDevice device,
const VkAccelerationStructureCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkAccelerationStructureKHR* pAccelerationStructure);
VKAPI_ATTR void VKAPI_CALL vkGetAccelerationStructureMemoryRequirementsKHR(
VkDevice device,
const VkAccelerationStructureMemoryRequirementsInfoKHR* pInfo,
VkMemoryRequirements2* pMemoryRequirements);
VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
VKAPI_ATTR void VKAPI_CALL vkCmdBuildAccelerationStructureIndirectKHR(
VkCommandBuffer commandBuffer,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfo,
VkBuffer indirectBuffer,
VkDeviceSize indirectOffset,
uint32_t indirectStride);
VKAPI_ATTR VkResult VKAPI_CALL vkBuildAccelerationStructureKHR(
VkDevice device,
uint32_t infoCount,
const VkAccelerationStructureBuildGeometryInfoKHR* pInfos,
const VkAccelerationStructureBuildOffsetInfoKHR* const* ppOffsetInfos);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureKHR(
VkDevice device,
const VkCopyAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyAccelerationStructureToMemoryKHR(
VkDevice device,
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkCopyMemoryToAccelerationStructureKHR(
VkDevice device,
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkWriteAccelerationStructuresPropertiesKHR(
VkDevice device,
uint32_t accelerationStructureCount,
const VkAccelerationStructureKHR* pAccelerationStructures,
VkQueryType queryType,
size_t dataSize,
void* pData,
size_t stride);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyAccelerationStructureToMemoryKHR(
VkCommandBuffer commandBuffer,
const VkCopyAccelerationStructureToMemoryInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdCopyMemoryToAccelerationStructureKHR(
VkCommandBuffer commandBuffer,
const VkCopyMemoryToAccelerationStructureInfoKHR* pInfo);
VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysKHR(
VkCommandBuffer commandBuffer,
const VkStridedBufferRegionKHR* pRaygenShaderBindingTable,
const VkStridedBufferRegionKHR* pMissShaderBindingTable,
const VkStridedBufferRegionKHR* pHitShaderBindingTable,
const VkStridedBufferRegionKHR* pCallableShaderBindingTable,
uint32_t width,
uint32_t height,
uint32_t depth);
VKAPI_ATTR VkResult VKAPI_CALL vkCreateRayTracingPipelinesKHR(
VkDevice device,
VkPipelineCache pipelineCache,
uint32_t createInfoCount,
const VkRayTracingPipelineCreateInfoKHR* pCreateInfos,
const VkAllocationCallbacks* pAllocator,
VkPipeline* pPipelines);
VKAPI_ATTR VkDeviceAddress VKAPI_CALL vkGetAccelerationStructureDeviceAddressKHR(
VkDevice device,
const VkAccelerationStructureDeviceAddressInfoKHR* pInfo);
VKAPI_ATTR VkResult VKAPI_CALL vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(
VkDevice device,
VkPipeline pipeline,
uint32_t firstGroup,
uint32_t groupCount,
size_t dataSize,
void* pData);
VKAPI_ATTR void VKAPI_CALL vkCmdTraceRaysIndirectKHR(
VkCommandBuffer commandBuffer,
const VkStridedBufferRegionKHR* pRaygenShaderBindingTable,
const VkStridedBufferRegionKHR* pMissShaderBindingTable,
const VkStridedBufferRegionKHR* pHitShaderBindingTable,
const VkStridedBufferRegionKHR* pCallableShaderBindingTable,
VkBuffer buffer,
VkDeviceSize offset);
VKAPI_ATTR VkResult VKAPI_CALL vkGetDeviceAccelerationStructureCompatibilityKHR(
VkDevice device,
const VkAccelerationStructureVersionKHR* version);
#endif
#ifdef __cplusplus
}
#endif

File diff suppressed because it is too large.

Binary file not shown.