Merge branch 'master' of https://github.com/zdoom/gzdoom into qz-master-2023-01-20

Rachael Alexanderson committed on 2023-01-21 11:53:37 -05:00
commit 64d4136699
1288 changed files with 108307 additions and 46666 deletions


@@ -55,11 +55,11 @@ body:
validations:
required: false
- type: input
id: other
id: os_detail
attributes:
label: If Other OS, please describe
label: Please describe your specific OS version
description: Other details
placeholder: "Windows, Mac OSX version, Debian, Ubuntu, Arch, etc."
placeholder: "Windows 11 Home/Pro/Server/etc, Mac OSX version, Debian 10/11/etc, Ubuntu 18/20/etc, Arch, etc."
validations:
required: false
- type: input
@@ -82,7 +82,7 @@ body:
id: description
attributes:
label: A clear and concise description of what the bug is.
description: Describe what happens, what software were you running? _Include screenshot if possible_
description: Describe what happens, what software were you running? _Include a small mod demonstrating the bug, or a screenshot if possible_
placeholder: "How & When does this occur?"
validations:
required: true


@@ -11,56 +11,62 @@ jobs:
matrix:
config:
- {
name: "Visual Studio 64-bit",
name: "Visual Studio 2022",
os: windows-2022,
extra_options: "-A x64",
build_type: "Release"
}
- {
name: "Visual Studio 2019",
os: windows-2019,
extra_options: "-A x64",
build_type: "Release"
}
- {
name: "Visual Studio 64-bit",
name: "Visual Studio 2019",
os: windows-2019,
extra_options: "-A x64",
build_type: "Debug"
}
- {
name: "macOS",
os: macos-11,
os: macos-12,
deps_cmdline: "brew install libvpx",
build_type: "Release"
}
- {
name: "macOS",
os: macos-11,
extra_options: "-DDYN_FLUIDSYNTH=OFF -DDYN_OPENAL=OFF -DDYN_SNDFILE=OFF -DDYN_MPG123=OFF",
deps_cmdline: "brew install fluidsynth mpg123 libsndfile",
os: macos-12,
extra_options: "-G Xcode -DDYN_OPENAL=OFF",
deps_cmdline: "brew install libvpx fluidsynth mpg123 libsndfile",
build_type: "Debug"
}
- {
name: "Linux GCC 7",
os: ubuntu-20.04,
extra_options: "-DCMAKE_C_COMPILER=gcc-7 -DCMAKE_CXX_COMPILER=g++-7",
deps_cmdline: "sudo apt update && sudo apt install g++-7 libsdl2-dev libgtk2.0-dev",
deps_cmdline: "sudo apt update && sudo apt install g++-7 libsdl2-dev libvpx-dev libgtk2.0-dev",
build_type: "RelWithDebInfo"
}
- {
name: "Linux GCC 11",
os: ubuntu-20.04,
extra_options: "-DCMAKE_C_COMPILER=gcc-11 -DCMAKE_CXX_COMPILER=g++-11",
deps_cmdline: "sudo apt update && sudo apt install g++-11 libsdl2-dev libgtk-3-dev",
deps_cmdline: "sudo apt update && sudo apt install g++-11 libsdl2-dev libvpx-dev libgtk-3-dev",
build_type: "MinSizeRel"
}
- {
name: "Linux Clang 6",
os: ubuntu-20.04,
extra_options: "-DCMAKE_C_COMPILER=clang-6.0 -DCMAKE_CXX_COMPILER=clang++-6.0 \
-DDYN_FLUIDSYNTH=OFF -DDYN_OPENAL=OFF -DDYN_SNDFILE=OFF -DDYN_MPG123=OFF",
deps_cmdline: "sudo apt update && sudo apt install clang-6.0 libsdl2-dev libopenal-dev libfluidsynth-dev libmpg123-dev libsndfile1-dev",
extra_options: "-DCMAKE_C_COMPILER=clang-6.0 -DCMAKE_CXX_COMPILER=clang++-6.0 -DDYN_OPENAL=OFF",
deps_cmdline: "sudo apt update && sudo apt install clang-6.0 libsdl2-dev libvpx-dev libopenal-dev libfluidsynth-dev libmpg123-dev libsndfile1-dev",
build_type: "Debug"
}
- {
name: "Linux Clang 12",
os: ubuntu-20.04,
extra_options: "-DCMAKE_C_COMPILER=clang-12 -DCMAKE_CXX_COMPILER=clang++-12",
deps_cmdline: "sudo apt update && sudo apt install clang-12 libsdl2-dev",
deps_cmdline: "sudo apt update && sudo apt install clang-12 libsdl2-dev libvpx-dev",
build_type: "Release"
}
@@ -75,9 +81,9 @@ jobs:
fi
mkdir build
if [[ "${{ runner.os }}" == 'macOS' ]]; then
export ZMUSIC_PACKAGE=zmusic-1.1.7-macos.tar.bz2
export ZMUSIC_PACKAGE=zmusic-1.1.9-macos.tar.xz
elif [[ "${{ runner.os }}" == 'Linux' ]]; then
export ZMUSIC_PACKAGE=zmusic-1.1.7-linux.tar.bz2
export ZMUSIC_PACKAGE=zmusic-1.1.9-linux.tar.xz
fi
if [[ ! -z "${ZMUSIC_PACKAGE}" ]]; then
cd build
@@ -88,6 +94,10 @@ jobs:
- name: Configure
shell: bash
run: |
if [[ "${{ runner.os }}" == 'Windows' ]]; then
# Remove Strawberry Perl from PATH environment variable to avoid usage of libraries it provides
export PATH=`echo $PATH | tr ":" "\n" | grep -v "Strawberry" | tr "\n" ":"`
fi
cmake -B build -DCMAKE_BUILD_TYPE=${{ matrix.config.build_type }} -DCMAKE_PREFIX_PATH=`pwd`/build/zmusic -DPK3_QUIET_ZIPDIR=ON ${{ matrix.config.extra_options }} .
- name: Build


@@ -139,12 +139,12 @@ else()
endif()
if( "${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang" )
set( ZD_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE TRUE )
set( DEM_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE TRUE )
else()
set( ZD_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE FALSE )
set( DEM_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE FALSE )
endif()
if( ZD_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE )
if( DEM_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE )
set( PROFILE 0 CACHE BOOL "Enable profiling with gprof for Debug and RelWithDebInfo build types." )
if( CMAKE_CXX_STANDARD )
@@ -157,7 +157,7 @@ endif()
# Fast math flags, required by some subprojects
set( ZD_FASTMATH_FLAG "" )
if( ZD_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE )
if( DEM_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE )
set( ZD_FASTMATH_FLAG "-ffast-math -ffp-contract=fast" )
elseif( MSVC )
set( ZD_FASTMATH_FLAG "/fp:fast" )
@@ -189,13 +189,14 @@ option( NO_OPENAL "Disable OpenAL sound support" OFF )
find_package( BZip2 )
find_package( JPEG )
find_package( VPX )
find_package( ZLIB )
include( TargetArch )
target_architecture(ZDOOM_TARGET_ARCH)
target_architecture(TARGET_ARCHITECTURE)
if( ${ZDOOM_TARGET_ARCH} MATCHES "x86_64" )
if( ${TARGET_ARCHITECTURE} MATCHES "x86_64" )
set( HAVE_VM_JIT ON )
endif()
@@ -217,7 +218,7 @@ if( MSVC )
# String pooling
# Function-level linking
# Disable run-time type information
set( ALL_C_FLAGS "/GF /Gy /permissive- /DHAVE_SOFTPOLY" )
set( ALL_C_FLAGS "/GF /Gy /permissive-" )
if ( HAVE_VULKAN )
set( ALL_C_FLAGS "${ALL_C_FLAGS} /DHAVE_VULKAN" )
@@ -245,7 +246,7 @@ if( MSVC )
set( DEB_C_FLAGS "/D _CRTDBG_MAP_ALLOC /MTd" )
# Disable warnings for unsecure CRT functions from VC8+
set( ALL_C_FLAGS "${ALL_C_FLAGS} /wd4996 /DUNICODE /D_UNICODE /D_WIN32_WINNT=0x0600" )
set( ALL_C_FLAGS "${ALL_C_FLAGS} /DUNICODE /D_UNICODE /D_WIN32_WINNT=0x0600 /D_CRT_SECURE_NO_DEPRECATE /D_CRT_SECURE_NO_WARNINGS /D_CRT_NONSTDC_NO_WARNINGS" )
# The CMake configurations set /GR and /MD by default, which conflict with our settings.
string(REPLACE "/MD " " " CMAKE_CXX_FLAGS_RELEASE ${CMAKE_CXX_FLAGS_RELEASE} )
@@ -259,7 +260,7 @@ if( MSVC )
string(REPLACE "/MDd " " " CMAKE_C_FLAGS_DEBUG ${CMAKE_C_FLAGS_DEBUG} )
else()
set( REL_LINKER_FLAGS "" )
set( ALL_C_FLAGS "-ffp-contract=off -DHAVE_SOFTPOLY" )
set( ALL_C_FLAGS "-ffp-contract=off" )
if ( HAVE_VULKAN )
set( ALL_C_FLAGS "${ALL_C_FLAGS} -DHAVE_VULKAN" )
@@ -323,11 +324,14 @@ option(FORCE_INTERNAL_ASMJIT "Use internal asmjit" ON)
mark_as_advanced( FORCE_INTERNAL_ASMJIT )
if (HAVE_VULKAN)
add_subdirectory( libraries/glslang/glslang)
add_subdirectory( libraries/glslang/spirv )
add_subdirectory( libraries/glslang/OGLCompilersDLL )
add_subdirectory( libraries/ZVulkan )
endif()
add_subdirectory( libraries/discordrpc EXCLUDE_FROM_ALL )
set( DRPC_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/libraries/discordrpc/include" )
set( DRPC_LIBRARIES discord-rpc )
set( DRPC_LIBRARY discord-rpc )
if( ZLIB_FOUND AND NOT FORCE_INTERNAL_ZLIB )
message( STATUS "Using system zlib, includes found at ${ZLIB_INCLUDE_DIR}" )
else()
@@ -386,7 +390,6 @@ else()
set( BZIP2_LIBRARY bz2 )
endif()
set( LZMA_INCLUDE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/libraries/lzma/C" )
if( NOT CMAKE_CROSSCOMPILING )


@@ -1,11 +1,11 @@
[![QZDoom Logo](https://forum.drdteam.org/download/file.php?id=3080)](https://zdoom.org/)
# Welcome to QZDoom!
[![Build Status](https://ci.appveyor.com/api/projects/status/github/madame-rachelle/qzdoom?branch=master&svg=true)](https://ci.appveyor.com/project/madame-rachelle/qzdoom) [![Build Status](https://travis-ci.org/madame-rachelle/qzdoom.svg?branch=master)](https://travis-ci.org/madame-rachelle/qzdoom)
[![Continuous Integration](https://github.com/ZDoom/qzdoom/actions/workflows/continuous_integration.yml/badge.svg)](https://github.com/ZDoom/qzdoom/actions/workflows/continuous_integration.yml)
## QZDoom is a fork of [GZDoom](https://github.com/coelckers/gzdoom) for beta testing new features that have not yet been included in GZDoom's mainline.
Copyright (c) 1998-2021 ZDoom + GZDoom teams, and contributors
Copyright (c) 1998-2022 ZDoom + GZDoom teams, and contributors
Doom Source (c) 1997 id Software, Raven Software, and contributors
@@ -22,3 +22,9 @@ Special thanks to Coraline of the EDGE team for allowing us to use her [README.m
To build QZDoom, please see the [wiki](https://zdoom.org/wiki/) and see the "Programmer's Corner" on the bottom-right corner of the page to build for your platform.
Follow the same instructions as with GZDoom, except substituting "coelckers/gzdoom" with "madame-rachelle/qzdoom" in order to link this repository in your build environment.
# Resources
- https://zdoom.org/ - Home Page
- https://forum.zdoom.org/ - Forum
- https://zdoom.org/wiki/ - Wiki
- https://discord.gg/zdoom - Discord Server
- https://docs.google.com/spreadsheets/d/1pvwXEgytkor9SClCiDn4j5AH7FedyXS-ocCbsuQIXDU/edit?usp=sharing - Translation sheet (Google Docs)


@@ -0,0 +1,148 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*!\defgroup vp8 VP8
* \ingroup codecs
* VP8 is vpx's newest video compression algorithm that uses motion
* compensated prediction, Discrete Cosine Transform (DCT) coding of the
* prediction error signal and context dependent entropy coding techniques
* based on arithmetic principles. It features:
* - YUV 4:2:0 image format
* - Macro-block based coding (16x16 luma plus two 8x8 chroma)
* - 1/4 (1/8) pixel accuracy motion compensated prediction
* - 4x4 DCT transform
* - 128 level linear quantizer
* - In loop deblocking filter
* - Context-based entropy coding
*
* @{
*/
/*!\file
* \brief Provides controls common to both the VP8 encoder and decoder.
*/
#ifndef VPX_VP8_H_
#define VPX_VP8_H_
#include "./vpx_codec.h"
#include "./vpx_image.h"
#ifdef __cplusplus
extern "C" {
#endif
/*!\brief Control functions
*
* The set of macros define the control functions of VP8 interface
*/
enum vp8_com_control_id {
VP8_SET_REFERENCE = 1, /**< pass in an external frame into decoder to be used as reference frame */
VP8_COPY_REFERENCE = 2, /**< get a copy of reference frame from the decoder */
VP8_SET_POSTPROC = 3, /**< set the decoder's post processing settings */
VP8_SET_DBG_COLOR_REF_FRAME = 4, /**< set the reference frames to color for each macroblock */
VP8_SET_DBG_COLOR_MB_MODES = 5, /**< set which macro block modes to color */
VP8_SET_DBG_COLOR_B_MODES = 6, /**< set which blocks modes to color */
VP8_SET_DBG_DISPLAY_MV = 7, /**< set which motion vector modes to draw */
/* TODO(jkoleszar): The encoder incorrectly reuses some of these values (5+)
* for its control ids. These should be migrated to something like the
* VP8_DECODER_CTRL_ID_START range next time we're ready to break the ABI.
*/
VP9_GET_REFERENCE = 128, /**< get a pointer to a reference frame */
VP8_COMMON_CTRL_ID_MAX,
VP8_DECODER_CTRL_ID_START = 256
};
/*!\brief post process flags
*
* The set of macros define VP8 decoder post processing flags
*/
enum vp8_postproc_level {
VP8_NOFILTERING = 0,
VP8_DEBLOCK = 1 << 0,
VP8_DEMACROBLOCK = 1 << 1,
VP8_ADDNOISE = 1 << 2,
VP8_DEBUG_TXT_FRAME_INFO = 1 << 3, /**< print frame information */
VP8_DEBUG_TXT_MBLK_MODES = 1 << 4, /**< print macro block modes over each macro block */
VP8_DEBUG_TXT_DC_DIFF = 1 << 5, /**< print dc diff for each macro block */
VP8_DEBUG_TXT_RATE_INFO = 1 << 6, /**< print video rate info (encoder only) */
VP8_MFQE = 1 << 10
};
/*!\brief post process flags
*
* This defines a structure that describes the post processing settings. For
* the best objective measure (using the PSNR metric) set post_proc_flag
* to VP8_DEBLOCK and deblocking_level to 1.
*/
typedef struct vp8_postproc_cfg {
int post_proc_flag; /**< the types of post processing to be done, should be combination of "vp8_postproc_level" */
int deblocking_level; /**< the strength of deblocking, valid range [0, 16] */
int noise_level; /**< the strength of additive noise, valid range [0, 16] */
} vp8_postproc_cfg_t;
/*!\brief reference frame type
*
* The set of macros define the type of VP8 reference frames
*/
typedef enum vpx_ref_frame_type {
VP8_LAST_FRAME = 1,
VP8_GOLD_FRAME = 2,
VP8_ALTR_FRAME = 4
} vpx_ref_frame_type_t;
/*!\brief reference frame data struct
*
* Define the data struct to access vp8 reference frames.
*/
typedef struct vpx_ref_frame {
vpx_ref_frame_type_t frame_type; /**< which reference frame */
vpx_image_t img; /**< reference frame data in image format */
} vpx_ref_frame_t;
/*!\brief VP9 specific reference frame data struct
*
* Define the data struct to access vp9 reference frames.
*/
typedef struct vp9_ref_frame {
int idx; /**< frame index to get (input) */
vpx_image_t img; /**< img structure to populate (output) */
} vp9_ref_frame_t;
/*!\cond */
/*!\brief vp8 decoder control function parameter type
*
* defines the data type that each VP8 decoder control function requires
*/
VPX_CTRL_USE_TYPE(VP8_SET_REFERENCE, vpx_ref_frame_t *)
#define VPX_CTRL_VP8_SET_REFERENCE
VPX_CTRL_USE_TYPE(VP8_COPY_REFERENCE, vpx_ref_frame_t *)
#define VPX_CTRL_VP8_COPY_REFERENCE
VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *)
#define VPX_CTRL_VP8_SET_POSTPROC
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_REF_FRAME, int)
#define VPX_CTRL_VP8_SET_DBG_COLOR_REF_FRAME
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_MB_MODES, int)
#define VPX_CTRL_VP8_SET_DBG_COLOR_MB_MODES
VPX_CTRL_USE_TYPE(VP8_SET_DBG_COLOR_B_MODES, int)
#define VPX_CTRL_VP8_SET_DBG_COLOR_B_MODES
VPX_CTRL_USE_TYPE(VP8_SET_DBG_DISPLAY_MV, int)
#define VPX_CTRL_VP8_SET_DBG_DISPLAY_MV
VPX_CTRL_USE_TYPE(VP9_GET_REFERENCE, vp9_ref_frame_t *)
#define VPX_CTRL_VP9_GET_REFERENCE
/*!\endcond */
/*! @} - end defgroup vp8 */
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_VP8_H_
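As a usage sketch (editorial, not part of this diff): the post-processing controls declared in this header are driven through the generic vpx_codec_control() wrapper. The "vpx/" include prefix, the already-initialized decoder context ctx, and the chosen deblocking level are assumptions.

/* Hedged sketch: enable VP8 deblocking/demacroblocking on an initialized
 * decoder context. Assumes the headers live under the usual "vpx/" prefix
 * and that the decoder was created with postprocessing support. */
#include "vpx/vp8dx.h"   /* pulls in vp8.h and the typed control wrappers */

static void enable_postproc(vpx_codec_ctx_t *ctx)
{
    vp8_postproc_cfg_t cfg;
    cfg.post_proc_flag   = VP8_DEBLOCK | VP8_DEMACROBLOCK; /* vp8_postproc_level combination */
    cfg.deblocking_level = 4;  /* valid range [0, 16] */
    cfg.noise_level      = 0;  /* no additive noise */

    /* VPX_CTRL_USE_TYPE(VP8_SET_POSTPROC, vp8_postproc_cfg_t *) above makes
     * this call type-checked at compile time. */
    if (vpx_codec_control(ctx, VP8_SET_POSTPROC, &cfg) != VPX_CODEC_OK) {
        /* decoder was likely not built or initialized with postproc support */
    }
}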


@@ -0,0 +1,176 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*!\defgroup vp8_decoder WebM VP8/VP9 Decoder
* \ingroup vp8
*
* @{
*/
/*!\file
* \brief Provides definitions for using VP8 or VP9 within the vpx Decoder
* interface.
*/
#ifndef VPX_VP8DX_H_
#define VPX_VP8DX_H_
#ifdef __cplusplus
extern "C" {
#endif
/* Include controls common to both the encoder and decoder */
#include "./vp8.h"
/*!\name Algorithm interface for VP8
*
* This interface provides the capability to decode VP8 streams.
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp8_dx_algo;
extern vpx_codec_iface_t *vpx_codec_vp8_dx(void);
/*!@} - end algorithm interface member group*/
/*!\name Algorithm interface for VP9
*
* This interface provides the capability to decode VP9 streams.
* @{
*/
extern vpx_codec_iface_t vpx_codec_vp9_dx_algo;
extern vpx_codec_iface_t *vpx_codec_vp9_dx(void);
/*!@} - end algorithm interface member group*/
/*!\enum vp8_dec_control_id
* \brief VP8 decoder control functions
*
* This set of macros define the control functions available for the VP8
* decoder interface.
*
* \sa #vpx_codec_control
*/
enum vp8_dec_control_id {
/** control function to get info on which reference frames were updated
* by the last decode
*/
VP8D_GET_LAST_REF_UPDATES = VP8_DECODER_CTRL_ID_START,
/** check if the indicated frame is corrupted */
VP8D_GET_FRAME_CORRUPTED,
/** control function to get info on which reference frames were used
* by the last decode
*/
VP8D_GET_LAST_REF_USED,
/** decryption function to decrypt encoded buffer data immediately
* before decoding. Takes a vpx_decrypt_init, which contains
* a callback function and opaque context pointer.
*/
VPXD_SET_DECRYPTOR,
VP8D_SET_DECRYPTOR = VPXD_SET_DECRYPTOR,
/** control function to get the dimensions that the current frame is decoded
* at. This may be different to the intended display size for the frame as
* specified in the wrapper or frame header (see VP9D_GET_DISPLAY_SIZE). */
VP9D_GET_FRAME_SIZE,
/** control function to get the current frame's intended display dimensions
* (as specified in the wrapper or frame header). This may be different to
* the decoded dimensions of this frame (see VP9D_GET_FRAME_SIZE). */
VP9D_GET_DISPLAY_SIZE,
/** control function to get the bit depth of the stream. */
VP9D_GET_BIT_DEPTH,
/** control function to set the byte alignment of the planes in the reference
* buffers. Valid values are power of 2, from 32 to 1024. A value of 0 sets
* legacy alignment. I.e. Y plane is aligned to 32 bytes, U plane directly
* follows Y plane, and V plane directly follows U plane. Default value is 0.
*/
VP9_SET_BYTE_ALIGNMENT,
/** control function to invert the decoding order from right to left. The
* function is used in a test to confirm the decoding independence of tile
* columns. The function may be used in applications where this order
* of decoding is desired.
*
* TODO(yaowu): Rework the unit test that uses this control, and in a future
* release, this test-only control shall be removed.
*/
VP9_INVERT_TILE_DECODE_ORDER,
/** control function to set the skip loop filter flag. Valid values are
* integers. The decoder will skip the loop filter when its value is set to
* nonzero. If the loop filter is skipped the decoder may accumulate decode
* artifacts. The default value is 0.
*/
VP9_SET_SKIP_LOOP_FILTER,
VP8_DECODER_CTRL_ID_MAX
};
/** Decrypt n bytes of data from input -> output, using the decrypt_state
* passed in VPXD_SET_DECRYPTOR.
*/
typedef void (*vpx_decrypt_cb)(void *decrypt_state, const unsigned char *input,
unsigned char *output, int count);
/*!\brief Structure to hold decryption state
*
* Defines a structure to hold the decryption state and access function.
*/
typedef struct vpx_decrypt_init {
/*! Decrypt callback. */
vpx_decrypt_cb decrypt_cb;
/*! Decryption state. */
void *decrypt_state;
} vpx_decrypt_init;
/*!\brief A deprecated alias for vpx_decrypt_init.
*/
typedef vpx_decrypt_init vp8_decrypt_init;
/*!\cond */
/*!\brief VP8 decoder control function parameter type
*
* Defines the data types that VP8D control functions take. Note that
* additional common controls are defined in vp8.h
*
*/
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_UPDATES, int *)
#define VPX_CTRL_VP8D_GET_LAST_REF_UPDATES
VPX_CTRL_USE_TYPE(VP8D_GET_FRAME_CORRUPTED, int *)
#define VPX_CTRL_VP8D_GET_FRAME_CORRUPTED
VPX_CTRL_USE_TYPE(VP8D_GET_LAST_REF_USED, int *)
#define VPX_CTRL_VP8D_GET_LAST_REF_USED
VPX_CTRL_USE_TYPE(VPXD_SET_DECRYPTOR, vpx_decrypt_init *)
#define VPX_CTRL_VPXD_SET_DECRYPTOR
VPX_CTRL_USE_TYPE(VP8D_SET_DECRYPTOR, vpx_decrypt_init *)
#define VPX_CTRL_VP8D_SET_DECRYPTOR
VPX_CTRL_USE_TYPE(VP9D_GET_DISPLAY_SIZE, int *)
#define VPX_CTRL_VP9D_GET_DISPLAY_SIZE
VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *)
#define VPX_CTRL_VP9D_GET_BIT_DEPTH
VPX_CTRL_USE_TYPE(VP9D_GET_FRAME_SIZE, int *)
#define VPX_CTRL_VP9D_GET_FRAME_SIZE
VPX_CTRL_USE_TYPE(VP9_INVERT_TILE_DECODE_ORDER, int)
#define VPX_CTRL_VP9_INVERT_TILE_DECODE_ORDER
/*!\endcond */
/*! @} - end defgroup vp8_decoder */
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_VP8DX_H_
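A small editorial sketch of the decoder-side controls declared above; ctx is assumed to be a context initialized with vpx_codec_vp8_dx() on which a frame was just decoded, and the bitmask interpretation follows the vpx_ref_frame_type values from vp8.h.

/* Hedged sketch: query corruption status and reference-frame updates
 * after a decode call. */
#include <stdio.h>
#include "vpx/vp8dx.h"

static void report_decode_state(vpx_codec_ctx_t *ctx)
{
    int corrupted = 0;
    int ref_updates = 0;

    if (vpx_codec_control(ctx, VP8D_GET_FRAME_CORRUPTED, &corrupted) == VPX_CODEC_OK)
        printf("last frame corrupted: %s\n", corrupted ? "yes" : "no");

    /* mask of reference frames refreshed by the last decode */
    if (vpx_codec_control(ctx, VP8D_GET_LAST_REF_UPDATES, &ref_updates) == VPX_CODEC_OK)
        printf("refs updated: last=%d gold=%d altref=%d\n",
               !!(ref_updates & VP8_LAST_FRAME),
               !!(ref_updates & VP8_GOLD_FRAME),
               !!(ref_updates & VP8_ALTR_FRAME));
}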


@@ -0,0 +1,479 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*!\defgroup codec Common Algorithm Interface
* This abstraction allows applications to easily support multiple video
* formats with minimal code duplication. This section describes the interface
* common to all codecs (both encoders and decoders).
* @{
*/
/*!\file
* \brief Describes the codec algorithm interface to applications.
*
* This file describes the interface between an application and a
* video codec algorithm.
*
* An application instantiates a specific codec instance by using
* vpx_codec_init() and a pointer to the algorithm's interface structure:
* <pre>
* my_app.c:
* extern vpx_codec_iface_t my_codec;
* {
* vpx_codec_ctx_t algo;
* res = vpx_codec_init(&algo, &my_codec);
* }
* </pre>
*
* Once initialized, the instance is managed using other functions from
* the vpx_codec_* family.
*/
#ifndef VPX_VPX_CODEC_H_
#define VPX_VPX_CODEC_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "./vpx_integer.h"
#include "./vpx_image.h"
/*!\brief Decorator indicating a function is deprecated */
#ifndef DEPRECATED
#if defined(__GNUC__) && __GNUC__
#define DEPRECATED __attribute__ ((deprecated))
#elif defined(_MSC_VER)
#define DEPRECATED
#else
#define DEPRECATED
#endif
#endif /* DEPRECATED */
#ifndef DECLSPEC_DEPRECATED
#if defined(__GNUC__) && __GNUC__
#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#elif defined(_MSC_VER)
#define DECLSPEC_DEPRECATED __declspec(deprecated) /**< \copydoc #DEPRECATED */
#else
#define DECLSPEC_DEPRECATED /**< \copydoc #DEPRECATED */
#endif
#endif /* DECLSPEC_DEPRECATED */
/*!\brief Decorator indicating a function is potentially unused */
#ifdef UNUSED
#elif defined(__GNUC__) || defined(__clang__)
#define UNUSED __attribute__ ((unused))
#else
#define UNUSED
#endif
/*!\brief Current ABI version number
*
* \internal
* If this file is altered in any way that changes the ABI, this value
* must be bumped. Examples include, but are not limited to, changing
* types, removing or reassigning enums, adding/removing/rearranging
* fields to structures
*/
#define VPX_CODEC_ABI_VERSION (3 + VPX_IMAGE_ABI_VERSION) /**<\hideinitializer*/
/*!\brief Algorithm return codes */
typedef enum {
/*!\brief Operation completed without error */
VPX_CODEC_OK,
/*!\brief Unspecified error */
VPX_CODEC_ERROR,
/*!\brief Memory operation failed */
VPX_CODEC_MEM_ERROR,
/*!\brief ABI version mismatch */
VPX_CODEC_ABI_MISMATCH,
/*!\brief Algorithm does not have required capability */
VPX_CODEC_INCAPABLE,
/*!\brief The given bitstream is not supported.
*
* The bitstream was unable to be parsed at the highest level. The decoder
* is unable to proceed. This error \ref SHOULD be treated as fatal to the
* stream. */
VPX_CODEC_UNSUP_BITSTREAM,
/*!\brief Encoded bitstream uses an unsupported feature
*
* The decoder does not implement a feature required by the encoder. This
* return code should only be used for features that prevent future
* pictures from being properly decoded. This error \ref MAY be treated as
* fatal to the stream or \ref MAY be treated as fatal to the current GOP.
*/
VPX_CODEC_UNSUP_FEATURE,
/*!\brief The coded data for this stream is corrupt or incomplete
*
* There was a problem decoding the current frame. This return code
* should only be used for failures that prevent future pictures from
* being properly decoded. This error \ref MAY be treated as fatal to the
* stream or \ref MAY be treated as fatal to the current GOP. If decoding
* is continued for the current GOP, artifacts may be present.
*/
VPX_CODEC_CORRUPT_FRAME,
/*!\brief An application-supplied parameter is not valid.
*
*/
VPX_CODEC_INVALID_PARAM,
/*!\brief An iterator reached the end of list.
*
*/
VPX_CODEC_LIST_END
}
vpx_codec_err_t;
/*! \brief Codec capabilities bitfield
*
* Each codec advertises the capabilities it supports as part of its
* ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
* or functionality, and are not required to be supported.
*
* The available flags are specified by VPX_CODEC_CAP_* defines.
*/
typedef long vpx_codec_caps_t;
#define VPX_CODEC_CAP_DECODER 0x1 /**< Is a decoder */
#define VPX_CODEC_CAP_ENCODER 0x2 /**< Is an encoder */
/*! \brief Initialization-time Feature Enabling
*
* Certain codec features must be known at initialization time, to allow for
* proper memory allocation.
*
* The available flags are specified by VPX_CODEC_USE_* defines.
*/
typedef long vpx_codec_flags_t;
/*!\brief Codec interface structure.
*
* Contains function pointers and other data private to the codec
* implementation. This structure is opaque to the application.
*/
typedef const struct vpx_codec_iface vpx_codec_iface_t;
/*!\brief Codec private data structure.
*
* Contains data private to the codec implementation. This structure is opaque
* to the application.
*/
typedef struct vpx_codec_priv vpx_codec_priv_t;
/*!\brief Iterator
*
* Opaque storage used for iterating over lists.
*/
typedef const void *vpx_codec_iter_t;
/*!\brief Codec context structure
*
* All codecs \ref MUST support this context structure fully. In general,
* this data should be considered private to the codec algorithm, and
* not be manipulated or examined by the calling application. Applications
* may reference the 'name' member to get a printable description of the
* algorithm.
*/
typedef struct vpx_codec_ctx {
const char *name; /**< Printable interface name */
vpx_codec_iface_t *iface; /**< Interface pointers */
vpx_codec_err_t err; /**< Last returned error */
const char *err_detail; /**< Detailed info, if available */
vpx_codec_flags_t init_flags; /**< Flags passed at init time */
union {
/**< Decoder Configuration Pointer */
const struct vpx_codec_dec_cfg *dec;
/**< Encoder Configuration Pointer */
const struct vpx_codec_enc_cfg *enc;
const void *raw;
} config; /**< Configuration pointer aliasing union */
vpx_codec_priv_t *priv; /**< Algorithm private storage */
} vpx_codec_ctx_t;
/*!\brief Bit depth for codec
* *
* This enumeration determines the bit depth of the codec.
*/
typedef enum vpx_bit_depth {
VPX_BITS_8 = 8, /**< 8 bits */
VPX_BITS_10 = 10, /**< 10 bits */
VPX_BITS_12 = 12, /**< 12 bits */
} vpx_bit_depth_t;
/*
* Library Version Number Interface
*
* For example, see the following sample return values:
* vpx_codec_version() (1<<16 | 2<<8 | 3)
* vpx_codec_version_str() "v1.2.3-rc1-16-gec6a1ba"
* vpx_codec_version_extra_str() "rc1-16-gec6a1ba"
*/
/*!\brief Return the version information (as an integer)
*
* Returns a packed encoding of the library version number. This will only include
* the major.minor.patch component of the version number. Note that this encoded
* value should be accessed through the macros provided, as the encoding may change
* in the future.
*
*/
int vpx_codec_version(void);
#define VPX_VERSION_MAJOR(v) ((v>>16)&0xff) /**< extract major from packed version */
#define VPX_VERSION_MINOR(v) ((v>>8)&0xff) /**< extract minor from packed version */
#define VPX_VERSION_PATCH(v) ((v>>0)&0xff) /**< extract patch from packed version */
/*!\brief Return the version major number */
#define vpx_codec_version_major() ((vpx_codec_version()>>16)&0xff)
/*!\brief Return the version minor number */
#define vpx_codec_version_minor() ((vpx_codec_version()>>8)&0xff)
/*!\brief Return the version patch number */
#define vpx_codec_version_patch() ((vpx_codec_version()>>0)&0xff)
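A one-line editorial sketch of the packed version encoding described above, using the extraction macros just defined; only the printf formatting is an assumption.

/* Hedged sketch: unpack the version integer. A return value of
 * (1<<16 | 2<<8 | 3) would print "libvpx 1.2.3". */
#include <stdio.h>
#include "vpx/vpx_codec.h"

static void print_vpx_version(void)
{
    const int v = vpx_codec_version();
    printf("libvpx %d.%d.%d (%s)\n",
           VPX_VERSION_MAJOR(v), VPX_VERSION_MINOR(v), VPX_VERSION_PATCH(v),
           vpx_codec_version_str()); /* full string, may carry an -rc/-g suffix */
}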
/*!\brief Return the version information (as a string)
*
* Returns a printable string containing the full library version number. This may
* contain additional text following the three digit version number, as to indicate
* release candidates, prerelease versions, etc.
*
*/
const char *vpx_codec_version_str(void);
/*!\brief Return the version information (as a string)
*
* Returns a printable "extra string". This is the component of the string returned
* by vpx_codec_version_str() following the three digit version number.
*
*/
const char *vpx_codec_version_extra_str(void);
/*!\brief Return the build configuration
*
* Returns a printable string containing an encoded version of the build
* configuration. This may be useful to vpx support.
*
*/
const char *vpx_codec_build_config(void);
/*!\brief Return the name for a given interface
*
* Returns a human readable string for name of the given codec interface.
*
* \param[in] iface Interface pointer
*
*/
const char *vpx_codec_iface_name(vpx_codec_iface_t *iface);
/*!\brief Convert error number to printable string
*
* Returns a human readable string for the last error returned by the
* algorithm. The returned error will be one line and will not contain
* any newline characters.
*
*
* \param[in] err Error number.
*
*/
const char *vpx_codec_err_to_string(vpx_codec_err_t err);
/*!\brief Retrieve error synopsis for codec context
*
* Returns a human readable string for the last error returned by the
* algorithm. The returned error will be one line and will not contain
* any newline characters.
*
*
* \param[in] ctx Pointer to this instance's context.
*
*/
const char *vpx_codec_error(vpx_codec_ctx_t *ctx);
/*!\brief Retrieve detailed error information for codec context
*
* Returns a human readable string providing detailed information about
* the last error.
*
* \param[in] ctx Pointer to this instance's context.
*
* \retval NULL
* No detailed information is available.
*/
const char *vpx_codec_error_detail(vpx_codec_ctx_t *ctx);
/* REQUIRED FUNCTIONS
*
* The following functions are required to be implemented for all codecs.
* They represent the base case functionality expected of all codecs.
*/
/*!\brief Destroy a codec instance
*
* Destroys a codec context, freeing any associated memory buffers.
*
* \param[in] ctx Pointer to this instance's context
*
* \retval #VPX_CODEC_OK
* The codec algorithm initialized.
* \retval #VPX_CODEC_MEM_ERROR
* Memory allocation failed.
*/
vpx_codec_err_t vpx_codec_destroy(vpx_codec_ctx_t *ctx);
/*!\brief Get the capabilities of an algorithm.
*
* Retrieves the capabilities bitfield from the algorithm's interface.
*
* \param[in] iface Pointer to the algorithm interface
*
*/
vpx_codec_caps_t vpx_codec_get_caps(vpx_codec_iface_t *iface);
/*!\brief Control algorithm
*
* This function is used to exchange algorithm specific data with the codec
* instance. This can be used to implement features specific to a particular
* algorithm.
*
* This wrapper function dispatches the request to the helper function
* associated with the given ctrl_id. It tries to call this function
* transparently, but will return #VPX_CODEC_ERROR if the request could not
* be dispatched.
*
* Note that this function should not be used directly. Call the
* #vpx_codec_control wrapper macro instead.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] ctrl_id Algorithm specific control identifier
*
* \retval #VPX_CODEC_OK
* The control request was processed.
* \retval #VPX_CODEC_ERROR
* The control request was not processed.
* \retval #VPX_CODEC_INVALID_PARAM
* The data was not valid.
*/
vpx_codec_err_t vpx_codec_control_(vpx_codec_ctx_t *ctx,
int ctrl_id,
...);
#if defined(VPX_DISABLE_CTRL_TYPECHECKS) && VPX_DISABLE_CTRL_TYPECHECKS
# define vpx_codec_control(ctx,id,data) vpx_codec_control_(ctx,id,data)
# define VPX_CTRL_USE_TYPE(id, typ)
# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ)
# define VPX_CTRL_VOID(id, typ)
#else
/*!\brief vpx_codec_control wrapper macro
*
* This macro allows for type safe conversions across the variadic parameter
* to vpx_codec_control_().
*
* \internal
* It works by dispatching the call to the control function through a wrapper
* function named with the id parameter.
*/
# define vpx_codec_control(ctx,id,data) vpx_codec_control_##id(ctx,id,data)\
/**<\hideinitializer*/
/*!\brief vpx_codec_control type definition macro
*
* This macro allows for type safe conversions across the variadic parameter
* to vpx_codec_control_(). It defines the type of the argument for a given
* control identifier.
*
* \internal
* It defines a static function with
* the correctly typed arguments as a wrapper to the type-unsafe internal
* function.
*/
# define VPX_CTRL_USE_TYPE(id, typ) \
static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) UNUSED;\
\
static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
return vpx_codec_control_(ctx, ctrl_id, data);\
} /**<\hideinitializer*/
/*!\brief vpx_codec_control deprecated type definition macro
*
* Like #VPX_CTRL_USE_TYPE, but indicates that the specified control is
* deprecated and should not be used. Consult the documentation for your
* codec for more information.
*
* \internal
* It defines a static function with the correctly typed arguments as a
* wrapper to the type-unsafe internal function.
*/
# define VPX_CTRL_USE_TYPE_DEPRECATED(id, typ) \
DECLSPEC_DEPRECATED static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t*, int, typ) DEPRECATED UNUSED;\
\
DECLSPEC_DEPRECATED static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id, typ data) {\
return vpx_codec_control_(ctx, ctrl_id, data);\
} /**<\hideinitializer*/
/*!\brief vpx_codec_control void type definition macro
*
* This macro allows for type safe conversions across the variadic parameter
* to vpx_codec_control_(). It indicates that a given control identifier takes
* no argument.
*
* \internal
* It defines a static function without a data argument as a wrapper to the
* type-unsafe internal function.
*/
# define VPX_CTRL_VOID(id) \
static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t*, int) UNUSED;\
\
static vpx_codec_err_t \
vpx_codec_control_##id(vpx_codec_ctx_t *ctx, int ctrl_id) {\
return vpx_codec_control_(ctx, ctrl_id);\
} /**<\hideinitializer*/
#endif
/*!@} - end defgroup codec*/
#ifdef __cplusplus
}
#endif
#endif // VPX_VPX_CODEC_H_
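An editorial sketch of how the typed vpx_codec_control() wrapper and the error helpers above combine in practice; the control id (VP9D_GET_BIT_DEPTH, from vp8dx.h) and the assumption that ctx is an initialized VP9 decoder context are illustrative choices.

/* Hedged sketch: a type-checked control call plus error reporting. */
#include <stdio.h>
#include "vpx/vpx_codec.h"
#include "vpx/vp8dx.h"

static void query_bit_depth(vpx_codec_ctx_t *ctx)
{
    unsigned int bit_depth = 0;

    /* VPX_CTRL_USE_TYPE(VP9D_GET_BIT_DEPTH, unsigned int *) expands to a
     * vpx_codec_control_VP9D_GET_BIT_DEPTH() wrapper, so passing anything
     * other than an unsigned int * here fails to compile. */
    vpx_codec_err_t res = vpx_codec_control(ctx, VP9D_GET_BIT_DEPTH, &bit_depth);

    if (res == VPX_CODEC_OK) {
        printf("stream bit depth: %u\n", bit_depth);
    } else {
        const char *detail = vpx_codec_error_detail(ctx);
        printf("control failed: %s (%s)\n",
               vpx_codec_err_to_string(res), detail ? detail : "no detail");
    }
}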


@@ -0,0 +1,378 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_VPX_DECODER_H_
#define VPX_VPX_DECODER_H_
/*!\defgroup decoder Decoder Algorithm Interface
* \ingroup codec
* This abstraction allows applications using this decoder to easily support
* multiple video formats with minimal code duplication. This section describes
* the interface common to all decoders.
* @{
*/
/*!\file
* \brief Describes the decoder algorithm interface to applications.
*
* This file describes the interface between an application and a
* video decoder algorithm.
*
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "./vpx_codec.h"
#include "./vpx_frame_buffer.h"
/*!\brief Current ABI version number
*
* \internal
* If this file is altered in any way that changes the ABI, this value
* must be bumped. Examples include, but are not limited to, changing
* types, removing or reassigning enums, adding/removing/rearranging
* fields to structures
*/
#define VPX_DECODER_ABI_VERSION (3 + VPX_CODEC_ABI_VERSION) /**<\hideinitializer*/
/*! \brief Decoder capabilities bitfield
*
* Each decoder advertises the capabilities it supports as part of its
* ::vpx_codec_iface_t interface structure. Capabilities are extra interfaces
* or functionality, and are not required to be supported by a decoder.
*
* The available flags are specified by VPX_CODEC_CAP_* defines.
*/
#define VPX_CODEC_CAP_PUT_SLICE 0x10000 /**< Will issue put_slice callbacks */
#define VPX_CODEC_CAP_PUT_FRAME 0x20000 /**< Will issue put_frame callbacks */
#define VPX_CODEC_CAP_POSTPROC 0x40000 /**< Can postprocess decoded frame */
#define VPX_CODEC_CAP_ERROR_CONCEALMENT 0x80000 /**< Can conceal errors due to
packet loss */
#define VPX_CODEC_CAP_INPUT_FRAGMENTS 0x100000 /**< Can receive encoded frames
one fragment at a time */
/*! \brief Initialization-time Feature Enabling
*
* Certain codec features must be known at initialization time, to allow for
* proper memory allocation.
*
* The available flags are specified by VPX_CODEC_USE_* defines.
*/
#define VPX_CODEC_CAP_FRAME_THREADING 0x200000 /**< Can support frame-based
multi-threading */
#define VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER 0x400000 /**< Can support external
frame buffers */
#define VPX_CODEC_USE_POSTPROC 0x10000 /**< Postprocess decoded frame */
#define VPX_CODEC_USE_ERROR_CONCEALMENT 0x20000 /**< Conceal errors in decoded
frames */
#define VPX_CODEC_USE_INPUT_FRAGMENTS 0x40000 /**< The input frame should be
passed to the decoder one
fragment at a time */
#define VPX_CODEC_USE_FRAME_THREADING 0x80000 /**< Enable frame-based
multi-threading */
/*!\brief Stream properties
*
* This structure is used to query or set properties of the decoded
* stream. Algorithms may extend this structure with data specific
* to their bitstream by setting the sz member appropriately.
*/
typedef struct vpx_codec_stream_info {
unsigned int sz; /**< Size of this structure */
unsigned int w; /**< Width (or 0 for unknown/default) */
unsigned int h; /**< Height (or 0 for unknown/default) */
unsigned int is_kf; /**< Current frame is a keyframe */
} vpx_codec_stream_info_t;
/* REQUIRED FUNCTIONS
*
* The following functions are required to be implemented for all decoders.
* They represent the base case functionality expected of all decoders.
*/
/*!\brief Initialization Configurations
*
* This structure is used to pass init time configuration options to the
* decoder.
*/
typedef struct vpx_codec_dec_cfg {
unsigned int threads; /**< Maximum number of threads to use, default 1 */
unsigned int w; /**< Width */
unsigned int h; /**< Height */
} vpx_codec_dec_cfg_t; /**< alias for struct vpx_codec_dec_cfg */
/*!\brief Initialize a decoder instance
*
* Initializes a decoder context using the given interface. Applications
* should call the vpx_codec_dec_init convenience macro instead of this
* function directly, to ensure that the ABI version number parameter
* is properly initialized.
*
* If the library was configured with --disable-multithread, this call
* is not thread safe and should be guarded with a lock if being used
* in a multithreaded context.
*
* \param[in] ctx Pointer to this instance's context.
* \param[in] iface Pointer to the algorithm interface to use.
* \param[in] cfg Configuration to use, if known. May be NULL.
* \param[in] flags Bitfield of VPX_CODEC_USE_* flags
* \param[in] ver ABI version number. Must be set to
* VPX_DECODER_ABI_VERSION
* \retval #VPX_CODEC_OK
* The decoder algorithm initialized.
* \retval #VPX_CODEC_MEM_ERROR
* Memory allocation failed.
*/
vpx_codec_err_t vpx_codec_dec_init_ver(vpx_codec_ctx_t *ctx,
vpx_codec_iface_t *iface,
const vpx_codec_dec_cfg_t *cfg,
vpx_codec_flags_t flags,
int ver);
/*!\brief Convenience macro for vpx_codec_dec_init_ver()
*
* Ensures the ABI version parameter is properly set.
*/
#define vpx_codec_dec_init(ctx, iface, cfg, flags) \
vpx_codec_dec_init_ver(ctx, iface, cfg, flags, VPX_DECODER_ABI_VERSION)
/*!\brief Parse stream info from a buffer
*
* Performs high level parsing of the bitstream. Construction of a decoder
* context is not necessary. Can be used to determine if the bitstream is
* of the proper format, and to extract information from the stream.
*
* \param[in] iface Pointer to the algorithm interface
* \param[in] data Pointer to a block of data to parse
* \param[in] data_sz Size of the data buffer
* \param[in,out] si Pointer to stream info to update. The size member
* \ref MUST be properly initialized, but \ref MAY be
* clobbered by the algorithm. This parameter \ref MAY
* be NULL.
*
* \retval #VPX_CODEC_OK
* Bitstream is parsable and stream information updated
*/
vpx_codec_err_t vpx_codec_peek_stream_info(vpx_codec_iface_t *iface,
const uint8_t *data,
unsigned int data_sz,
vpx_codec_stream_info_t *si);
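Editorial sketch of the "peek" path described above: inspect a coded frame before any decoder context exists. The assumption is that data/data_sz hold one coded VP9 frame and that vp8dx.h provides the VP9 interface pointer.

/* Hedged sketch: probe stream info without constructing a decoder. */
#include <stdio.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

static void probe_frame(const uint8_t *data, unsigned int data_sz)
{
    vpx_codec_stream_info_t si;
    si.sz = sizeof(si); /* the size member MUST be initialized first */

    if (vpx_codec_peek_stream_info(vpx_codec_vp9_dx(), data, data_sz, &si) == VPX_CODEC_OK)
        printf("%ux%u, keyframe: %s\n", si.w, si.h, si.is_kf ? "yes" : "no");
}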
/*!\brief Return information about the current stream.
*
* Returns information about the stream that has been parsed during decoding.
*
* \param[in] ctx Pointer to this instance's context
* \param[in,out] si Pointer to stream info to update. The size member
* \ref MUST be properly initialized, but \ref MAY be
* clobbered by the algorithm. This parameter \ref MAY
* be NULL.
*
* \retval #VPX_CODEC_OK
* Bitstream is parsable and stream information updated
*/
vpx_codec_err_t vpx_codec_get_stream_info(vpx_codec_ctx_t *ctx,
vpx_codec_stream_info_t *si);
/*!\brief Decode data
*
* Processes a buffer of coded data. If the processing results in a new
* decoded frame becoming available, PUT_SLICE and PUT_FRAME events may be
* generated, as appropriate. Encoded data \ref MUST be passed in DTS (decode
* time stamp) order. Frames produced will always be in PTS (presentation
* time stamp) order.
* If the decoder is configured with VPX_CODEC_USE_INPUT_FRAGMENTS enabled,
* data and data_sz can contain a fragment of the encoded frame. Fragment
* \#n must contain at least partition \#n, but can also contain subsequent
* partitions (\#n+1 - \#n+i), and if so, fragments \#n+1, .., \#n+i must
* be empty. When no more data is available, this function should be called
* with NULL as data and 0 as data_sz. The memory passed to this function
* must be available until the frame has been decoded.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] data Pointer to this block of new coded data. If
* NULL, a VPX_CODEC_CB_PUT_FRAME event is posted
* for the previously decoded frame.
* \param[in] data_sz Size of the coded data, in bytes.
* \param[in] user_priv Application specific data to associate with
* this frame.
* \param[in] deadline Soft deadline the decoder should attempt to meet,
* in us. Set to zero for unlimited.
*
* \return Returns #VPX_CODEC_OK if the coded data was processed completely
* and future pictures can be decoded without error. Otherwise,
* see the descriptions of the other error codes in ::vpx_codec_err_t
* for recoverability capabilities.
*/
vpx_codec_err_t vpx_codec_decode(vpx_codec_ctx_t *ctx,
const uint8_t *data,
unsigned int data_sz,
void *user_priv,
long deadline);
/*!\brief Decoded frames iterator
*
* Iterates over a list of the frames available for display. The iterator
* storage should be initialized to NULL to start the iteration. Iteration is
* complete when this function returns NULL.
*
* The list of available frames becomes valid upon completion of the
* vpx_codec_decode call, and remains valid until the next call to vpx_codec_decode.
*
* \param[in] ctx Pointer to this instance's context
* \param[in,out] iter Iterator storage, initialized to NULL
*
* \return Returns a pointer to an image, if one is ready for display. Frames
* produced will always be in PTS (presentation time stamp) order.
*/
vpx_image_t *vpx_codec_get_frame(vpx_codec_ctx_t *ctx,
vpx_codec_iter_t *iter);
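Editorial sketch of the full init / decode / get_frame / destroy cycle the comments above describe. read_coded_frame() and handle_image() are placeholders standing in for the application's demuxer and consumer, not libvpx API; the single-threaded configuration is likewise an assumption.

/* Hedged sketch of a decode loop: feed coded frames in DTS order, drain
 * decoded images in PTS order, flush, tear down. */
#include <stddef.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vp8dx.h"

extern int read_coded_frame(const uint8_t **data, unsigned int *data_sz); /* placeholder */
extern void handle_image(const vpx_image_t *img);                         /* placeholder */

static int decode_stream(void)
{
    vpx_codec_ctx_t ctx;
    vpx_codec_dec_cfg_t cfg = { 1, 0, 0 }; /* threads = 1, w/h unknown */
    const uint8_t *data;
    unsigned int data_sz;
    vpx_codec_iter_t iter;
    const vpx_image_t *img;

    /* convenience macro fills in VPX_DECODER_ABI_VERSION */
    if (vpx_codec_dec_init(&ctx, vpx_codec_vp9_dx(), &cfg, 0) != VPX_CODEC_OK)
        return -1;

    while (read_coded_frame(&data, &data_sz)) {
        if (vpx_codec_decode(&ctx, data, data_sz, NULL, 0) != VPX_CODEC_OK)
            break;
        iter = NULL; /* iterator must start at NULL */
        while ((img = vpx_codec_get_frame(&ctx, &iter)) != NULL)
            handle_image(img);
    }

    /* flush: no more data, then drain any remaining frames */
    vpx_codec_decode(&ctx, NULL, 0, NULL, 0);
    iter = NULL;
    while ((img = vpx_codec_get_frame(&ctx, &iter)) != NULL)
        handle_image(img);

    return vpx_codec_destroy(&ctx) == VPX_CODEC_OK ? 0 : -1;
}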
/*!\defgroup cap_put_frame Frame-Based Decoding Functions
*
* The following functions are required to be implemented for all decoders
* that advertise the VPX_CODEC_CAP_PUT_FRAME capability. Calling these functions
* for codecs that don't advertise this capability will result in an error
* code being returned, usually VPX_CODEC_ERROR
* @{
*/
/*!\brief put frame callback prototype
*
* This callback is invoked by the decoder to notify the application of
* the availability of decoded image data.
*/
typedef void (*vpx_codec_put_frame_cb_fn_t)(void *user_priv,
const vpx_image_t *img);
/*!\brief Register for notification of frame completion.
*
* Registers a given function to be called when a decoded frame is
* available.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] cb Pointer to the callback function
* \param[in] user_priv User's private data
*
* \retval #VPX_CODEC_OK
* Callback successfully registered.
* \retval #VPX_CODEC_ERROR
* Decoder context not initialized, or algorithm not capable of
* posting slice completion.
*/
vpx_codec_err_t vpx_codec_register_put_frame_cb(vpx_codec_ctx_t *ctx,
vpx_codec_put_frame_cb_fn_t cb,
void *user_priv);
/*!@} - end defgroup cap_put_frame */
/*!\defgroup cap_put_slice Slice-Based Decoding Functions
*
* The following functions are required to be implemented for all decoders
* that advertise the VPX_CODEC_CAP_PUT_SLICE capability. Calling these functions
* for codecs that don't advertise this capability will result in an error
* code being returned, usually VPX_CODEC_ERROR
* @{
*/
/*!\brief put slice callback prototype
*
* This callback is invoked by the decoder to notify the application of
* the availability of partially decoded image data.
*/
typedef void (*vpx_codec_put_slice_cb_fn_t)(void *user_priv,
const vpx_image_t *img,
const vpx_image_rect_t *valid,
const vpx_image_rect_t *update);
/*!\brief Register for notification of slice completion.
*
* Registers a given function to be called when a decoded slice is
* available.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] cb Pointer to the callback function
* \param[in] user_priv User's private data
*
* \retval #VPX_CODEC_OK
* Callback successfully registered.
* \retval #VPX_CODEC_ERROR
* Decoder context not initialized, or algorithm not capable of
* posting slice completion.
*/
vpx_codec_err_t vpx_codec_register_put_slice_cb(vpx_codec_ctx_t *ctx,
vpx_codec_put_slice_cb_fn_t cb,
void *user_priv);
/*!@} - end defgroup cap_put_slice*/
/*!\defgroup cap_external_frame_buffer External Frame Buffer Functions
*
* The following section is required to be implemented for all decoders
* that advertise the VPX_CODEC_CAP_EXTERNAL_FRAME_BUFFER capability.
* Calling this function for codecs that don't advertise this capability
* will result in an error code being returned, usually VPX_CODEC_ERROR.
*
* \note
* Currently this only works with VP9.
* @{
*/
/*!\brief Pass in external frame buffers for the decoder to use.
*
* Registers functions to be called when libvpx needs a frame buffer
* to decode the current frame and a function to be called when libvpx does
* not internally reference the frame buffer. This set function must
* be called before the first call to decode or libvpx will assume the
* default behavior of allocating frame buffers internally.
*
* \param[in] ctx Pointer to this instance's context
* \param[in] cb_get Pointer to the get callback function
* \param[in] cb_release Pointer to the release callback function
* \param[in] cb_priv Callback's private data
*
* \retval #VPX_CODEC_OK
* External frame buffers will be used by libvpx.
* \retval #VPX_CODEC_INVALID_PARAM
* One or more of the callbacks were NULL.
* \retval #VPX_CODEC_ERROR
* Decoder context not initialized, or algorithm not capable of
* using external frame buffers.
*
* \note
* When decoding VP9, the application may be required to pass in at least
* #VP9_MAXIMUM_REF_BUFFERS + #VPX_MAXIMUM_WORK_BUFFERS external frame
* buffers.
*/
vpx_codec_err_t vpx_codec_set_frame_buffer_functions(
vpx_codec_ctx_t *ctx,
vpx_get_frame_buffer_cb_fn_t cb_get,
vpx_release_frame_buffer_cb_fn_t cb_release, void *cb_priv);
/*!@} - end defgroup cap_external_frame_buffer */
/*!@} - end defgroup decoder*/
#ifdef __cplusplus
}
#endif
#endif // VPX_VPX_DECODER_H_


@@ -0,0 +1,83 @@
/*
* Copyright (c) 2014 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_VPX_FRAME_BUFFER_H_
#define VPX_VPX_FRAME_BUFFER_H_
/*!\file
* \brief Describes the decoder external frame buffer interface.
*/
#ifdef __cplusplus
extern "C" {
#endif
#include "./vpx_integer.h"
/*!\brief The maximum number of work buffers used by libvpx.
* Support maximum 4 threads to decode video in parallel.
* Each thread will use one work buffer.
* TODO(hkuang): Add support to set number of worker threads dynamically.
*/
#define VPX_MAXIMUM_WORK_BUFFERS 8
/*!\brief The maximum number of reference buffers that a VP9 encoder may use.
*/
#define VP9_MAXIMUM_REF_BUFFERS 8
/*!\brief External frame buffer
*
* This structure holds allocated frame buffers used by the decoder.
*/
typedef struct vpx_codec_frame_buffer {
uint8_t *data; /**< Pointer to the data buffer */
size_t size; /**< Size of data in bytes */
void *priv; /**< Frame's private data */
} vpx_codec_frame_buffer_t;
/*!\brief get frame buffer callback prototype
*
* This callback is invoked by the decoder to retrieve data for the frame
* buffer in order for the decode call to complete. The callback must
* allocate at least min_size in bytes and assign it to fb->data. The callback
* must zero out all the data allocated. Then the callback must set fb->size
* to the allocated size. The application does not need to align the allocated
* data. The callback is triggered when the decoder needs a frame buffer to
* decode a compressed image into. This function may be called more than once
* for every call to vpx_codec_decode. The application may set fb->priv to
* some data which will be passed back in the ximage and the release function
* call. |fb| is guaranteed to not be NULL. On success the callback must
* return 0. Any failure the callback must return a value less than 0.
*
* \param[in] priv Callback's private data
* \param[in] new_size Size in bytes needed by the buffer
* \param[in,out] fb Pointer to vpx_codec_frame_buffer_t
*/
typedef int (*vpx_get_frame_buffer_cb_fn_t)(
void *priv, size_t min_size, vpx_codec_frame_buffer_t *fb);
/*!\brief release frame buffer callback prototype
*
* This callback is invoked by the decoder when the frame buffer is not
* referenced by any other buffers. |fb| is guaranteed to not be NULL. On
* success the callback must return 0. Any failure the callback must return
* a value less than 0.
*
* \param[in] priv Callback's private data
* \param[in] fb Pointer to vpx_codec_frame_buffer_t
*/
typedef int (*vpx_release_frame_buffer_cb_fn_t)(
void *priv, vpx_codec_frame_buffer_t *fb);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_VPX_FRAME_BUFFER_H_
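Editorial sketch of a minimal callback pair satisfying the contracts above (zeroed allocation, fb->size set, 0 on success, negative on failure). The malloc-backed strategy and the registration comment are assumptions; real applications would normally pool buffers.

/* Hedged sketch: malloc-backed external frame buffers. */
#include <stdlib.h>
#include <string.h>
#include "vpx/vpx_decoder.h"
#include "vpx/vpx_frame_buffer.h"

static int my_get_frame_buffer(void *priv, size_t min_size,
                               vpx_codec_frame_buffer_t *fb)
{
    (void)priv;
    fb->data = (uint8_t *)malloc(min_size);
    if (!fb->data)
        return -1;                 /* any negative value signals failure */
    memset(fb->data, 0, min_size); /* the callback must zero the allocation */
    fb->size = min_size;
    fb->priv = NULL;
    return 0;
}

static int my_release_frame_buffer(void *priv, vpx_codec_frame_buffer_t *fb)
{
    (void)priv;
    free(fb->data);
    fb->data = NULL;
    fb->size = 0;
    return 0;
}

/* Registration must precede the first vpx_codec_decode() call:
 *   vpx_codec_set_frame_buffer_functions(&ctx, my_get_frame_buffer,
 *                                        my_release_frame_buffer, NULL);
 */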


@@ -0,0 +1,235 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
/*!\file
* \brief Describes the vpx image descriptor and associated operations
*
*/
#ifndef VPX_VPX_IMAGE_H_
#define VPX_VPX_IMAGE_H_
#ifdef __cplusplus
extern "C" {
#endif
/*!\brief Current ABI version number
*
* \internal
* If this file is altered in any way that changes the ABI, this value
* must be bumped. Examples include, but are not limited to, changing
* types, removing or reassigning enums, adding/removing/rearranging
* fields to structures
*/
#define VPX_IMAGE_ABI_VERSION (4) /**<\hideinitializer*/
#define VPX_IMG_FMT_PLANAR 0x100 /**< Image is a planar format. */
#define VPX_IMG_FMT_UV_FLIP 0x200 /**< V plane precedes U in memory. */
#define VPX_IMG_FMT_HAS_ALPHA 0x400 /**< Image has an alpha channel. */
#define VPX_IMG_FMT_HIGHBITDEPTH 0x800 /**< Image uses 16bit framebuffer. */
/*!\brief List of supported image formats */
typedef enum vpx_img_fmt {
VPX_IMG_FMT_NONE,
VPX_IMG_FMT_RGB24, /**< 24 bit per pixel packed RGB */
VPX_IMG_FMT_RGB32, /**< 32 bit per pixel packed 0RGB */
VPX_IMG_FMT_RGB565, /**< 16 bit per pixel, 565 */
VPX_IMG_FMT_RGB555, /**< 16 bit per pixel, 555 */
VPX_IMG_FMT_UYVY, /**< UYVY packed YUV */
VPX_IMG_FMT_YUY2, /**< YUYV packed YUV */
VPX_IMG_FMT_YVYU, /**< YVYU packed YUV */
VPX_IMG_FMT_BGR24, /**< 24 bit per pixel packed BGR */
VPX_IMG_FMT_RGB32_LE, /**< 32 bit packed BGR0 */
VPX_IMG_FMT_ARGB, /**< 32 bit packed ARGB, alpha=255 */
VPX_IMG_FMT_ARGB_LE, /**< 32 bit packed BGRA, alpha=255 */
VPX_IMG_FMT_RGB565_LE, /**< 16 bit per pixel, gggbbbbb rrrrrggg */
VPX_IMG_FMT_RGB555_LE, /**< 16 bit per pixel, gggbbbbb 0rrrrrgg */
VPX_IMG_FMT_YV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 1, /**< planar YVU */
VPX_IMG_FMT_I420 = VPX_IMG_FMT_PLANAR | 2,
VPX_IMG_FMT_VPXYV12 = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_UV_FLIP | 3, /** < planar 4:2:0 format with vpx color space */
VPX_IMG_FMT_VPXI420 = VPX_IMG_FMT_PLANAR | 4,
VPX_IMG_FMT_I422 = VPX_IMG_FMT_PLANAR | 5,
VPX_IMG_FMT_I444 = VPX_IMG_FMT_PLANAR | 6,
VPX_IMG_FMT_I440 = VPX_IMG_FMT_PLANAR | 7,
VPX_IMG_FMT_444A = VPX_IMG_FMT_PLANAR | VPX_IMG_FMT_HAS_ALPHA | 6,
VPX_IMG_FMT_I42016 = VPX_IMG_FMT_I420 | VPX_IMG_FMT_HIGHBITDEPTH,
VPX_IMG_FMT_I42216 = VPX_IMG_FMT_I422 | VPX_IMG_FMT_HIGHBITDEPTH,
VPX_IMG_FMT_I44416 = VPX_IMG_FMT_I444 | VPX_IMG_FMT_HIGHBITDEPTH,
VPX_IMG_FMT_I44016 = VPX_IMG_FMT_I440 | VPX_IMG_FMT_HIGHBITDEPTH
} vpx_img_fmt_t; /**< alias for enum vpx_img_fmt */
/*!\brief List of supported color spaces */
typedef enum vpx_color_space {
VPX_CS_UNKNOWN = 0, /**< Unknown */
VPX_CS_BT_601 = 1, /**< BT.601 */
VPX_CS_BT_709 = 2, /**< BT.709 */
VPX_CS_SMPTE_170 = 3, /**< SMPTE.170 */
VPX_CS_SMPTE_240 = 4, /**< SMPTE.240 */
VPX_CS_BT_2020 = 5, /**< BT.2020 */
VPX_CS_RESERVED = 6, /**< Reserved */
VPX_CS_SRGB = 7 /**< sRGB */
} vpx_color_space_t; /**< alias for enum vpx_color_space */
/*!\brief List of supported color range */
typedef enum vpx_color_range {
VPX_CR_STUDIO_RANGE = 0, /**< Y [16..235], UV [16..240] */
VPX_CR_FULL_RANGE = 1 /**< YUV/RGB [0..255] */
} vpx_color_range_t; /**< alias for enum vpx_color_range */
/**\brief Image Descriptor */
typedef struct vpx_image {
vpx_img_fmt_t fmt; /**< Image Format */
vpx_color_space_t cs; /**< Color Space */
vpx_color_range_t range; /**< Color Range */
/* Image storage dimensions */
unsigned int w; /**< Stored image width */
unsigned int h; /**< Stored image height */
unsigned int bit_depth; /**< Stored image bit-depth */
/* Image display dimensions */
unsigned int d_w; /**< Displayed image width */
unsigned int d_h; /**< Displayed image height */
/* Image intended rendering dimensions */
unsigned int r_w; /**< Intended rendering image width */
unsigned int r_h; /**< Intended rendering image height */
/* Chroma subsampling info */
unsigned int x_chroma_shift; /**< subsampling order, X */
unsigned int y_chroma_shift; /**< subsampling order, Y */
/* Image data pointers. */
#define VPX_PLANE_PACKED 0 /**< To be used for all packed formats */
#define VPX_PLANE_Y 0 /**< Y (Luminance) plane */
#define VPX_PLANE_U 1 /**< U (Chroma) plane */
#define VPX_PLANE_V 2 /**< V (Chroma) plane */
#define VPX_PLANE_ALPHA 3 /**< A (Transparency) plane */
unsigned char *planes[4]; /**< pointer to the top left pixel for each plane */
int stride[4]; /**< stride between rows for each plane */
int bps; /**< bits per sample (for packed formats) */
/* The following member may be set by the application to associate data
* with this image.
*/
void *user_priv; /**< may be set by the application to associate data
* with this image. */
/* The following members should be treated as private. */
unsigned char *img_data; /**< private */
int img_data_owner; /**< private */
int self_allocd; /**< private */
void *fb_priv; /**< Frame buffer data associated with the image. */
} vpx_image_t; /**< alias for struct vpx_image */
/**\brief Representation of a rectangle on a surface */
typedef struct vpx_image_rect {
unsigned int x; /**< leftmost column */
unsigned int y; /**< topmost row */
unsigned int w; /**< width */
unsigned int h; /**< height */
} vpx_image_rect_t; /**< alias for struct vpx_image_rect */
/*!\brief Open a descriptor, allocating storage for the underlying image
*
* Returns a descriptor for storing an image of the given format. The
* storage for the descriptor is allocated on the heap.
*
* \param[in] img Pointer to storage for descriptor. If this parameter
* is NULL, the storage for the descriptor will be
* allocated on the heap.
* \param[in] fmt Format for the image
* \param[in] d_w Width of the image
* \param[in] d_h Height of the image
* \param[in] align Alignment, in bytes, of the image buffer and
* each row in the image(stride).
*
* \return Returns a pointer to the initialized image descriptor. If the img
* parameter is non-null, the value of the img parameter will be
* returned.
*/
vpx_image_t *vpx_img_alloc(vpx_image_t *img,
vpx_img_fmt_t fmt,
unsigned int d_w,
unsigned int d_h,
unsigned int align);
/*!\brief Open a descriptor, using existing storage for the underlying image
*
* Returns a descriptor for storing an image of the given format. The
* storage for descriptor has been allocated elsewhere, and a descriptor is
* desired to "wrap" that storage.
*
* \param[in] img Pointer to storage for descriptor. If this parameter
* is NULL, the storage for the descriptor will be
* allocated on the heap.
* \param[in] fmt Format for the image
* \param[in] d_w Width of the image
* \param[in] d_h Height of the image
* \param[in] align Alignment, in bytes, of each row in the image.
* \param[in] img_data Storage to use for the image
*
* \return Returns a pointer to the initialized image descriptor. If the img
* parameter is non-null, the value of the img parameter will be
* returned.
*/
vpx_image_t *vpx_img_wrap(vpx_image_t *img,
vpx_img_fmt_t fmt,
unsigned int d_w,
unsigned int d_h,
unsigned int align,
unsigned char *img_data);
/*!\brief Set the rectangle identifying the displayed portion of the image
*
* Updates the displayed rectangle (aka viewport) on the image surface to
* match the specified coordinates and size.
*
* \param[in] img Image descriptor
* \param[in] x leftmost column
* \param[in] y topmost row
* \param[in] w width
* \param[in] h height
*
* \return 0 if the requested rectangle is valid, nonzero otherwise.
*/
int vpx_img_set_rect(vpx_image_t *img,
unsigned int x,
unsigned int y,
unsigned int w,
unsigned int h);
/*!\brief Flip the image vertically (top for bottom)
*
* Adjusts the image descriptor's pointers and strides to make the image
* be referenced upside-down.
*
* \param[in] img Image descriptor
*/
void vpx_img_flip(vpx_image_t *img);
/*!\brief Close an image descriptor
*
* Frees all allocated storage associated with an image descriptor.
*
* \param[in] img Image descriptor
*/
void vpx_img_free(vpx_image_t *img);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // VPX_VPX_IMAGE_H_
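As a minimal usage sketch (not part of the header itself), the descriptor calls above compose like this; VPX_IMG_FMT_I420 is assumed to come from the vpx_img_fmt enum declared earlier in this file:
#include "vpx/vpx_image.h"
#include <stdio.h>
int main(void)
{
    /* Allocate a 640x480 I420 image on the heap with 16-byte row alignment. */
    vpx_image_t *img = vpx_img_alloc(NULL, VPX_IMG_FMT_I420, 640, 480, 16);
    if (!img)
        return 1;
    /* Narrow the displayed viewport to a centered 320x240 rectangle. */
    if (vpx_img_set_rect(img, 160, 120, 320, 240) != 0)
        fprintf(stderr, "requested rectangle is out of bounds\n");
    printf("stored %ux%u, displayed %ux%u, %d bits per sample\n",
           img->w, img->h, img->d_w, img->d_h, img->bps);
    vpx_img_free(img);
    return 0;
}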

View file

@ -0,0 +1,74 @@
/*
* Copyright (c) 2010 The WebM project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef VPX_VPX_INTEGER_H_
#define VPX_VPX_INTEGER_H_
/* get ptrdiff_t, size_t, wchar_t, NULL */
#include <stddef.h>
#if defined(_MSC_VER)
#define VPX_FORCE_INLINE __forceinline
#define VPX_INLINE __inline
#else
#define VPX_FORCE_INLINE __inline__ __attribute__(always_inline)
// TODO(jbb): Allow a way to force inline off for older compilers.
#define VPX_INLINE inline
#endif
#if (defined(_MSC_VER) && (_MSC_VER < 1600)) || defined(VPX_EMULATE_INTTYPES)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
#if (defined(_MSC_VER) && (_MSC_VER < 1600))
typedef signed __int64 int64_t;
typedef unsigned __int64 uint64_t;
#define INT64_MAX _I64_MAX
#define INT32_MAX _I32_MAX
#define INT32_MIN _I32_MIN
#define INT16_MAX _I16_MAX
#define INT16_MIN _I16_MIN
#endif
#ifndef _UINTPTR_T_DEFINED
typedef size_t uintptr_t;
#endif
#else
/* Most platforms have the C99 standard integer types. */
#if defined(__cplusplus)
# if !defined(__STDC_FORMAT_MACROS)
# define __STDC_FORMAT_MACROS
# endif
# if !defined(__STDC_LIMIT_MACROS)
# define __STDC_LIMIT_MACROS
# endif
#endif // __cplusplus
#include <stdint.h>
#endif
/* VS2010 defines stdint.h, but not inttypes.h */
#if defined(_MSC_VER) && _MSC_VER < 1800
#define PRId64 "I64d"
#else
#include <inttypes.h>
#endif
#endif // VPX_VPX_INTEGER_H_
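A short sketch of what the fallback definitions above buy a caller: the same format string prints a 64-bit value on old MSVC and on C99 toolchains alike.
#include "vpx/vpx_integer.h"
#include <stdio.h>
int main(void)
{
    int64_t pts = 1234567890123LL;
    /* PRId64 expands to "I64d" on pre-VS2013 MSVC and comes from <inttypes.h> everywhere else. */
    printf("pts=%" PRId64 "\n", pts);
    return 0;
}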

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -0,0 +1,44 @@
Instructions for Building EDuke32's Library Dependencies Targeting Win32 and Win64
==================================================================================
First, follow these instructions: http://wiki.eduke32.com/wiki/Building_EDuke32_on_Windows
Download the latest sources from the link provided.
The build output listed as "Desired Results" is what EDuke32 needs to function.
In some cases the desired results for each library may need to be installed into the compiler. "x depends on the results of y to compile" means that the build output of y must be added to the compiler in this way. Copy the files listed in each category to the appropriate destinations. Unless otherwise noted, do NOT copy the ".dll.a" file, or else the final product may depend on external DLLs (unless such a dependency is what you actually want).
For MinGW (MinGW32):
from the compiler root (ex. C:/MinGW/)
* headers: include/
* libraries: lib/
For MinGW-w64:
from the compiler root (ex. C:/MinGW-w64/mingw32-dw2/)
* headers: <target>-w64-mingw32/include/
* libraries: <target>-w64-mingw32/lib/
Binaries (if mentioned) need to be present with the finished EDuke32 executables. They are not needed during compilation.
NB: Text formatted as a code block is a command to be pasted into the Windows command prompt.
http://wiki.eduke32.com/wiki/Working_with_the_Windows_Command_Prompt
[//]: # (Plain text readers: This refers to lines beginning with exactly four spaces.)
libvpx
------
### Prerequisites ###
Download the binary of yasm (http://yasm.tortall.net/) for your host system architecture. Both builds target both architectures.
The build environment needs pr.exe (https://mingw-lib.googlecode.com/files/coreutils-5.97-MSYS-1.0.11-snapshot.tar.bz2).
### Download ###
* Instructions: http://www.webmproject.org/code/
* Stable: http://downloads.webmproject.org/releases/webm/
* Git: https://chromium.googlesource.com/webm/libvpx
### Build ###
sh ./configure --disable-vp8-encoder --disable-vp9-encoder --disable-multithread --disable-spatial-resampling --as=yasm && make libvpx.a
### Desired Results ###
* headers: vpx/vp8.h vpx/vp8dx.h vpx/vpx_codec.h vpx/vpx_decoder.h vpx/vpx_frame_buffer.h vpx/vpx_image.h vpx/vpx_integer.h
* libraries: libvpx.a
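For orientation, the headers and static library listed above are consumed roughly like the following hedged sketch (a minimal VP8 decoder setup; no real bitstream is fed in and error handling is reduced to a message):
    #include "vpx/vpx_decoder.h"
    #include "vpx/vp8dx.h"
    #include <stdio.h>
    int main(void)
    {
        vpx_codec_ctx_t codec;
        /* vpx_codec_vp8_dx() is the VP8 decoder interface declared in vp8dx.h. */
        if (vpx_codec_dec_init(&codec, vpx_codec_vp8_dx(), NULL, 0) != VPX_CODEC_OK) {
            fprintf(stderr, "decoder init failed: %s\n", vpx_codec_error(&codec));
            return 1;
        }
        /* Compressed frames would be pushed with vpx_codec_decode() and decoded
           pictures pulled with vpx_codec_get_frame() here. */
        vpx_codec_destroy(&codec);
        return 0;
    }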

View file

@ -0,0 +1,55 @@
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the w64 mingw-runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#ifndef _dbg_LOAD_IMAGE_h
#define _dbg_LOAD_IMAGE_h
#ifndef WINAPI
#define WINAPI __stdcall
#endif
#define IMAGEAPI DECLSPEC_IMPORT WINAPI
#define DBHLP_DEPRECIATED __declspec(deprecated)
#define DBHLPAPI IMAGEAPI
#ifndef EBACKTRACE_MINGW32
#define IMAGE_SEPARATION (64*1024)
typedef struct _LOADED_IMAGE {
PSTR ModuleName;
HANDLE hFile;
PUCHAR MappedAddress;
#ifdef _IMAGEHLP64
PIMAGE_NT_HEADERS64 FileHeader;
#else
PIMAGE_NT_HEADERS32 FileHeader;
#endif
PIMAGE_SECTION_HEADER LastRvaSection;
ULONG NumberOfSections;
PIMAGE_SECTION_HEADER Sections;
ULONG Characteristics;
BOOLEAN fSystemImage;
BOOLEAN fDOSImage;
BOOLEAN fReadOnly;
UCHAR Version;
LIST_ENTRY Links;
ULONG SizeOfImage;
} LOADED_IMAGE,*PLOADED_IMAGE;
#endif
#define MAX_SYM_NAME 2000
typedef struct _MODLOAD_DATA {
DWORD ssize;
DWORD ssig;
PVOID data;
DWORD size;
DWORD flags;
} MODLOAD_DATA,*PMODLOAD_DATA;
#endif

File diff suppressed because it is too large

View file

@ -0,0 +1,702 @@
/*
Copyright (c) 2010 ,
Cloud Wu . All rights reserved.
http://www.codingnow.com
Use, modification and distribution are subject to the "New BSD License"
as listed at <url: http://www.opensource.org/licenses/bsd-license.php >.
filename: backtrace.c
build command: gcc -O2 -shared -Wall -o backtrace.dll backtrace.c -lbfd -liberty -limagehlp
how to use: Call LoadLibraryA("backtrace.dll"); at beginning of your program .
*/
/* modified from original for EDuke32 */
// warnings cleaned up, ported to 64-bit, and heavily extended by Hendricks266
#include <windows.h>
#include <excpt.h>
#include <imagehlp.h>
// Tenuous: MinGW provides _IMAGEHLP_H while MinGW-w64 defines _IMAGEHLP_.
#ifdef _IMAGEHLP_H
# define EBACKTRACE_MINGW32
#endif
#ifdef _IMAGEHLP_
# define EBACKTRACE_MINGW_W64
#endif
#if defined(EBACKTRACE_MINGW32) && !defined(EBACKTRACE_MINGW_W64)
# include "_dbg_common.h"
#endif
#ifndef PACKAGE
# define PACKAGE EBACKTRACE1
#endif
#ifndef PACKAGE_VERSION
# define PACKAGE_VERSION 1
#endif
#if defined(_M_X64) || defined(__amd64__) || defined(__x86_64__) || defined(_WIN64)
# define EBACKTRACE64
#endif
#include <bfd.h>
#include <psapi.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdarg.h>
#include <string.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <time.h>
#include <stdint.h>
#ifndef DBG_PRINTEXCEPTION_C
# define DBG_PRINTEXCEPTION_C (0x40010006)
#endif
#ifndef MS_VC_EXCEPTION
# define MS_VC_EXCEPTION 1080890248
#endif
#if defined __GNUC__ || defined __clang__
# define ATTRIBUTE(attrlist) __attribute__(attrlist)
#else
# define ATTRIBUTE(attrlist)
#endif
#define BUFFER_MAX (16*1024)
struct bfd_ctx {
bfd * handle;
asymbol ** symbol;
};
struct bfd_set {
char * name;
struct bfd_ctx * bc;
struct bfd_set *next;
};
struct find_info {
asymbol **symbol;
bfd_vma counter;
const char *file;
const char *func;
unsigned line;
};
struct output_buffer {
char * buf;
size_t sz;
size_t ptr;
};
static void
output_init(struct output_buffer *ob, char * buf, size_t sz)
{
ob->buf = buf;
ob->sz = sz;
ob->ptr = 0;
ob->buf[0] = '\0';
}
static void
output_print(struct output_buffer *ob, const char * format, ...)
{
va_list ap;
if (ob->sz == ob->ptr)
return;
ob->buf[ob->ptr] = '\0';
va_start(ap,format);
vsnprintf(ob->buf + ob->ptr , ob->sz - ob->ptr , format, ap);
va_end(ap);
ob->ptr = strlen(ob->buf + ob->ptr) + ob->ptr;
}
static void
lookup_section(bfd *abfd, asection *sec, void *opaque_data)
{
struct find_info *data = opaque_data;
bfd_vma vma;
if (data->func)
return;
if (!(bfd_get_section_flags(abfd, sec) & SEC_ALLOC))
return;
vma = bfd_get_section_vma(abfd, sec);
if (data->counter < vma || vma + bfd_get_section_size(sec) <= data->counter)
return;
bfd_find_nearest_line(abfd, sec, data->symbol, data->counter - vma, &(data->file), &(data->func), &(data->line));
}
static void
find(struct bfd_ctx * b, DWORD offset, const char **file, const char **func, unsigned *line)
{
struct find_info data;
data.func = NULL;
data.symbol = b->symbol;
data.counter = offset;
data.file = NULL;
data.func = NULL;
data.line = 0;
bfd_map_over_sections(b->handle, &lookup_section, &data);
if (file) {
*file = data.file;
}
if (func) {
*func = data.func;
}
if (line) {
*line = data.line;
}
}
static int
init_bfd_ctx(struct bfd_ctx *bc, const char * procname, struct output_buffer *ob)
{
int r1, r2, r3;
bfd *b;
void *symbol_table;
unsigned dummy = 0;
bc->handle = NULL;
bc->symbol = NULL;
b = bfd_openr(procname, 0);
if (!b) {
output_print(ob,"Failed to open bfd from (%s)\n" , procname);
return 1;
}
r1 = bfd_check_format(b, bfd_object);
r2 = bfd_check_format_matches(b, bfd_object, NULL);
r3 = bfd_get_file_flags(b) & HAS_SYMS;
if (!(r1 && r2 && r3)) {
bfd_close(b);
if (!(r1 && r2))
output_print(ob,"Failed to init bfd from (%s): %d %d %d\n", procname, r1, r2, r3);
return 1;
}
if (bfd_read_minisymbols(b, FALSE, &symbol_table, &dummy) == 0) {
if (bfd_read_minisymbols(b, TRUE, &symbol_table, &dummy) < 0) {
free(symbol_table);
bfd_close(b);
output_print(ob,"Failed to read symbols from (%s)\n", procname);
return 1;
}
}
bc->handle = b;
bc->symbol = symbol_table;
return 0;
}
static void
close_bfd_ctx(struct bfd_ctx *bc)
{
if (bc) {
if (bc->symbol) {
free(bc->symbol);
}
if (bc->handle) {
bfd_close(bc->handle);
}
}
}
static struct bfd_ctx *
get_bc(struct output_buffer *ob , struct bfd_set *set , const char *procname)
{
struct bfd_ctx bc;
while(set->name) {
if (strcmp(set->name , procname) == 0) {
return set->bc;
}
set = set->next;
}
if (init_bfd_ctx(&bc, procname , ob)) {
return NULL;
}
set->next = calloc(1, sizeof(*set));
set->bc = malloc(sizeof(struct bfd_ctx));
memcpy(set->bc, &bc, sizeof(bc));
set->name = strdup(procname);
return set->bc;
}
static void
release_set(struct bfd_set *set)
{
while(set) {
struct bfd_set * temp = set->next;
if (set->name)
free(set->name);
close_bfd_ctx(set->bc);
free(set);
set = temp;
}
}
static char procname[MAX_PATH];
#ifdef EBACKTRACE64
# define MachineType IMAGE_FILE_MACHINE_AMD64
# define MAYBE64(x) x ## 64
#else
# define MachineType IMAGE_FILE_MACHINE_I386
# define MAYBE64(x) x
#endif
static void
_backtrace(struct output_buffer *ob, struct bfd_set *set, int depth , LPCONTEXT context)
{
MAYBE64(STACKFRAME) frame;
HANDLE process, thread;
char symbol_buffer[sizeof(MAYBE64(IMAGEHLP_SYMBOL)) + 255];
char module_name_raw[MAX_PATH];
struct bfd_ctx *bc = NULL;
GetModuleFileNameA(NULL, procname, sizeof procname);
memset(&frame,0,sizeof(frame));
#ifdef EBACKTRACE64
frame.AddrPC.Offset = context->Rip;
frame.AddrStack.Offset = context->Rsp;
frame.AddrFrame.Offset = context->Rbp;
#else
frame.AddrPC.Offset = context->Eip;
frame.AddrStack.Offset = context->Esp;
frame.AddrFrame.Offset = context->Ebp;
#endif
frame.AddrPC.Mode = AddrModeFlat;
frame.AddrStack.Mode = AddrModeFlat;
frame.AddrFrame.Mode = AddrModeFlat;
process = GetCurrentProcess();
thread = GetCurrentThread();
while(MAYBE64(StackWalk)(MachineType,
process,
thread,
&frame,
context,
NULL,
MAYBE64(SymFunctionTableAccess),
MAYBE64(SymGetModuleBase), NULL)) {
MAYBE64(IMAGEHLP_SYMBOL) *symbol;
MAYBE64(DWORD) module_base;
const char * module_name = "[unknown module]";
const char * file = NULL;
const char * func = NULL;
unsigned line = 0;
--depth;
if (depth < 0)
break;
symbol = (MAYBE64(IMAGEHLP_SYMBOL) *)symbol_buffer;
symbol->SizeOfStruct = (sizeof *symbol) + 255;
symbol->MaxNameLength = 254;
module_base = MAYBE64(SymGetModuleBase)(process, frame.AddrPC.Offset);
if (module_base &&
GetModuleFileNameA((HINSTANCE)(intptr_t)module_base, module_name_raw, MAX_PATH)) {
module_name = module_name_raw;
bc = get_bc(ob, set, module_name);
}
if (bc) {
find(bc,frame.AddrPC.Offset,&file,&func,&line);
}
if (file == NULL) {
MAYBE64(DWORD) dummy = 0;
if (MAYBE64(SymGetSymFromAddr)(process, frame.AddrPC.Offset, &dummy, symbol)) {
file = symbol->Name;
}
else {
file = "[unknown file]";
}
}
output_print(ob,"0x%p : %s : %s", frame.AddrPC.Offset, module_name, file);
if (func != NULL)
output_print(ob, " (%d) : in function (%s)", line, func);
output_print(ob, "\n");
}
}
static LPTSTR FormatErrorMessage(DWORD dwMessageId)
{
LPTSTR lpBuffer = NULL;
// adapted from http://stackoverflow.com/a/455533
FormatMessage(
FORMAT_MESSAGE_FROM_SYSTEM
|FORMAT_MESSAGE_ALLOCATE_BUFFER
|FORMAT_MESSAGE_IGNORE_INSERTS,
NULL,
dwMessageId,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US)
(LPTSTR)&lpBuffer,
0,
NULL);
return lpBuffer; // must be LocalFree()'d by caller
}
static LPTSTR FormatExceptionCodeMessage(DWORD dwMessageId)
{
LPTSTR lpBuffer = NULL;
FormatMessage(
FORMAT_MESSAGE_FROM_HMODULE
|FORMAT_MESSAGE_ALLOCATE_BUFFER
|FORMAT_MESSAGE_IGNORE_INSERTS,
GetModuleHandleA("ntdll.dll"),
dwMessageId,
MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US)
(LPTSTR)&lpBuffer,
0,
NULL);
return lpBuffer; // must be LocalFree()'d by caller
}
// adapted from http://www.catch22.net/tuts/custom-messagebox
static HHOOK hMsgBoxHook;
LRESULT CALLBACK CBTProc(int nCode, WPARAM wParam, LPARAM lParam)
{
if (nCode < 0)
return CallNextHookEx(hMsgBoxHook, nCode, wParam, lParam);
switch (nCode)
{
case HCBT_ACTIVATE:
{
// Get handle to the message box!
HWND hwnd = (HWND)wParam;
// Do customization!
SetWindowTextA(GetDlgItem(hwnd, IDYES), "Quit");
SetWindowTextA(GetDlgItem(hwnd, IDNO), "Continue");
SetWindowTextA(GetDlgItem(hwnd, IDCANCEL), "Ignore");
return 0;
}
break;
}
// Call the next hook, if there is one
return CallNextHookEx(hMsgBoxHook, nCode, wParam, lParam);
}
int ExceptionMessage(TCHAR *szText, TCHAR *szCaption)
{
int retval;
// Install a window hook, so we can intercept the message-box
// creation, and customize it
hMsgBoxHook = SetWindowsHookEx(
WH_CBT,
CBTProc,
NULL,
GetCurrentThreadId() // Only install for THIS thread!!!
);
// Display a standard message box
retval = MessageBoxA(NULL, szText, szCaption, MB_YESNOCANCEL|MB_ICONERROR|MB_TASKMODAL);
// remove the window hook
UnhookWindowsHookEx(hMsgBoxHook);
return retval;
}
static char crashlogfilename[MAX_PATH] = "crash.log";
static char propername[MAX_PATH] = "this application";
__declspec(dllexport) void SetTechnicalName(const char* input)
{
snprintf(crashlogfilename, MAX_PATH, "%s.crash.log", input);
}
__declspec(dllexport) void SetProperName(const char* input)
{
strncpy(propername, input, MAX_PATH);
}
static char * g_output = NULL;
static PVOID g_prev = NULL;
static LONG WINAPI
exception_filter(LPEXCEPTION_POINTERS info)
{
struct output_buffer ob;
int logfd, written, msgboxID;
PEXCEPTION_RECORD exception;
BOOL initialized = FALSE;
char *ExceptionPrinted;
for (exception = info->ExceptionRecord; exception != NULL; exception = exception->ExceptionRecord)
{
#if 0
if (exception->ExceptionFlags & EXCEPTION_NONCONTINUABLE)
continuable = FALSE;
#endif
switch (exception->ExceptionCode)
{
case EXCEPTION_BREAKPOINT:
case EXCEPTION_SINGLE_STEP:
case DBG_CONTROL_C:
case DBG_PRINTEXCEPTION_C:
case MS_VC_EXCEPTION:
break;
default:
{
LPTSTR ExceptionCodeMsg = FormatExceptionCodeMessage(exception->ExceptionCode);
// The message for this exception code is broken.
LPTSTR ExceptionText = exception->ExceptionCode == EXCEPTION_ACCESS_VIOLATION ? "Access violation." : ExceptionCodeMsg;
if (!initialized)
{
output_init(&ob, g_output, BUFFER_MAX);
initialized = TRUE;
}
output_print(&ob, "Caught exception 0x%08X at 0x%p: %s\n", exception->ExceptionCode, exception->ExceptionAddress, ExceptionText);
LocalFree(ExceptionCodeMsg);
}
break;
}
}
if (!initialized)
return EXCEPTION_CONTINUE_SEARCH; // EXCEPTION_CONTINUE_EXECUTION
ExceptionPrinted = (char*)calloc(strlen(g_output) + 37 + 2*MAX_PATH, sizeof(char));
strcpy(ExceptionPrinted, g_output);
strcat(ExceptionPrinted, "\nPlease send ");
strcat(ExceptionPrinted, crashlogfilename);
strcat(ExceptionPrinted, " to the maintainers of ");
strcat(ExceptionPrinted, propername);
strcat(ExceptionPrinted, ".");
{
DWORD error = 0;
BOOL SymInitialized = SymInitialize(GetCurrentProcess(), NULL, TRUE);
if (!SymInitialized)
{
LPTSTR errorText;
error = GetLastError();
errorText = FormatErrorMessage(error);
output_print(&ob, "SymInitialize() failed with error %d: %s\n", error, errorText);
LocalFree(errorText);
}
if (SymInitialized || error == 87)
{
struct bfd_set *set = calloc(1,sizeof(*set));
bfd_init();
_backtrace(&ob , set , 128 , info->ContextRecord);
release_set(set);
SymCleanup(GetCurrentProcess());
}
}
logfd = open(crashlogfilename, O_APPEND | O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
if (logfd) {
time_t curtime;
struct tm *curltime;
const char *theasctime;
const char *finistr = "---------------\n";
while ((written = write(logfd, g_output, strlen(g_output)))) {
g_output += written;
}
curtime = time(NULL);
curltime = localtime(&curtime);
theasctime = curltime ? asctime(curltime) : NULL;
if (theasctime)
write(logfd, theasctime, strlen(theasctime));
write(logfd, finistr, strlen(finistr));
close(logfd);
}
//fputs(g_output, stderr);
msgboxID = ExceptionMessage(ExceptionPrinted, propername);
free(ExceptionPrinted);
switch (msgboxID)
{
case IDYES:
exit(0xBAC);
break;
case IDNO:
break;
case IDCANCEL:
return EXCEPTION_CONTINUE_EXECUTION;
break;
}
return EXCEPTION_CONTINUE_SEARCH;
}
static void
backtrace_register(void)
{
if (g_output == NULL) {
g_output = malloc(BUFFER_MAX);
g_prev = AddVectoredExceptionHandler(1, exception_filter);
}
}
static void
backtrace_unregister(void)
{
if (g_output) {
free(g_output);
RemoveVectoredExceptionHandler(g_prev);
g_prev = NULL;
g_output = NULL;
}
}
BOOL WINAPI
DllMain(HINSTANCE hinstDLL ATTRIBUTE((unused)), DWORD dwReason, LPVOID lpvReserved ATTRIBUTE((unused)))
{
switch (dwReason) {
case DLL_PROCESS_ATTACH:
backtrace_register();
break;
case DLL_PROCESS_DETACH:
backtrace_unregister();
break;
}
return TRUE;
}
/* cut dependence on libintl... libbfd needs this */
char *libintl_dgettext (const char *domain_name ATTRIBUTE((unused)), const char *msgid ATTRIBUTE((unused)))
{
static char buf[1024] = "XXX placeholder XXX";
return buf;
}
int __printf__ ( const char * format, ... );
int libintl_fprintf ( FILE * stream, const char * format, ... );
int libintl_sprintf ( char * str, const char * format, ... );
int libintl_snprintf ( char *buffer, int buf_size, const char *format, ... );
int libintl_vprintf ( const char * format, va_list arg );
int libintl_vfprintf ( FILE * stream, const char * format, va_list arg );
int libintl_vsprintf ( char * str, const char * format, va_list arg );
int __printf__ ( const char * format, ... )
{
int value;
va_list arg;
va_start(arg, format);
value = vprintf ( format, arg );
va_end(arg);
return value;
}
int libintl_fprintf ( FILE * stream, const char * format, ... )
{
int value;
va_list arg;
va_start(arg, format);
value = vfprintf ( stream, format, arg );
va_end(arg);
return value;
}
int libintl_sprintf ( char * str, const char * format, ... )
{
int value;
va_list arg;
va_start(arg, format);
value = vsprintf ( str, format, arg );
va_end(arg);
return value;
}
int libintl_snprintf ( char *buffer, int buf_size, const char *format, ... )
{
int value;
va_list arg;
va_start(arg, format);
value = vsnprintf ( buffer, buf_size, format, arg );
va_end(arg);
return value;
}
int libintl_vprintf ( const char * format, va_list arg )
{
return vprintf ( format, arg );
}
int libintl_vfprintf ( FILE * stream, const char * format, va_list arg )
{
return vfprintf ( stream, format, arg );
}
int libintl_vsprintf ( char * str, const char * format, va_list arg )
{
return vsprintf ( str, format, arg );
}
/* cut dependence on zlib... libbfd needs this */
int compress (unsigned char *dest ATTRIBUTE((unused)), unsigned long destLen ATTRIBUTE((unused)), const unsigned char source ATTRIBUTE((unused)), unsigned long sourceLen ATTRIBUTE((unused)))
{
return 0;
}
unsigned long compressBound (unsigned long sourceLen)
{
return sourceLen + (sourceLen >> 12) + (sourceLen >> 14) + (sourceLen >> 25) + 13;
}
int inflateEnd(void *strm ATTRIBUTE((unused)))
{
return 0;
}
int inflateInit_(void *strm ATTRIBUTE((unused)), const char *version ATTRIBUTE((unused)), int stream_size ATTRIBUTE((unused)))
{
return 0;
}
int inflateReset(void *strm ATTRIBUTE((unused)))
{
return 0;
}
int inflate(void *strm ATTRIBUTE((unused)), int flush ATTRIBUTE((unused)))
{
return 0;
}
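A hedged sketch of how a host program picks this handler up, following the "how to use" note in the header comment: loading the DLL is enough (DllMain registers the vectored handler), and the two exported setters customize the crash log file name and the display name. The DLL file name matches the build command above and may differ in a real build.
#include <windows.h>
typedef void (*set_name_fn)(const char *);
int main(void)
{
    /* DllMain() runs backtrace_register() as a side effect of the load. */
    HMODULE h = LoadLibraryA("backtrace.dll");
    if (h) {
        set_name_fn set_technical = (set_name_fn)GetProcAddress(h, "SetTechnicalName");
        set_name_fn set_proper    = (set_name_fn)GetProcAddress(h, "SetProperName");
        if (set_technical) set_technical("myapp");        /* writes myapp.crash.log */
        if (set_proper)    set_proper("My Application");  /* shown in the message box */
    }
    /* ... run the program; an unhandled exception now produces a log and a Quit/Continue/Ignore box ... */
    return 0;
}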

View file

@ -0,0 +1,22 @@
o=o
NAME:=libcompat-to-msvc
%.$o: %.c
gcc -Wall -Wextra -O3 -c $< -o $@
%.$o: %.S
gcc -c $< -o $@
OBJS=dll_math.$o io_math.$o dll_dependency.$o vsnprintf.$o
.INTERMEDIATE: $(OBJS)
$(NAME).a: $(OBJS)
ar rc $@ $^
ranlib $@
clean:
-rm -f *.a *.o

View file

@ -0,0 +1,88 @@
/* Implementation for gcc's internal stack-allocation routines. */
.global ___chkstk
.global __alloca
.global ___chkstk_ms
___chkstk_ms:
#ifdef _WIN64
pushq %rax
pushq %rcx
cmpq $0x1000, %rax
leaq 24(%rsp), %rcx
jb .Lchkstk_ms_end
.Lchkstk_ms_loop:
subq $0x1000, %rcx
subq $0x1000, %rax
orq $0x0, (%rcx)
cmpq $0x1000, %rax
ja .Lchkstk_ms_loop
.Lchkstk_ms_end:
subq %rax, %rcx
orq $0x0, (%rcx)
popq %rcx
popq %rax
ret
#else
pushl %eax
pushl %ecx
cmpl $0x1000, %eax
leal 12(%esp), %ecx
jb chkstk_ms_end
chkstk_ms_loop:
subl $0x1000, %ecx
subl $0x1000, %eax
orl $0x0, (%ecx)
cmpl $0x1000, %eax
ja chkstk_ms_loop
chkstk_ms_end:
subl %eax, %ecx
orl $0x0, (%ecx)
popl %ecx
popl %eax
ret
#endif
#ifdef _WIN64
__alloca:
movq %rcx, %rax
.align 4
___chkstk:
popq %r11
movq %rsp, %r10
cmpq $0x1000, %rax
jb .Lchkstk_end
.Lchkstk_loop:
subq $0x1000, %r10
subq $0x1000, %rax
orl $0x0, (%r10)
cmpq $0x1000, %rax
ja .Lchkstk_loop
.Lchkstk_end:
subq %rax, %r10
movq %rsp, %rax
orl $0x0, (%r10)
movq %r10, %rsp
pushq %r11
ret
#else
___chkstk:
__alloca:
pushl %ecx
leal 8(%esp), %ecx
cmpl $0x1000, %eax /* > 4k ?*/
jb chkstk_end
chkstk_loop:
subl $0x1000, %ecx
subl $0x1000, %eax
orl $0x0, (%ecx)
cmpl $0x1000, %eax
ja chkstk_loop
chkstk_end:
subl %eax, %ecx
orl $0x0, (%ecx)
movl %esp, %eax
movl %ecx, %esp
movl (%eax), %ecx
pushl 4(%eax)
ret
#endif
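For context, a hedged example of the code pattern that makes GCC call into these routines: any function whose locals span more than one 4 KiB page gets a ___chkstk_ms (or ___chkstk/__alloca) probe in its prologue so the stack guard page is touched one page at a time.
#include <string.h>
/* Locals larger than 4 KiB: GCC emits a stack probe before adjusting %esp/%rsp,
   which is what the routines above implement. */
void fill_big_buffer(void)
{
    char buf[64 * 1024];
    memset(buf, 0, sizeof buf);
}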

View file

@ -0,0 +1,572 @@
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* This software was developed by the Computer Systems Engineering group
* at Lawrence Berkeley Laboratory under DARPA contract BG 91-66 and
* contributed to Berkeley.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 4. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef _LIBKERN_QUAD_H_
#define _LIBKERN_QUAD_H_
/*
* Quad arithmetic.
*
* This library makes the following assumptions:
*
* - The type long long (aka quad_t) exists.
*
* - A quad variable is exactly twice as long as `long'.
*
* - The machine's arithmetic is two's complement.
*
* This library can provide 128-bit arithmetic on a machine with 128-bit
* quads and 64-bit longs, for instance, or 96-bit arithmetic on machines
* with 48-bit longs.
*/
/*
#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/syslimits.h>
*/
#include <limits.h>
typedef long long quad_t;
typedef unsigned long long u_quad_t;
typedef unsigned long u_long;
#ifndef CHAR_BIT
# define CHAR_BIT __CHAR_BIT__
#endif
/*
* Define the order of 32-bit words in 64-bit words.
* For little endian only.
*/
#define _QUAD_HIGHWORD 1
#define _QUAD_LOWWORD 0
/*
* Depending on the desired operation, we view a `long long' (aka quad_t) in
* one or more of the following formats.
*/
union uu {
quad_t q; /* as a (signed) quad */
quad_t uq; /* as an unsigned quad */
long sl[2]; /* as two signed longs */
u_long ul[2]; /* as two unsigned longs */
};
/*
* Define high and low longwords.
*/
#define H _QUAD_HIGHWORD
#define L _QUAD_LOWWORD
/*
* Total number of bits in a quad_t and in the pieces that make it up.
* These are used for shifting, and also below for halfword extraction
* and assembly.
*/
#define QUAD_BITS (sizeof(quad_t) * CHAR_BIT)
#define LONG_BITS (sizeof(long) * CHAR_BIT)
#define HALF_BITS (sizeof(long) * CHAR_BIT / 2)
/*
* Extract high and low shortwords from longword, and move low shortword of
* longword to upper half of long, i.e., produce the upper longword of
* ((quad_t)(x) << (number_of_bits_in_long/2)). (`x' must actually be u_long.)
*
* These are used in the multiply code, to split a longword into upper
* and lower halves, and to reassemble a product as a quad_t, shifted left
* (sizeof(long)*CHAR_BIT/2).
*/
#define HHALF(x) ((x) >> HALF_BITS)
#define LHALF(x) ((x) & ((1 << HALF_BITS) - 1))
#define LHUP(x) ((x) << HALF_BITS)
typedef unsigned int qshift_t;
quad_t __ashldi3(quad_t, qshift_t);
quad_t __ashrdi3(quad_t, qshift_t);
int __cmpdi2(quad_t a, quad_t b);
quad_t __divdi3(quad_t a, quad_t b);
quad_t __lshrdi3(quad_t, qshift_t);
quad_t __moddi3(quad_t a, quad_t b);
u_quad_t __qdivrem(u_quad_t u, u_quad_t v, u_quad_t *rem);
u_quad_t __udivdi3(u_quad_t a, u_quad_t b);
u_quad_t __umoddi3(u_quad_t a, u_quad_t b);
int __ucmpdi2(u_quad_t a, u_quad_t b);
#endif /* !_LIBKERN_QUAD_H_ */
#if defined (_X86_) && !defined (__x86_64__)
/*
* Shift a (signed) quad value left (arithmetic shift left).
* This is the same as logical shift left!
*/
quad_t
__ashldi3(a, shift)
quad_t a;
qshift_t shift;
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
aa.ul[H] = shift >= QUAD_BITS ? 0 :
aa.ul[L] << (shift - LONG_BITS);
aa.ul[L] = 0;
} else if (shift > 0) {
aa.ul[H] = (aa.ul[H] << shift) |
(aa.ul[L] >> (LONG_BITS - shift));
aa.ul[L] <<= shift;
}
return (aa.q);
}
/*
* Shift a (signed) quad value right (arithmetic shift right).
*/
quad_t
__ashrdi3(a, shift)
quad_t a;
qshift_t shift;
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
long s;
/*
* Smear bits rightward using the machine's right-shift
* method, whether that is sign extension or zero fill,
* to get the `sign word' s. Note that shifting by
* LONG_BITS is undefined, so we shift (LONG_BITS-1),
* then 1 more, to get our answer.
*/
s = (aa.sl[H] >> (LONG_BITS - 1)) >> 1;
aa.ul[L] = shift >= QUAD_BITS ? s :
aa.sl[H] >> (shift - LONG_BITS);
aa.ul[H] = s;
} else if (shift > 0) {
aa.ul[L] = (aa.ul[L] >> shift) |
(aa.ul[H] << (LONG_BITS - shift));
aa.sl[H] >>= shift;
}
return (aa.q);
}
/*
* Return 0, 1, or 2 as a <, =, > b respectively.
* Both a and b are considered signed---which means only the high word is
* signed.
*/
int
__cmpdi2(a, b)
quad_t a, b;
{
union uu aa, bb;
aa.q = a;
bb.q = b;
return (aa.sl[H] < bb.sl[H] ? 0 : aa.sl[H] > bb.sl[H] ? 2 :
aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
}
/*
* Divide two signed quads.
* ??? if -1/2 should produce -1 on this machine, this code is wrong
*/
quad_t
__divdi3(a, b)
quad_t a, b;
{
u_quad_t ua, ub, uq;
int neg;
if (a < 0)
ua = -(u_quad_t)a, neg = 1;
else
ua = a, neg = 0;
if (b < 0)
ub = -(u_quad_t)b, neg ^= 1;
else
ub = b;
uq = __qdivrem(ua, ub, (u_quad_t *)0);
return (neg ? -uq : uq);
}
/*
* Shift an (unsigned) quad value right (logical shift right).
*/
quad_t
__lshrdi3(a, shift)
quad_t a;
qshift_t shift;
{
union uu aa;
aa.q = a;
if (shift >= LONG_BITS) {
aa.ul[L] = shift >= QUAD_BITS ? 0 :
aa.ul[H] >> (shift - LONG_BITS);
aa.ul[H] = 0;
} else if (shift > 0) {
aa.ul[L] = (aa.ul[L] >> shift) |
(aa.ul[H] << (LONG_BITS - shift));
aa.ul[H] >>= shift;
}
return (aa.q);
}
/*
* Return remainder after dividing two signed quads.
*
* XXX
* If -1/2 should produce -1 on this machine, this code is wrong.
*/
quad_t
__moddi3(a, b)
quad_t a, b;
{
u_quad_t ua, ub, ur;
int neg;
if (a < 0)
ua = -(u_quad_t)a, neg = 1;
else
ua = a, neg = 0;
if (b < 0)
ub = -(u_quad_t)b;
else
ub = b;
(void)__qdivrem(ua, ub, &ur);
return (neg ? -ur : ur);
}
/*
* Multiprecision divide. This algorithm is from Knuth vol. 2 (2nd ed),
* section 4.3.1, pp. 257--259.
*/
#define B (1 << HALF_BITS) /* digit base */
/* Combine two `digits' to make a single two-digit number. */
#define COMBINE(a, b) (((u_long)(a) << HALF_BITS) | (b))
/* select a type for digits in base B: use unsigned short if they fit */
#if ULONG_MAX == 0xffffffff && USHRT_MAX >= 0xffff
typedef unsigned short digit;
#else
typedef u_long digit;
#endif
/*
* Shift p[0]..p[len] left `sh' bits, ignoring any bits that
* `fall out' the left (there never will be any such anyway).
* We may assume len >= 0. NOTE THAT THIS WRITES len+1 DIGITS.
*/
static void
__shl(register digit *p, register int len, register int sh)
{
register int i;
for (i = 0; i < len; i++)
p[i] = LHALF(p[i] << sh) | (p[i + 1] >> (HALF_BITS - sh));
p[i] = LHALF(p[i] << sh);
}
/*
* __qdivrem(u, v, rem) returns u/v and, optionally, sets *rem to u%v.
*
* We do this in base 2-sup-HALF_BITS, so that all intermediate products
* fit within u_long. As a consequence, the maximum length dividend and
* divisor are 4 `digits' in this base (they are shorter if they have
* leading zeros).
*/
u_quad_t
__qdivrem(uq, vq, arq)
u_quad_t uq, vq, *arq;
{
union uu tmp;
digit *u, *v, *q;
register digit v1, v2;
u_long qhat, rhat, t;
int m, n, d, j, i;
digit uspace[5], vspace[5], qspace[5];
/*
* Take care of special cases: divide by zero, and u < v.
*/
if (vq == 0) {
/* divide by zero. */
static volatile const unsigned int zero = 0;
tmp.ul[H] = tmp.ul[L] = 1 / zero;
if (arq)
*arq = uq;
return (tmp.q);
}
if (uq < vq) {
if (arq)
*arq = uq;
return (0);
}
u = &uspace[0];
v = &vspace[0];
q = &qspace[0];
/*
* Break dividend and divisor into digits in base B, then
* count leading zeros to determine m and n. When done, we
* will have:
* u = (u[1]u[2]...u[m+n]) sub B
* v = (v[1]v[2]...v[n]) sub B
* v[1] != 0
* 1 < n <= 4 (if n = 1, we use a different division algorithm)
* m >= 0 (otherwise u < v, which we already checked)
* m + n = 4
* and thus
* m = 4 - n <= 2
*/
tmp.uq = uq;
u[0] = 0;
u[1] = HHALF(tmp.ul[H]);
u[2] = LHALF(tmp.ul[H]);
u[3] = HHALF(tmp.ul[L]);
u[4] = LHALF(tmp.ul[L]);
tmp.uq = vq;
v[1] = HHALF(tmp.ul[H]);
v[2] = LHALF(tmp.ul[H]);
v[3] = HHALF(tmp.ul[L]);
v[4] = LHALF(tmp.ul[L]);
for (n = 4; v[1] == 0; v++) {
if (--n == 1) {
u_long rbj; /* r*B+u[j] (not root boy jim) */
digit q1, q2, q3, q4;
/*
* Change of plan, per exercise 16.
* r = 0;
* for j = 1..4:
* q[j] = floor((r*B + u[j]) / v),
* r = (r*B + u[j]) % v;
* We unroll this completely here.
*/
t = v[2]; /* nonzero, by definition */
q1 = u[1] / t;
rbj = COMBINE(u[1] % t, u[2]);
q2 = rbj / t;
rbj = COMBINE(rbj % t, u[3]);
q3 = rbj / t;
rbj = COMBINE(rbj % t, u[4]);
q4 = rbj / t;
if (arq)
*arq = rbj % t;
tmp.ul[H] = COMBINE(q1, q2);
tmp.ul[L] = COMBINE(q3, q4);
return (tmp.q);
}
}
/*
* By adjusting q once we determine m, we can guarantee that
* there is a complete four-digit quotient at &qspace[1] when
* we finally stop.
*/
for (m = 4 - n; u[1] == 0; u++)
m--;
for (i = 4 - m; --i >= 0;)
q[i] = 0;
q += 4 - m;
/*
* Here we run Program D, translated from MIX to C and acquiring
* a few minor changes.
*
* D1: choose multiplier 1 << d to ensure v[1] >= B/2.
*/
d = 0;
for (t = v[1]; t < B / 2; t <<= 1)
d++;
if (d > 0) {
__shl(&u[0], m + n, d); /* u <<= d */
__shl(&v[1], n - 1, d); /* v <<= d */
}
/*
* D2: j = 0.
*/
j = 0;
v1 = v[1]; /* for D3 -- note that v[1..n] are constant */
v2 = v[2]; /* for D3 */
do {
register digit uj0, uj1, uj2;
/*
* D3: Calculate qhat (\^q, in TeX notation).
* Let qhat = min((u[j]*B + u[j+1])/v[1], B-1), and
* let rhat = (u[j]*B + u[j+1]) mod v[1].
* While rhat < B and v[2]*qhat > rhat*B+u[j+2],
* decrement qhat and increase rhat correspondingly.
* Note that if rhat >= B, v[2]*qhat < rhat*B.
*/
uj0 = u[j + 0]; /* for D3 only -- note that u[j+...] change */
uj1 = u[j + 1]; /* for D3 only */
uj2 = u[j + 2]; /* for D3 only */
if (uj0 == v1) {
qhat = B;
rhat = uj1;
goto qhat_too_big;
} else {
u_long nn = COMBINE(uj0, uj1);
qhat = nn / v1;
rhat = nn % v1;
}
while (v2 * qhat > COMBINE(rhat, uj2)) {
qhat_too_big:
qhat--;
if ((rhat += v1) >= B)
break;
}
/*
* D4: Multiply and subtract.
* The variable `t' holds any borrows across the loop.
* We split this up so that we do not require v[0] = 0,
* and to eliminate a final special case.
*/
for (t = 0, i = n; i > 0; i--) {
t = u[i + j] - v[i] * qhat - t;
u[i + j] = LHALF(t);
t = (B - HHALF(t)) & (B - 1);
}
t = u[j] - t;
u[j] = LHALF(t);
/*
* D5: test remainder.
* There is a borrow if and only if HHALF(t) is nonzero;
* in that (rare) case, qhat was too large (by exactly 1).
* Fix it by adding v[1..n] to u[j..j+n].
*/
if (HHALF(t)) {
qhat--;
for (t = 0, i = n; i > 0; i--) { /* D6: add back. */
t += u[i + j] + v[i];
u[i + j] = LHALF(t);
t = HHALF(t);
}
u[j] = LHALF(u[j] + t);
}
q[j] = qhat;
} while (++j <= m); /* D7: loop on j. */
/*
* If caller wants the remainder, we have to calculate it as
* u[m..m+n] >> d (this is at most n digits and thus fits in
* u[m+1..m+n], but we may need more source digits).
*/
if (arq) {
if (d) {
for (i = m + n; i > m; --i)
u[i] = (u[i] >> d) |
LHALF(u[i - 1] << (HALF_BITS - d));
u[i] = 0;
}
tmp.ul[H] = COMBINE(uspace[1], uspace[2]);
tmp.ul[L] = COMBINE(uspace[3], uspace[4]);
*arq = tmp.q;
}
tmp.ul[H] = COMBINE(qspace[1], qspace[2]);
tmp.ul[L] = COMBINE(qspace[3], qspace[4]);
return (tmp.q);
}
/*
* Return 0, 1, or 2 as a <, =, > b respectively.
* Neither a nor b are considered signed.
*/
int
__ucmpdi2(a, b)
u_quad_t a, b;
{
union uu aa, bb;
aa.uq = a;
bb.uq = b;
return (aa.ul[H] < bb.ul[H] ? 0 : aa.ul[H] > bb.ul[H] ? 2 :
aa.ul[L] < bb.ul[L] ? 0 : aa.ul[L] > bb.ul[L] ? 2 : 1);
}
/*
* Divide two unsigned quads.
*/
u_quad_t
__udivdi3(a, b)
u_quad_t a, b;
{
return (__qdivrem(a, b, (u_quad_t *)0));
}
/*
* Return remainder after dividing two unsigned quads.
*/
u_quad_t
__umoddi3(a, b)
u_quad_t a, b;
{
u_quad_t r;
(void)__qdivrem(a, b, &r);
return (r);
}
/*
* Divide two unsigned quads.
* This function is new in GCC 7.
*/
u_quad_t
__udivmoddi4(a, b, rem)
u_quad_t a, b, *rem;
{
u_quad_t ua, ub, uq, ur;
ua = a;
ub = b;
uq = __qdivrem(ua, ub, &ur);
if (rem)
*rem = ur;
return uq;
}
#else
static int __attribute__((unused)) dummy;
#endif /* defined (_X86_) && !defined (__x86_64__) */
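A hedged sanity-check sketch for the helpers above, meant for a 32-bit x86 MinGW build where __udivmoddi4 actually resolves to this file (on other targets the symbol comes from libgcc or does not exist at all):
#include <stdio.h>
extern unsigned long long __udivmoddi4(unsigned long long a, unsigned long long b,
                                       unsigned long long *rem);
int main(void)
{
    unsigned long long rem = 0;
    unsigned long long quot = __udivmoddi4(1000000000007ULL, 12345ULL, &rem);
    /* The reference values are computed by the compiler at build time. */
    int ok = (quot == 1000000000007ULL / 12345ULL) && (rem == 1000000000007ULL % 12345ULL);
    printf("__udivmoddi4: %s\n", ok ? "PASS" : "FAIL");
    return ok ? 0 : 1;
}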

View file

@ -0,0 +1,39 @@
// Some libraries expect these functions, which Visual Studio (pre-2013) fails to provide.
#include <stdio.h>
#include <math.h>
#ifndef _MSC_VER
# include <stdint.h>
int64_t _ftelli64(
FILE *stream
);
int _fseeki64(
FILE *stream,
int64_t offset,
int origin
);
#endif
int fseeko(FILE *fp, off_t offset, int whence)
{
return _fseeki64(fp, (int64_t)offset, whence);
}
int fseeko64(FILE *fp, off64_t offset, int whence)
{
return _fseeki64(fp, (int64_t)offset, whence);
}
off_t ftello(FILE *stream)
{
return (off_t)_ftelli64(stream);
}
off64_t ftello64(FILE *stream)
{
return (off64_t)_ftelli64(stream);
}
long lround(double d)
{
return (long)(d > 0 ? d + 0.5 : ceil(d - 0.5));
}
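The lround() shim above rounds halfway cases away from zero, like the C99 function it replaces; a quick hedged check (built against this shim on a pre-2013 MSVC toolchain, or against the standard lround elsewhere, the output is the same):
#include <stdio.h>
#include <math.h>
int main(void)
{
    /* Expected: 3 -3 2 -2 (halfway cases move away from zero). */
    printf("%ld %ld %ld %ld\n", lround(2.5), lround(-2.5), lround(2.4), lround(-2.4));
    return 0;
}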

View file

@ -0,0 +1,19 @@
/**
* This file has no copyright assigned and is placed in the Public Domain.
* This file is part of the mingw-w64 runtime package.
* No warranty is given; refer to the file DISCLAIMER.PD within this package.
*/
#define __CRT__NO_INLINE
#include <stdarg.h>
#include <stdlib.h>
extern int __cdecl _vsnprintf(char * __restrict__, size_t, const char * __restrict__, va_list);
int __cdecl __ms_vsnprintf (char * __restrict__ s, size_t n, const char * __restrict__ format, va_list arg)
{
return _vsnprintf(s, n, format, arg);
}
int __cdecl __mingw_vsnprintf (char * __restrict__ s, size_t n, const char * __restrict__ format, va_list arg)
{
return _vsnprintf(s, n, format, arg);
}

Binary file not shown.

View file

@ -67,7 +67,8 @@ typedef struct SoundStreamInfo_
typedef enum SampleType_
{
SampleType_UInt8,
SampleType_Int16
SampleType_Int16,
SampleType_Float32
} SampleType;
typedef enum ChannelConfig_
@ -76,6 +77,15 @@ typedef enum ChannelConfig_
ChannelConfig_Stereo
} ChannelConfig;
typedef struct SoundStreamInfoEx_
{
int mBufferSize; // If mBufferSize is 0, the song doesn't use streaming but plays through a different interface.
int mSampleRate;
SampleType mSampleType;
ChannelConfig mChannelConfig;
} SoundStreamInfoEx;
typedef enum EIntConfigKey_
{
zmusic_adl_chips_count,
@ -264,14 +274,14 @@ typedef struct ZMusicConfigurationSetting_
#ifndef ZMUSIC_INTERNAL
#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(ZMUSIC_STATIC)
#define DLL_IMPORT _declspec(dllimport)
#else // !_MSC_VER
#else
#define DLL_IMPORT
#endif // _MSC_VER
#endif
// Note that the internal 'class' definitions are not C compatible!
typedef struct { int zm1; } *ZMusic_MidiSource;
typedef struct { int zm2; } *ZMusic_MusicStream;
typedef struct _ZMusic_MidiSource_Struct { int zm1; } *ZMusic_MidiSource;
typedef struct _ZMusic_MusicStream_Struct { int zm2; } *ZMusic_MusicStream;
struct SoundDecoder;
#endif
@ -319,6 +329,7 @@ extern "C"
DLL_IMPORT void ZMusic_VolumeChanged(ZMusic_MusicStream song);
DLL_IMPORT zmusic_bool ZMusic_WriteSMF(ZMusic_MidiSource source, const char* fn, int looplimit);
DLL_IMPORT void ZMusic_GetStreamInfo(ZMusic_MusicStream song, SoundStreamInfo *info);
DLL_IMPORT void ZMusic_GetStreamInfoEx(ZMusic_MusicStream song, SoundStreamInfoEx *info);
// Configuration interface. The return value specifies if a music restart is needed.
// RealValue should be written back to the CVAR or whatever other method the client uses to store configuration state.
DLL_IMPORT zmusic_bool ChangeMusicSettingInt(EIntConfigKey key, ZMusic_MusicStream song, int value, int* pRealValue);
@ -406,6 +417,7 @@ typedef zmusic_bool (*pfn_ZMusic_IsMIDI)(ZMusic_MusicStream song);
typedef void (*pfn_ZMusic_VolumeChanged)(ZMusic_MusicStream song);
typedef zmusic_bool (*pfn_ZMusic_WriteSMF)(ZMusic_MidiSource source, const char* fn, int looplimit);
typedef void (*pfn_ZMusic_GetStreamInfo)(ZMusic_MusicStream song, SoundStreamInfo *info);
typedef void (*pfn_ZMusic_GetStreamInfoEx)(ZMusic_MusicStream song, SoundStreamInfoEx *info);
typedef zmusic_bool (*pfn_ChangeMusicSettingInt)(EIntConfigKey key, ZMusic_MusicStream song, int value, int* pRealValue);
typedef zmusic_bool (*pfn_ChangeMusicSettingFloat)(EFloatConfigKey key, ZMusic_MusicStream song, float value, float* pRealValue);
typedef zmusic_bool (*pfn_ChangeMusicSettingString)(EStringConfigKey key, ZMusic_MusicStream song, const char* value);
@ -419,4 +431,4 @@ typedef const ZMusicMidiOutDevice *(*pfn_ZMusic_GetMidiDevices)(int *pAmount);
#endif
#endif
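A hedged sketch of consuming the new extended stream info: `song` stands for a ZMusic_MusicStream obtained through the ZMusic open calls declared elsewhere in this header, and the include path is an assumption.
#include <stdio.h>
#include "zmusic.h"
/* Reports the format negotiated by the backend using the new SoundStreamInfoEx data. */
static void describe_stream(ZMusic_MusicStream song)
{
    SoundStreamInfoEx info;
    ZMusic_GetStreamInfoEx(song, &info);
    if (info.mBufferSize == 0) {
        printf("song does not use the streaming interface\n");
        return;
    }
    printf("rate=%d Hz, %s samples, %s\n",
           info.mSampleRate,
           info.mSampleType == SampleType_Float32 ? "float32" :
           info.mSampleType == SampleType_Int16   ? "int16"   : "uint8",
           info.mChannelConfig == ChannelConfig_Stereo ? "stereo" : "mono");
}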

8
cmake/FindVPX.cmake Normal file
View file

@ -0,0 +1,8 @@
find_path(VPX_INCLUDE_DIR NAMES vpx/vp8dx.h vpx/vpx_decoder.h)
find_library(VPX_LIBRARIES NAMES vpx)
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(VPX DEFAULT_MSG VPX_LIBRARIES VPX_INCLUDE_DIR)
mark_as_advanced(VPX_INCLUDE_DIR VPX_LIBRARIES)

View file

@ -9,7 +9,7 @@ GPL v3 with permission.
The majority of original code uses a BSD-like license. See bsd.txt.
The OpenGL renderer is released under the LGPL v3, except some bits
of code that were inherited fro ZDoomGL.
of code that were inherited from ZDoomGL.
Some code was taken from the Eternity Engine.
Copyright (c) James Haley, Stephen McGranahan, et al.
@ -31,4 +31,4 @@ version used by the foobar2000 component foo_dumb as of mid-2008, found at
http://kode54.foobar2000.org/.
All script code in gzdoom.pk3 is licensed under the GPL v3 unless noted
otherwise.
otherwise.

1
fm_banks/LINKS.txt Normal file
View file

@ -0,0 +1 @@
https://github.com/Wohlstand/OPN2BankEditor

View file

@ -0,0 +1,15 @@
The bank was imported in a hacky way from Tomsoft's SegaMusic program
by TommyXie (Xie Rong Chun):
- a dummy MIDI file was created that contains all 128 instruments in GM order
- a Sega-emulator-playable BIN file was generated
- a GYM dump was generated from the playback of that dummy instrument file
- OPN2 Bank Editor was used to scan the GYM file for instruments and import all of them.
The work was done by Jean-Pierre Cimalando:
https://github.com/Wohlstand/OPN2BankEditor/issues/44
Then, the bank was tuned by Wohlstand:
- Corrected note offsets to align octaves of all instruments
- Merged with xg.wopn to provide the set of percussions.

BIN
fm_banks/Tomsoft.wopn Normal file

Binary file not shown.

14
fm_banks/readme.txt Normal file
View file

@ -0,0 +1,14 @@
This bank (gm.wopn and xg.wopn) was made by me. I have imported some instruments from various
VGM files, ported others from OPL3 banks, or remixed them.
This bank can be freely used, modified, shared with any purposes.
License for this bank - MIT
To edit this bank and other banks in WOPN format, you can use this editor
which I created for that: https://github.com/Wohlstand/OPN2BankEditor
==============================================================================
Vitaliy Novichkov "Wohlstand", 2017-2018

BIN
fm_banks/xg.wopn Normal file

Binary file not shown.

View file

@ -0,0 +1,199 @@
cmake_minimum_required(VERSION 3.15)
project(zvulkan)
set(ZVULKAN_SOURCES
src/vulkanbuilders.cpp
src/vulkandevice.cpp
src/vulkaninstance.cpp
src/vulkansurface.cpp
src/vulkanswapchain.cpp
src/vk_mem_alloc/vk_mem_alloc.cpp
src/vk_mem_alloc/vk_mem_alloc.natvis
src/volk/volk.c
src/glslang/glslang/MachineIndependent/SymbolTable.h
src/glslang/glslang/MachineIndependent/propagateNoContraction.cpp
src/glslang/glslang/MachineIndependent/PoolAlloc.cpp
src/glslang/glslang/MachineIndependent/Intermediate.cpp
src/glslang/glslang/MachineIndependent/gl_types.h
src/glslang/glslang/MachineIndependent/parseVersions.h
src/glslang/glslang/MachineIndependent/attribute.cpp
src/glslang/glslang/MachineIndependent/Scan.cpp
src/glslang/glslang/MachineIndependent/iomapper.h
src/glslang/glslang/MachineIndependent/ParseHelper.h
src/glslang/glslang/MachineIndependent/glslang_tab.cpp.h
src/glslang/glslang/MachineIndependent/SymbolTable.cpp
src/glslang/glslang/MachineIndependent/RemoveTree.cpp
src/glslang/glslang/MachineIndependent/Versions.h
src/glslang/glslang/MachineIndependent/reflection.cpp
src/glslang/glslang/MachineIndependent/LiveTraverser.h
src/glslang/glslang/MachineIndependent/iomapper.cpp
src/glslang/glslang/MachineIndependent/intermOut.cpp
src/glslang/glslang/MachineIndependent/Versions.cpp
src/glslang/glslang/MachineIndependent/Initialize.h
src/glslang/glslang/MachineIndependent/linkValidate.cpp
src/glslang/glslang/MachineIndependent/InfoSink.cpp
src/glslang/glslang/MachineIndependent/Constant.cpp
src/glslang/glslang/MachineIndependent/IntermTraverse.cpp
src/glslang/glslang/MachineIndependent/propagateNoContraction.h
src/glslang/glslang/MachineIndependent/glslang_tab.cpp
src/glslang/glslang/MachineIndependent/ShaderLang.cpp
src/glslang/glslang/MachineIndependent/preprocessor/Pp.cpp
src/glslang/glslang/MachineIndependent/preprocessor/PpTokens.h
src/glslang/glslang/MachineIndependent/preprocessor/PpAtom.cpp
src/glslang/glslang/MachineIndependent/preprocessor/PpContext.cpp
src/glslang/glslang/MachineIndependent/preprocessor/PpTokens.cpp
src/glslang/glslang/MachineIndependent/preprocessor/PpScanner.cpp
src/glslang/glslang/MachineIndependent/preprocessor/PpContext.h
src/glslang/glslang/MachineIndependent/attribute.h
src/glslang/glslang/MachineIndependent/localintermediate.h
src/glslang/glslang/MachineIndependent/parseConst.cpp
src/glslang/glslang/MachineIndependent/Initialize.cpp
src/glslang/glslang/MachineIndependent/limits.cpp
src/glslang/glslang/MachineIndependent/ParseContextBase.cpp
src/glslang/glslang/MachineIndependent/RemoveTree.h
src/glslang/glslang/MachineIndependent/ParseHelper.cpp
src/glslang/glslang/MachineIndependent/Scan.h
src/glslang/glslang/MachineIndependent/reflection.h
src/glslang/glslang/MachineIndependent/ScanContext.h
src/glslang/glslang/MachineIndependent/SpirvIntrinsics.cpp
src/glslang/glslang/OSDependent/osinclude.h
src/glslang/glslang/GenericCodeGen/Link.cpp
src/glslang/glslang/GenericCodeGen/CodeGen.cpp
src/glslang/glslang/Public/ShaderLang.h
src/glslang/glslang/Include/ConstantUnion.h
src/glslang/glslang/Include/InitializeGlobals.h
src/glslang/glslang/Include/Common.h
src/glslang/glslang/Include/PoolAlloc.h
src/glslang/glslang/Include/arrays.h
src/glslang/glslang/Include/ShHandle.h
src/glslang/glslang/Include/InfoSink.h
src/glslang/glslang/Include/ResourceLimits.h
src/glslang/glslang/Include/Types.h
src/glslang/glslang/Include/BaseTypes.h
src/glslang/glslang/Include/intermediate.h
src/glslang/glslang/Include/SpirvIntrinsics.h
src/glslang/glslang/Include/build_info.h
src/glslang/glslang/OSDependent/osinclude.h
src/glslang/spirv/Logger.h
src/glslang/spirv/GlslangToSpv.cpp
src/glslang/spirv/SPVRemapper.h
src/glslang/spirv/GLSL.ext.EXT.h
src/glslang/spirv/hex_float.h
src/glslang/spirv/doc.cpp
src/glslang/spirv/disassemble.cpp
src/glslang/spirv/SpvPostProcess.cpp
src/glslang/spirv/bitutils.h
src/glslang/spirv/InReadableOrder.cpp
src/glslang/spirv/GLSL.ext.AMD.h
src/glslang/spirv/GLSL.ext.NV.h
src/glslang/spirv/SPVRemapper.cpp
src/glslang/spirv/SpvBuilder.h
src/glslang/spirv/GLSL.ext.KHR.h
src/glslang/spirv/disassemble.h
src/glslang/spirv/SpvBuilder.cpp
src/glslang/spirv/GlslangToSpv.h
src/glslang/spirv/doc.h
src/glslang/spirv/SpvTools.cpp
src/glslang/spirv/spvIR.h
src/glslang/spirv/Logger.cpp
src/glslang/spirv/SpvTools.h
src/glslang/spirv/GLSL.std.450.h
src/glslang/spirv/NonSemanticDebugPrintf.h
src/glslang/OGLCompilersDLL/InitializeDll.cpp
src/glslang/OGLCompilersDLL/InitializeDll.h
)
set(ZVULKAN_INCLUDES
include/zvulkan/vulkanbuilders.h
include/zvulkan/vulkancompatibledevice.h
include/zvulkan/vulkandevice.h
include/zvulkan/vulkaninstance.h
include/zvulkan/vulkanobjects.h
include/zvulkan/vulkansurface.h
include/zvulkan/vulkanswapchain.h
include/zvulkan/volk/volk.h
include/zvulkan/vk_mem_alloc/vk_mem_alloc.h
)
set(ZVULKAN_WIN32_SOURCES
src/glslang/glslang/OSDependent/Windows/ossource.cpp
)
set(ZVULKAN_UNIX_SOURCES
src/glslang/glslang/OSDependent/Unix/ossource.cpp
)
set(VULKAN_INCLUDES
include/vulkan/vk_enum_string_helper.h
include/vulkan/vk_icd.h
include/vulkan/vk_layer.h
include/vulkan/vk_layer_dispatch_table.h
include/vulkan/vk_platform.h
include/vulkan/vk_sdk_platform.h
include/vulkan/vulkan.h
include/vulkan/vulkan_android.h
include/vulkan/vulkan_beta.h
include/vulkan/vulkan_core.h
include/vulkan/vulkan_directfb.h
include/vulkan/vulkan_fuchsia.h
include/vulkan/vulkan_ggp.h
include/vulkan/vulkan_ios.h
include/vulkan/vulkan_macos.h
include/vulkan/vulkan_metal.h
include/vulkan/vulkan_mir.h
include/vulkan/vulkan_screen.h
include/vulkan/vulkan_vi.h
include/vulkan/vulkan_wayland.h
include/vulkan/vulkan_win32.h
include/vulkan/vulkan_xcb.h
include/vulkan/vulkan_xlib.h
include/vulkan/vulkan_xlib_xrandr.h
)
source_group("src" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/.+")
source_group("src\\glslang" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/.+")
source_group("src\\glslang\\glslang" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/.+")
source_group("src\\glslang\\glslang\\GenericCodeGen" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/GenericCodeGen/.+")
source_group("src\\glslang\\glslang\\Include" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/Include/.+")
source_group("src\\glslang\\glslang\\MachineIndependent" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/MachineIndependent/.+")
source_group("src\\glslang\\glslang\\MachineIndependent\\preprocessor" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/MachineIndependent/preprocessor/.+")
source_group("src\\glslang\\glslang\\OSDependent" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/OSDependent/.+")
source_group("src\\glslang\\glslang\\OSDependent\\Unix" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/OSDependent/Unix/.+")
source_group("src\\glslang\\glslang\\OSDependent\\Web" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/OSDependent/Web/.+")
source_group("src\\glslang\\glslang\\OSDependent\\Windows" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/OSDependent/Windows/.+")
source_group("src\\glslang\\glslang\\Public" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/glslang/Public/.+")
source_group("src\\glslang\\OGLCompilersDLL" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/OGLCompilersDLL/.+")
source_group("src\\glslang\\spirv" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/glslang/spirv/.+")
source_group("src\\vk_mem_alloc" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/vk_mem_alloc/.+")
source_group("src\\volk" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/src/volk/.+")
source_group("include" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/include/zvulkan/.+")
source_group("include\\vk_mem_alloc" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/include/zvulkan/vk_mem_alloc/.+")
source_group("include\\volk" REGULAR_EXPRESSION "${CMAKE_CURRENT_SOURCE_DIR}/include/zvulkan/volk/.+")
include_directories(include include/zvulkan src)
if(WIN32)
set(ZVULKAN_SOURCES ${ZVULKAN_SOURCES} ${ZVULKAN_WIN32_SOURCES})
add_definitions(-DUNICODE -D_UNICODE)
else()
set(ZVULKAN_SOURCES ${ZVULKAN_SOURCES} ${ZVULKAN_UNIX_SOURCES})
set(ZVULKAN_LIBS ${CMAKE_DL_LIBS} -ldl)
add_definitions(-DUNIX -D_UNIX)
add_link_options(-pthread)
endif()
if(MSVC)
# Use all cores for compilation
set(CMAKE_CXX_FLAGS "/MP ${CMAKE_CXX_FLAGS}")
# Ignore warnings in third party code
#set_source_files_properties(${ZVULKAN_SOURCES} PROPERTIES COMPILE_FLAGS "/wd4244 /wd4267 /wd4005 /wd4018 -D_CRT_SECURE_NO_WARNINGS")
endif()
add_library(zvulkan STATIC ${ZVULKAN_SOURCES} ${ZVULKAN_INCLUDES} ${VULKAN_INCLUDES})
target_link_libraries(zvulkan ${ZVULKAN_LIBS})
set_target_properties(zvulkan PROPERTIES CXX_STANDARD 17)
if(MSVC)
set_property(TARGET zvulkan PROPERTY MSVC_RUNTIME_LIBRARY "MultiThreaded$<$<CONFIG:Debug>:Debug>")
endif()

View file

@ -0,0 +1,90 @@
# License information
## License for ZVulkan itself
// Copyright (c) 2016-2022 Magnus Norddahl
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
//
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
//
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
## License for src/vk_mem_alloc
// Copyright (c) 2017-2018 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
## License for src/volk
/**
* Copyright (c) 2018-2019 Arseny Kapoulkine
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
## License for include/zvulkan/vulkan
/*
** Copyright (c) 2015-2019 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
## License for src/glslang
See src/glslang/LICENSE.txt

View file

@ -0,0 +1,2 @@
# ZVulkan
A framework for building vulkan applications

File diff suppressed because it is too large

View file

@ -0,0 +1,525 @@
// *** THIS FILE IS GENERATED - DO NOT EDIT ***
// See loader_extension_generator.py for modifications
/*
* Copyright (c) 2015-2017 The Khronos Group Inc.
* Copyright (c) 2015-2017 Valve Corporation
* Copyright (c) 2015-2017 LunarG, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <mark@lunarg.com>
* Author: Mark Young <marky@lunarg.com>
*/
#pragma once
typedef PFN_vkVoidFunction (VKAPI_PTR *PFN_GetPhysicalDeviceProcAddr)(VkInstance instance, const char* pName);
// Instance function pointer dispatch table
typedef struct VkLayerInstanceDispatchTable_ {
// Manually add in GetPhysicalDeviceProcAddr entry
PFN_GetPhysicalDeviceProcAddr GetPhysicalDeviceProcAddr;
// ---- Core 1_0 commands
PFN_vkCreateInstance CreateInstance;
PFN_vkDestroyInstance DestroyInstance;
PFN_vkEnumeratePhysicalDevices EnumeratePhysicalDevices;
PFN_vkGetPhysicalDeviceFeatures GetPhysicalDeviceFeatures;
PFN_vkGetPhysicalDeviceFormatProperties GetPhysicalDeviceFormatProperties;
PFN_vkGetPhysicalDeviceImageFormatProperties GetPhysicalDeviceImageFormatProperties;
PFN_vkGetPhysicalDeviceProperties GetPhysicalDeviceProperties;
PFN_vkGetPhysicalDeviceQueueFamilyProperties GetPhysicalDeviceQueueFamilyProperties;
PFN_vkGetPhysicalDeviceMemoryProperties GetPhysicalDeviceMemoryProperties;
PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
PFN_vkCreateDevice CreateDevice;
PFN_vkEnumerateInstanceExtensionProperties EnumerateInstanceExtensionProperties;
PFN_vkEnumerateDeviceExtensionProperties EnumerateDeviceExtensionProperties;
PFN_vkEnumerateInstanceLayerProperties EnumerateInstanceLayerProperties;
PFN_vkEnumerateDeviceLayerProperties EnumerateDeviceLayerProperties;
PFN_vkGetPhysicalDeviceSparseImageFormatProperties GetPhysicalDeviceSparseImageFormatProperties;
// ---- Core 1_1 commands
PFN_vkEnumerateInstanceVersion EnumerateInstanceVersion;
PFN_vkEnumeratePhysicalDeviceGroups EnumeratePhysicalDeviceGroups;
PFN_vkGetPhysicalDeviceFeatures2 GetPhysicalDeviceFeatures2;
PFN_vkGetPhysicalDeviceProperties2 GetPhysicalDeviceProperties2;
PFN_vkGetPhysicalDeviceFormatProperties2 GetPhysicalDeviceFormatProperties2;
PFN_vkGetPhysicalDeviceImageFormatProperties2 GetPhysicalDeviceImageFormatProperties2;
PFN_vkGetPhysicalDeviceQueueFamilyProperties2 GetPhysicalDeviceQueueFamilyProperties2;
PFN_vkGetPhysicalDeviceMemoryProperties2 GetPhysicalDeviceMemoryProperties2;
PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 GetPhysicalDeviceSparseImageFormatProperties2;
PFN_vkGetPhysicalDeviceExternalBufferProperties GetPhysicalDeviceExternalBufferProperties;
PFN_vkGetPhysicalDeviceExternalFenceProperties GetPhysicalDeviceExternalFenceProperties;
PFN_vkGetPhysicalDeviceExternalSemaphoreProperties GetPhysicalDeviceExternalSemaphoreProperties;
// ---- VK_KHR_surface extension commands
PFN_vkDestroySurfaceKHR DestroySurfaceKHR;
PFN_vkGetPhysicalDeviceSurfaceSupportKHR GetPhysicalDeviceSurfaceSupportKHR;
PFN_vkGetPhysicalDeviceSurfaceCapabilitiesKHR GetPhysicalDeviceSurfaceCapabilitiesKHR;
PFN_vkGetPhysicalDeviceSurfaceFormatsKHR GetPhysicalDeviceSurfaceFormatsKHR;
PFN_vkGetPhysicalDeviceSurfacePresentModesKHR GetPhysicalDeviceSurfacePresentModesKHR;
// ---- VK_KHR_swapchain extension commands
PFN_vkGetPhysicalDevicePresentRectanglesKHR GetPhysicalDevicePresentRectanglesKHR;
// ---- VK_KHR_display extension commands
PFN_vkGetPhysicalDeviceDisplayPropertiesKHR GetPhysicalDeviceDisplayPropertiesKHR;
PFN_vkGetPhysicalDeviceDisplayPlanePropertiesKHR GetPhysicalDeviceDisplayPlanePropertiesKHR;
PFN_vkGetDisplayPlaneSupportedDisplaysKHR GetDisplayPlaneSupportedDisplaysKHR;
PFN_vkGetDisplayModePropertiesKHR GetDisplayModePropertiesKHR;
PFN_vkCreateDisplayModeKHR CreateDisplayModeKHR;
PFN_vkGetDisplayPlaneCapabilitiesKHR GetDisplayPlaneCapabilitiesKHR;
PFN_vkCreateDisplayPlaneSurfaceKHR CreateDisplayPlaneSurfaceKHR;
// ---- VK_KHR_xlib_surface extension commands
#ifdef VK_USE_PLATFORM_XLIB_KHR
PFN_vkCreateXlibSurfaceKHR CreateXlibSurfaceKHR;
#endif // VK_USE_PLATFORM_XLIB_KHR
#ifdef VK_USE_PLATFORM_XLIB_KHR
PFN_vkGetPhysicalDeviceXlibPresentationSupportKHR GetPhysicalDeviceXlibPresentationSupportKHR;
#endif // VK_USE_PLATFORM_XLIB_KHR
// ---- VK_KHR_xcb_surface extension commands
#ifdef VK_USE_PLATFORM_XCB_KHR
PFN_vkCreateXcbSurfaceKHR CreateXcbSurfaceKHR;
#endif // VK_USE_PLATFORM_XCB_KHR
#ifdef VK_USE_PLATFORM_XCB_KHR
PFN_vkGetPhysicalDeviceXcbPresentationSupportKHR GetPhysicalDeviceXcbPresentationSupportKHR;
#endif // VK_USE_PLATFORM_XCB_KHR
// ---- VK_KHR_wayland_surface extension commands
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
PFN_vkCreateWaylandSurfaceKHR CreateWaylandSurfaceKHR;
#endif // VK_USE_PLATFORM_WAYLAND_KHR
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR GetPhysicalDeviceWaylandPresentationSupportKHR;
#endif // VK_USE_PLATFORM_WAYLAND_KHR
// ---- VK_KHR_mir_surface extension commands
#ifdef VK_USE_PLATFORM_MIR_KHR
PFN_vkCreateMirSurfaceKHR CreateMirSurfaceKHR;
#endif // VK_USE_PLATFORM_MIR_KHR
#ifdef VK_USE_PLATFORM_MIR_KHR
PFN_vkGetPhysicalDeviceMirPresentationSupportKHR GetPhysicalDeviceMirPresentationSupportKHR;
#endif // VK_USE_PLATFORM_MIR_KHR
// ---- VK_KHR_android_surface extension commands
#ifdef VK_USE_PLATFORM_ANDROID_KHR
PFN_vkCreateAndroidSurfaceKHR CreateAndroidSurfaceKHR;
#endif // VK_USE_PLATFORM_ANDROID_KHR
// ---- VK_KHR_win32_surface extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkCreateWin32SurfaceKHR CreateWin32SurfaceKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetPhysicalDeviceWin32PresentationSupportKHR GetPhysicalDeviceWin32PresentationSupportKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
// ---- VK_KHR_get_physical_device_properties2 extension commands
PFN_vkGetPhysicalDeviceFeatures2KHR GetPhysicalDeviceFeatures2KHR;
PFN_vkGetPhysicalDeviceProperties2KHR GetPhysicalDeviceProperties2KHR;
PFN_vkGetPhysicalDeviceFormatProperties2KHR GetPhysicalDeviceFormatProperties2KHR;
PFN_vkGetPhysicalDeviceImageFormatProperties2KHR GetPhysicalDeviceImageFormatProperties2KHR;
PFN_vkGetPhysicalDeviceQueueFamilyProperties2KHR GetPhysicalDeviceQueueFamilyProperties2KHR;
PFN_vkGetPhysicalDeviceMemoryProperties2KHR GetPhysicalDeviceMemoryProperties2KHR;
PFN_vkGetPhysicalDeviceSparseImageFormatProperties2KHR GetPhysicalDeviceSparseImageFormatProperties2KHR;
// ---- VK_KHR_device_group_creation extension commands
PFN_vkEnumeratePhysicalDeviceGroupsKHR EnumeratePhysicalDeviceGroupsKHR;
// ---- VK_KHR_external_memory_capabilities extension commands
PFN_vkGetPhysicalDeviceExternalBufferPropertiesKHR GetPhysicalDeviceExternalBufferPropertiesKHR;
// ---- VK_KHR_external_semaphore_capabilities extension commands
PFN_vkGetPhysicalDeviceExternalSemaphorePropertiesKHR GetPhysicalDeviceExternalSemaphorePropertiesKHR;
// ---- VK_KHR_external_fence_capabilities extension commands
PFN_vkGetPhysicalDeviceExternalFencePropertiesKHR GetPhysicalDeviceExternalFencePropertiesKHR;
// ---- VK_KHR_get_surface_capabilities2 extension commands
PFN_vkGetPhysicalDeviceSurfaceCapabilities2KHR GetPhysicalDeviceSurfaceCapabilities2KHR;
PFN_vkGetPhysicalDeviceSurfaceFormats2KHR GetPhysicalDeviceSurfaceFormats2KHR;
// ---- VK_KHR_get_display_properties2 extension commands
PFN_vkGetPhysicalDeviceDisplayProperties2KHR GetPhysicalDeviceDisplayProperties2KHR;
PFN_vkGetPhysicalDeviceDisplayPlaneProperties2KHR GetPhysicalDeviceDisplayPlaneProperties2KHR;
PFN_vkGetDisplayModeProperties2KHR GetDisplayModeProperties2KHR;
PFN_vkGetDisplayPlaneCapabilities2KHR GetDisplayPlaneCapabilities2KHR;
// ---- VK_EXT_debug_report extension commands
PFN_vkCreateDebugReportCallbackEXT CreateDebugReportCallbackEXT;
PFN_vkDestroyDebugReportCallbackEXT DestroyDebugReportCallbackEXT;
PFN_vkDebugReportMessageEXT DebugReportMessageEXT;
// ---- VK_NV_external_memory_capabilities extension commands
PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV GetPhysicalDeviceExternalImageFormatPropertiesNV;
// ---- VK_NN_vi_surface extension commands
#ifdef VK_USE_PLATFORM_VI_NN
PFN_vkCreateViSurfaceNN CreateViSurfaceNN;
#endif // VK_USE_PLATFORM_VI_NN
// ---- VK_NVX_device_generated_commands extension commands
PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX GetPhysicalDeviceGeneratedCommandsPropertiesNVX;
// ---- VK_EXT_direct_mode_display extension commands
PFN_vkReleaseDisplayEXT ReleaseDisplayEXT;
// ---- VK_EXT_acquire_xlib_display extension commands
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
PFN_vkAcquireXlibDisplayEXT AcquireXlibDisplayEXT;
#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
#ifdef VK_USE_PLATFORM_XLIB_XRANDR_EXT
PFN_vkGetRandROutputDisplayEXT GetRandROutputDisplayEXT;
#endif // VK_USE_PLATFORM_XLIB_XRANDR_EXT
// ---- VK_EXT_display_surface_counter extension commands
PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT GetPhysicalDeviceSurfaceCapabilities2EXT;
// ---- VK_MVK_ios_surface extension commands
#ifdef VK_USE_PLATFORM_IOS_MVK
PFN_vkCreateIOSSurfaceMVK CreateIOSSurfaceMVK;
#endif // VK_USE_PLATFORM_IOS_MVK
// ---- VK_MVK_macos_surface extension commands
#ifdef VK_USE_PLATFORM_MACOS_MVK
PFN_vkCreateMacOSSurfaceMVK CreateMacOSSurfaceMVK;
#endif // VK_USE_PLATFORM_MACOS_MVK
// ---- VK_EXT_debug_utils extension commands
PFN_vkCreateDebugUtilsMessengerEXT CreateDebugUtilsMessengerEXT;
PFN_vkDestroyDebugUtilsMessengerEXT DestroyDebugUtilsMessengerEXT;
PFN_vkSubmitDebugUtilsMessageEXT SubmitDebugUtilsMessageEXT;
// ---- VK_EXT_sample_locations extension commands
PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT GetPhysicalDeviceMultisamplePropertiesEXT;
} VkLayerInstanceDispatchTable;
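The table above is nothing more than a set of function pointers resolved by command name. Below is a minimal, illustrative sketch of how a loader or layer might fill a few of its entries through the standard vkGetInstanceProcAddr entry point; the helper name and the selection of commands are assumptions for illustration, not part of the generated header.
// Illustrative sketch only: resolve a handful of instance-level commands into
// the VkLayerInstanceDispatchTable declared above. Assumes <vulkan/vulkan.h>
// is included and the Vulkan loader is linked so vkGetInstanceProcAddr exists.
static void FillInstanceDispatchExample(VkInstance instance, VkLayerInstanceDispatchTable* table)
{
    table->GetInstanceProcAddr = vkGetInstanceProcAddr;
    // Every member follows the same pattern: look the command up by its
    // "vk"-prefixed name; commands that are unavailable come back as NULL.
    table->DestroyInstance =
        (PFN_vkDestroyInstance)vkGetInstanceProcAddr(instance, "vkDestroyInstance");
    table->EnumeratePhysicalDevices =
        (PFN_vkEnumeratePhysicalDevices)vkGetInstanceProcAddr(instance, "vkEnumeratePhysicalDevices");
    table->GetPhysicalDeviceProperties =
        (PFN_vkGetPhysicalDeviceProperties)vkGetInstanceProcAddr(instance, "vkGetPhysicalDeviceProperties");
}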
// Device function pointer dispatch table
typedef struct VkLayerDispatchTable_ {
// ---- Core 1_0 commands
PFN_vkGetDeviceProcAddr GetDeviceProcAddr;
PFN_vkDestroyDevice DestroyDevice;
PFN_vkGetDeviceQueue GetDeviceQueue;
PFN_vkQueueSubmit QueueSubmit;
PFN_vkQueueWaitIdle QueueWaitIdle;
PFN_vkDeviceWaitIdle DeviceWaitIdle;
PFN_vkAllocateMemory AllocateMemory;
PFN_vkFreeMemory FreeMemory;
PFN_vkMapMemory MapMemory;
PFN_vkUnmapMemory UnmapMemory;
PFN_vkFlushMappedMemoryRanges FlushMappedMemoryRanges;
PFN_vkInvalidateMappedMemoryRanges InvalidateMappedMemoryRanges;
PFN_vkGetDeviceMemoryCommitment GetDeviceMemoryCommitment;
PFN_vkBindBufferMemory BindBufferMemory;
PFN_vkBindImageMemory BindImageMemory;
PFN_vkGetBufferMemoryRequirements GetBufferMemoryRequirements;
PFN_vkGetImageMemoryRequirements GetImageMemoryRequirements;
PFN_vkGetImageSparseMemoryRequirements GetImageSparseMemoryRequirements;
PFN_vkQueueBindSparse QueueBindSparse;
PFN_vkCreateFence CreateFence;
PFN_vkDestroyFence DestroyFence;
PFN_vkResetFences ResetFences;
PFN_vkGetFenceStatus GetFenceStatus;
PFN_vkWaitForFences WaitForFences;
PFN_vkCreateSemaphore CreateSemaphore;
PFN_vkDestroySemaphore DestroySemaphore;
PFN_vkCreateEvent CreateEvent;
PFN_vkDestroyEvent DestroyEvent;
PFN_vkGetEventStatus GetEventStatus;
PFN_vkSetEvent SetEvent;
PFN_vkResetEvent ResetEvent;
PFN_vkCreateQueryPool CreateQueryPool;
PFN_vkDestroyQueryPool DestroyQueryPool;
PFN_vkGetQueryPoolResults GetQueryPoolResults;
PFN_vkCreateBuffer CreateBuffer;
PFN_vkDestroyBuffer DestroyBuffer;
PFN_vkCreateBufferView CreateBufferView;
PFN_vkDestroyBufferView DestroyBufferView;
PFN_vkCreateImage CreateImage;
PFN_vkDestroyImage DestroyImage;
PFN_vkGetImageSubresourceLayout GetImageSubresourceLayout;
PFN_vkCreateImageView CreateImageView;
PFN_vkDestroyImageView DestroyImageView;
PFN_vkCreateShaderModule CreateShaderModule;
PFN_vkDestroyShaderModule DestroyShaderModule;
PFN_vkCreatePipelineCache CreatePipelineCache;
PFN_vkDestroyPipelineCache DestroyPipelineCache;
PFN_vkGetPipelineCacheData GetPipelineCacheData;
PFN_vkMergePipelineCaches MergePipelineCaches;
PFN_vkCreateGraphicsPipelines CreateGraphicsPipelines;
PFN_vkCreateComputePipelines CreateComputePipelines;
PFN_vkDestroyPipeline DestroyPipeline;
PFN_vkCreatePipelineLayout CreatePipelineLayout;
PFN_vkDestroyPipelineLayout DestroyPipelineLayout;
PFN_vkCreateSampler CreateSampler;
PFN_vkDestroySampler DestroySampler;
PFN_vkCreateDescriptorSetLayout CreateDescriptorSetLayout;
PFN_vkDestroyDescriptorSetLayout DestroyDescriptorSetLayout;
PFN_vkCreateDescriptorPool CreateDescriptorPool;
PFN_vkDestroyDescriptorPool DestroyDescriptorPool;
PFN_vkResetDescriptorPool ResetDescriptorPool;
PFN_vkAllocateDescriptorSets AllocateDescriptorSets;
PFN_vkFreeDescriptorSets FreeDescriptorSets;
PFN_vkUpdateDescriptorSets UpdateDescriptorSets;
PFN_vkCreateFramebuffer CreateFramebuffer;
PFN_vkDestroyFramebuffer DestroyFramebuffer;
PFN_vkCreateRenderPass CreateRenderPass;
PFN_vkDestroyRenderPass DestroyRenderPass;
PFN_vkGetRenderAreaGranularity GetRenderAreaGranularity;
PFN_vkCreateCommandPool CreateCommandPool;
PFN_vkDestroyCommandPool DestroyCommandPool;
PFN_vkResetCommandPool ResetCommandPool;
PFN_vkAllocateCommandBuffers AllocateCommandBuffers;
PFN_vkFreeCommandBuffers FreeCommandBuffers;
PFN_vkBeginCommandBuffer BeginCommandBuffer;
PFN_vkEndCommandBuffer EndCommandBuffer;
PFN_vkResetCommandBuffer ResetCommandBuffer;
PFN_vkCmdBindPipeline CmdBindPipeline;
PFN_vkCmdSetViewport CmdSetViewport;
PFN_vkCmdSetScissor CmdSetScissor;
PFN_vkCmdSetLineWidth CmdSetLineWidth;
PFN_vkCmdSetDepthBias CmdSetDepthBias;
PFN_vkCmdSetBlendConstants CmdSetBlendConstants;
PFN_vkCmdSetDepthBounds CmdSetDepthBounds;
PFN_vkCmdSetStencilCompareMask CmdSetStencilCompareMask;
PFN_vkCmdSetStencilWriteMask CmdSetStencilWriteMask;
PFN_vkCmdSetStencilReference CmdSetStencilReference;
PFN_vkCmdBindDescriptorSets CmdBindDescriptorSets;
PFN_vkCmdBindIndexBuffer CmdBindIndexBuffer;
PFN_vkCmdBindVertexBuffers CmdBindVertexBuffers;
PFN_vkCmdDraw CmdDraw;
PFN_vkCmdDrawIndexed CmdDrawIndexed;
PFN_vkCmdDrawIndirect CmdDrawIndirect;
PFN_vkCmdDrawIndexedIndirect CmdDrawIndexedIndirect;
PFN_vkCmdDispatch CmdDispatch;
PFN_vkCmdDispatchIndirect CmdDispatchIndirect;
PFN_vkCmdCopyBuffer CmdCopyBuffer;
PFN_vkCmdCopyImage CmdCopyImage;
PFN_vkCmdBlitImage CmdBlitImage;
PFN_vkCmdCopyBufferToImage CmdCopyBufferToImage;
PFN_vkCmdCopyImageToBuffer CmdCopyImageToBuffer;
PFN_vkCmdUpdateBuffer CmdUpdateBuffer;
PFN_vkCmdFillBuffer CmdFillBuffer;
PFN_vkCmdClearColorImage CmdClearColorImage;
PFN_vkCmdClearDepthStencilImage CmdClearDepthStencilImage;
PFN_vkCmdClearAttachments CmdClearAttachments;
PFN_vkCmdResolveImage CmdResolveImage;
PFN_vkCmdSetEvent CmdSetEvent;
PFN_vkCmdResetEvent CmdResetEvent;
PFN_vkCmdWaitEvents CmdWaitEvents;
PFN_vkCmdPipelineBarrier CmdPipelineBarrier;
PFN_vkCmdBeginQuery CmdBeginQuery;
PFN_vkCmdEndQuery CmdEndQuery;
PFN_vkCmdResetQueryPool CmdResetQueryPool;
PFN_vkCmdWriteTimestamp CmdWriteTimestamp;
PFN_vkCmdCopyQueryPoolResults CmdCopyQueryPoolResults;
PFN_vkCmdPushConstants CmdPushConstants;
PFN_vkCmdBeginRenderPass CmdBeginRenderPass;
PFN_vkCmdNextSubpass CmdNextSubpass;
PFN_vkCmdEndRenderPass CmdEndRenderPass;
PFN_vkCmdExecuteCommands CmdExecuteCommands;
// ---- Core 1_1 commands
PFN_vkBindBufferMemory2 BindBufferMemory2;
PFN_vkBindImageMemory2 BindImageMemory2;
PFN_vkGetDeviceGroupPeerMemoryFeatures GetDeviceGroupPeerMemoryFeatures;
PFN_vkCmdSetDeviceMask CmdSetDeviceMask;
PFN_vkCmdDispatchBase CmdDispatchBase;
PFN_vkGetImageMemoryRequirements2 GetImageMemoryRequirements2;
PFN_vkGetBufferMemoryRequirements2 GetBufferMemoryRequirements2;
PFN_vkGetImageSparseMemoryRequirements2 GetImageSparseMemoryRequirements2;
PFN_vkTrimCommandPool TrimCommandPool;
PFN_vkGetDeviceQueue2 GetDeviceQueue2;
PFN_vkCreateSamplerYcbcrConversion CreateSamplerYcbcrConversion;
PFN_vkDestroySamplerYcbcrConversion DestroySamplerYcbcrConversion;
PFN_vkCreateDescriptorUpdateTemplate CreateDescriptorUpdateTemplate;
PFN_vkDestroyDescriptorUpdateTemplate DestroyDescriptorUpdateTemplate;
PFN_vkUpdateDescriptorSetWithTemplate UpdateDescriptorSetWithTemplate;
PFN_vkGetDescriptorSetLayoutSupport GetDescriptorSetLayoutSupport;
// ---- VK_KHR_swapchain extension commands
PFN_vkCreateSwapchainKHR CreateSwapchainKHR;
PFN_vkDestroySwapchainKHR DestroySwapchainKHR;
PFN_vkGetSwapchainImagesKHR GetSwapchainImagesKHR;
PFN_vkAcquireNextImageKHR AcquireNextImageKHR;
PFN_vkQueuePresentKHR QueuePresentKHR;
PFN_vkGetDeviceGroupPresentCapabilitiesKHR GetDeviceGroupPresentCapabilitiesKHR;
PFN_vkGetDeviceGroupSurfacePresentModesKHR GetDeviceGroupSurfacePresentModesKHR;
PFN_vkAcquireNextImage2KHR AcquireNextImage2KHR;
// ---- VK_KHR_display_swapchain extension commands
PFN_vkCreateSharedSwapchainsKHR CreateSharedSwapchainsKHR;
// ---- VK_KHR_device_group extension commands
PFN_vkGetDeviceGroupPeerMemoryFeaturesKHR GetDeviceGroupPeerMemoryFeaturesKHR;
PFN_vkCmdSetDeviceMaskKHR CmdSetDeviceMaskKHR;
PFN_vkCmdDispatchBaseKHR CmdDispatchBaseKHR;
// ---- VK_KHR_maintenance1 extension commands
PFN_vkTrimCommandPoolKHR TrimCommandPoolKHR;
// ---- VK_KHR_external_memory_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetMemoryWin32HandleKHR GetMemoryWin32HandleKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetMemoryWin32HandlePropertiesKHR GetMemoryWin32HandlePropertiesKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
// ---- VK_KHR_external_memory_fd extension commands
PFN_vkGetMemoryFdKHR GetMemoryFdKHR;
PFN_vkGetMemoryFdPropertiesKHR GetMemoryFdPropertiesKHR;
// ---- VK_KHR_external_semaphore_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkImportSemaphoreWin32HandleKHR ImportSemaphoreWin32HandleKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetSemaphoreWin32HandleKHR GetSemaphoreWin32HandleKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
// ---- VK_KHR_external_semaphore_fd extension commands
PFN_vkImportSemaphoreFdKHR ImportSemaphoreFdKHR;
PFN_vkGetSemaphoreFdKHR GetSemaphoreFdKHR;
// ---- VK_KHR_push_descriptor extension commands
PFN_vkCmdPushDescriptorSetKHR CmdPushDescriptorSetKHR;
PFN_vkCmdPushDescriptorSetWithTemplateKHR CmdPushDescriptorSetWithTemplateKHR;
// ---- VK_KHR_descriptor_update_template extension commands
PFN_vkCreateDescriptorUpdateTemplateKHR CreateDescriptorUpdateTemplateKHR;
PFN_vkDestroyDescriptorUpdateTemplateKHR DestroyDescriptorUpdateTemplateKHR;
PFN_vkUpdateDescriptorSetWithTemplateKHR UpdateDescriptorSetWithTemplateKHR;
// ---- VK_KHR_shared_presentable_image extension commands
PFN_vkGetSwapchainStatusKHR GetSwapchainStatusKHR;
// ---- VK_KHR_external_fence_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkImportFenceWin32HandleKHR ImportFenceWin32HandleKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetFenceWin32HandleKHR GetFenceWin32HandleKHR;
#endif // VK_USE_PLATFORM_WIN32_KHR
// ---- VK_KHR_external_fence_fd extension commands
PFN_vkImportFenceFdKHR ImportFenceFdKHR;
PFN_vkGetFenceFdKHR GetFenceFdKHR;
// ---- VK_KHR_get_memory_requirements2 extension commands
PFN_vkGetImageMemoryRequirements2KHR GetImageMemoryRequirements2KHR;
PFN_vkGetBufferMemoryRequirements2KHR GetBufferMemoryRequirements2KHR;
PFN_vkGetImageSparseMemoryRequirements2KHR GetImageSparseMemoryRequirements2KHR;
// ---- VK_KHR_sampler_ycbcr_conversion extension commands
PFN_vkCreateSamplerYcbcrConversionKHR CreateSamplerYcbcrConversionKHR;
PFN_vkDestroySamplerYcbcrConversionKHR DestroySamplerYcbcrConversionKHR;
// ---- VK_KHR_bind_memory2 extension commands
PFN_vkBindBufferMemory2KHR BindBufferMemory2KHR;
PFN_vkBindImageMemory2KHR BindImageMemory2KHR;
// ---- VK_KHR_maintenance3 extension commands
PFN_vkGetDescriptorSetLayoutSupportKHR GetDescriptorSetLayoutSupportKHR;
// ---- VK_KHR_draw_indirect_count extension commands
PFN_vkCmdDrawIndirectCountKHR CmdDrawIndirectCountKHR;
PFN_vkCmdDrawIndexedIndirectCountKHR CmdDrawIndexedIndirectCountKHR;
// ---- VK_EXT_debug_marker extension commands
PFN_vkDebugMarkerSetObjectTagEXT DebugMarkerSetObjectTagEXT;
PFN_vkDebugMarkerSetObjectNameEXT DebugMarkerSetObjectNameEXT;
PFN_vkCmdDebugMarkerBeginEXT CmdDebugMarkerBeginEXT;
PFN_vkCmdDebugMarkerEndEXT CmdDebugMarkerEndEXT;
PFN_vkCmdDebugMarkerInsertEXT CmdDebugMarkerInsertEXT;
// ---- VK_AMD_draw_indirect_count extension commands
PFN_vkCmdDrawIndirectCountAMD CmdDrawIndirectCountAMD;
PFN_vkCmdDrawIndexedIndirectCountAMD CmdDrawIndexedIndirectCountAMD;
// ---- VK_AMD_shader_info extension commands
PFN_vkGetShaderInfoAMD GetShaderInfoAMD;
// ---- VK_NV_external_memory_win32 extension commands
#ifdef VK_USE_PLATFORM_WIN32_KHR
PFN_vkGetMemoryWin32HandleNV GetMemoryWin32HandleNV;
#endif // VK_USE_PLATFORM_WIN32_KHR
// ---- VK_NVX_device_generated_commands extension commands
PFN_vkCmdProcessCommandsNVX CmdProcessCommandsNVX;
PFN_vkCmdReserveSpaceForCommandsNVX CmdReserveSpaceForCommandsNVX;
PFN_vkCreateIndirectCommandsLayoutNVX CreateIndirectCommandsLayoutNVX;
PFN_vkDestroyIndirectCommandsLayoutNVX DestroyIndirectCommandsLayoutNVX;
PFN_vkCreateObjectTableNVX CreateObjectTableNVX;
PFN_vkDestroyObjectTableNVX DestroyObjectTableNVX;
PFN_vkRegisterObjectsNVX RegisterObjectsNVX;
PFN_vkUnregisterObjectsNVX UnregisterObjectsNVX;
// ---- VK_NV_clip_space_w_scaling extension commands
PFN_vkCmdSetViewportWScalingNV CmdSetViewportWScalingNV;
// ---- VK_EXT_display_control extension commands
PFN_vkDisplayPowerControlEXT DisplayPowerControlEXT;
PFN_vkRegisterDeviceEventEXT RegisterDeviceEventEXT;
PFN_vkRegisterDisplayEventEXT RegisterDisplayEventEXT;
PFN_vkGetSwapchainCounterEXT GetSwapchainCounterEXT;
// ---- VK_GOOGLE_display_timing extension commands
PFN_vkGetRefreshCycleDurationGOOGLE GetRefreshCycleDurationGOOGLE;
PFN_vkGetPastPresentationTimingGOOGLE GetPastPresentationTimingGOOGLE;
// ---- VK_EXT_discard_rectangles extension commands
PFN_vkCmdSetDiscardRectangleEXT CmdSetDiscardRectangleEXT;
// ---- VK_EXT_hdr_metadata extension commands
PFN_vkSetHdrMetadataEXT SetHdrMetadataEXT;
// ---- VK_EXT_debug_utils extension commands
PFN_vkSetDebugUtilsObjectNameEXT SetDebugUtilsObjectNameEXT;
PFN_vkSetDebugUtilsObjectTagEXT SetDebugUtilsObjectTagEXT;
PFN_vkQueueBeginDebugUtilsLabelEXT QueueBeginDebugUtilsLabelEXT;
PFN_vkQueueEndDebugUtilsLabelEXT QueueEndDebugUtilsLabelEXT;
PFN_vkQueueInsertDebugUtilsLabelEXT QueueInsertDebugUtilsLabelEXT;
PFN_vkCmdBeginDebugUtilsLabelEXT CmdBeginDebugUtilsLabelEXT;
PFN_vkCmdEndDebugUtilsLabelEXT CmdEndDebugUtilsLabelEXT;
PFN_vkCmdInsertDebugUtilsLabelEXT CmdInsertDebugUtilsLabelEXT;
// ---- VK_ANDROID_external_memory_android_hardware_buffer extension commands
#ifdef VK_USE_PLATFORM_ANDROID_KHR
PFN_vkGetAndroidHardwareBufferPropertiesANDROID GetAndroidHardwareBufferPropertiesANDROID;
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_ANDROID_KHR
PFN_vkGetMemoryAndroidHardwareBufferANDROID GetMemoryAndroidHardwareBufferANDROID;
#endif // VK_USE_PLATFORM_ANDROID_KHR
// ---- VK_EXT_sample_locations extension commands
PFN_vkCmdSetSampleLocationsEXT CmdSetSampleLocationsEXT;
// ---- VK_EXT_validation_cache extension commands
PFN_vkCreateValidationCacheEXT CreateValidationCacheEXT;
PFN_vkDestroyValidationCacheEXT DestroyValidationCacheEXT;
PFN_vkMergeValidationCachesEXT MergeValidationCachesEXT;
PFN_vkGetValidationCacheDataEXT GetValidationCacheDataEXT;
// ---- VK_EXT_external_memory_host extension commands
PFN_vkGetMemoryHostPointerPropertiesEXT GetMemoryHostPointerPropertiesEXT;
// ---- VK_AMD_buffer_marker extension commands
PFN_vkCmdWriteBufferMarkerAMD CmdWriteBufferMarkerAMD;
} VkLayerDispatchTable;
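Device-level commands are resolved the same way, but through vkGetDeviceProcAddr so that calls bypass per-instance dispatch. Another hedged sketch with a made-up helper name:
// Illustrative sketch only: fill a few device-level entries of the
// VkLayerDispatchTable declared above. GetDeviceProcAddr itself has to be
// fetched through the instance before it can resolve anything else.
static void FillDeviceDispatchExample(VkInstance instance, VkDevice device, VkLayerDispatchTable* table)
{
    table->GetDeviceProcAddr =
        (PFN_vkGetDeviceProcAddr)vkGetInstanceProcAddr(instance, "vkGetDeviceProcAddr");
    table->QueueSubmit =
        (PFN_vkQueueSubmit)table->GetDeviceProcAddr(device, "vkQueueSubmit");
    table->BeginCommandBuffer =
        (PFN_vkBeginCommandBuffer)table->GetDeviceProcAddr(device, "vkBeginCommandBuffer");
    table->CmdDraw =
        (PFN_vkCmdDraw)table->GetDeviceProcAddr(device, "vkCmdDraw");
}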

View file

@@ -0,0 +1,65 @@
#ifndef VULKAN_MIR_H_
#define VULKAN_MIR_H_ 1
#ifdef __cplusplus
extern "C" {
#endif
/*
** Copyright (c) 2015-2018 The Khronos Group Inc.
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
/*
** This header is generated from the Khronos Vulkan XML API Registry.
**
*/
#define VK_KHR_mir_surface 1
#define VK_KHR_MIR_SURFACE_SPEC_VERSION 4
#define VK_KHR_MIR_SURFACE_EXTENSION_NAME "VK_KHR_mir_surface"
typedef VkFlags VkMirSurfaceCreateFlagsKHR;
typedef struct VkMirSurfaceCreateInfoKHR {
VkStructureType sType;
const void* pNext;
VkMirSurfaceCreateFlagsKHR flags;
MirConnection* connection;
MirSurface* mirSurface;
} VkMirSurfaceCreateInfoKHR;
typedef VkResult (VKAPI_PTR *PFN_vkCreateMirSurfaceKHR)(VkInstance instance, const VkMirSurfaceCreateInfoKHR* pCreateInfo, const VkAllocationCallbacks* pAllocator, VkSurfaceKHR* pSurface);
typedef VkBool32 (VKAPI_PTR *PFN_vkGetPhysicalDeviceMirPresentationSupportKHR)(VkPhysicalDevice physicalDevice, uint32_t queueFamilyIndex, MirConnection* connection);
#ifndef VK_NO_PROTOTYPES
VKAPI_ATTR VkResult VKAPI_CALL vkCreateMirSurfaceKHR(
VkInstance instance,
const VkMirSurfaceCreateInfoKHR* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkSurfaceKHR* pSurface);
VKAPI_ATTR VkBool32 VKAPI_CALL vkGetPhysicalDeviceMirPresentationSupportKHR(
VkPhysicalDevice physicalDevice,
uint32_t queueFamilyIndex,
MirConnection* connection);
#endif
#ifdef __cplusplus
}
#endif
#endif
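For reference, the extension declared above is used like any other platform surface extension: fill in the create-info structure and call the creation function it prototypes. The sketch below is an assumption-laden illustration: the helper name is hypothetical, the MirConnection/MirSurface handles are presumed to come from the Mir client library, and the VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR value is taken from headers of the same era.
/* Illustrative sketch only: create a VkSurfaceKHR from existing Mir handles
 * using the prototype declared above (VK_NO_PROTOTYPES not defined). */
static VkSurfaceKHR CreateMirVulkanSurfaceExample(VkInstance instance,
                                                  MirConnection* connection,
                                                  MirSurface* mirSurface)
{
    VkMirSurfaceCreateInfoKHR createInfo;
    createInfo.sType = VK_STRUCTURE_TYPE_MIR_SURFACE_CREATE_INFO_KHR; /* assumed enum name */
    createInfo.pNext = NULL;
    createInfo.flags = 0;
    createInfo.connection = connection;
    createInfo.mirSurface = mirSurface;

    VkSurfaceKHR surface = VK_NULL_HANDLE;
    if (vkCreateMirSurfaceKHR(instance, &createInfo, NULL, &surface) != VK_SUCCESS)
        return VK_NULL_HANDLE;
    return surface;
}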

View file

@@ -6,6 +6,7 @@
*
* This library is distributed under the MIT License. See notice at the end of this file.
*/
/* clang-format off */
#ifndef VOLK_H_
#define VOLK_H_
@@ -13,16 +14,49 @@
# error To use volk, you need to define VK_NO_PROTOTYPES before including vulkan.h
#endif
/* VOLK_GENERATE_VERSION */
#define VOLK_HEADER_VERSION 102
/* VOLK_GENERATE_VERSION */
/* VOLK_GENERATE_VERSION_DEFINE */
#define VOLK_HEADER_VERSION 190
/* VOLK_GENERATE_VERSION_DEFINE */
#ifndef VK_NO_PROTOTYPES
# define VK_NO_PROTOTYPES
#endif
#ifndef VULKAN_H_
# include <vulkan/vulkan.h>
# ifdef VOLK_VULKAN_H_PATH
# include VOLK_VULKAN_H_PATH
# elif defined(VK_USE_PLATFORM_WIN32_KHR)
# include <vulkan/vk_platform.h>
# include <vulkan/vulkan_core.h>
/* When VK_USE_PLATFORM_WIN32_KHR is defined, instead of including vulkan.h directly, we include individual parts of the SDK
* This is necessary to avoid including <windows.h> which is very heavy - it takes 200ms to parse without WIN32_LEAN_AND_MEAN
* and 100ms to parse with it. vulkan_win32.h only needs a few symbols that are easy to redefine ourselves.
*/
typedef unsigned long DWORD;
typedef const wchar_t* LPCWSTR;
typedef void* HANDLE;
typedef struct HINSTANCE__* HINSTANCE;
typedef struct HWND__* HWND;
typedef struct HMONITOR__* HMONITOR;
typedef struct _SECURITY_ATTRIBUTES SECURITY_ATTRIBUTES;
# include <vulkan/vulkan_win32.h>
# ifdef VK_ENABLE_BETA_EXTENSIONS
# include <vulkan/vulkan_beta.h>
# endif
# else
# include <vulkan/vulkan.h>
# endif
#endif
/* Disable several extensions on earlier SDKs because later SDKs introduce a backwards incompatible change to function signatures */
#if VK_HEADER_VERSION < 140
# undef VK_NVX_image_view_handle
#endif
#if VK_HEADER_VERSION < 184
# undef VK_HUAWEI_subpass_shading
#endif
#ifdef __cplusplus
@@ -59,6 +93,12 @@ uint32_t volkGetInstanceVersion(void);
*/
void volkLoadInstance(VkInstance instance);
/**
* Load global function pointers using application-created VkInstance; call this function after creating the Vulkan instance.
* Skips loading device-based function pointers, requires usage of volkLoadDevice afterwards.
*/
void volkLoadInstanceOnly(VkInstance instance);
/**
* Load global function pointers using application-created VkDevice; call this function after creating the Vulkan device.
*
@@ -66,6 +106,18 @@ void volkLoadInstance(VkInstance instance);
*/
void volkLoadDevice(VkDevice device);
/**
* Return last VkInstance for which global function pointers have been loaded via volkLoadInstance(),
* or VK_NULL_HANDLE if volkLoadInstance() has not been called.
*/
VkInstance volkGetLoadedInstance(void);
/**
* Return last VkDevice for which global function pointers have been loaded via volkLoadDevice(),
* or VK_NULL_HANDLE if volkLoadDevice() has not been called.
*/
VkDevice volkGetLoadedDevice(void);
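Taken together, the declarations in this header imply the usual volk call order: initialize, create the instance, load instance pointers, create the device, then load device pointers either globally or into a VolkDeviceTable. A hedged usage sketch follows; CreateInstanceSomehow/CreateDeviceSomehow are hypothetical application helpers and error handling is minimal.
/* Illustrative sketch only: typical ordering of the volk entry points declared
 * in this header. Defining VK_NO_PROTOTYPES up front keeps any other Vulkan
 * includes consistent with volk's requirement. */
#define VK_NO_PROTOTYPES
#include "volk.h"

static int InitVulkanWithVolkExample(void)
{
    if (volkInitialize() != VK_SUCCESS)              /* locate vkGetInstanceProcAddr in the system loader */
        return 0;

    VkInstance instance = CreateInstanceSomehow();   /* hypothetical application helper */
    volkLoadInstanceOnly(instance);                  /* instance-level pointers only */

    VkDevice device = CreateDeviceSomehow(instance); /* hypothetical application helper */
    volkLoadDevice(device);                          /* device-level pointers into globals */

    /* Alternatively, load into a per-device table instead of globals: */
    struct VolkDeviceTable table;
    volkLoadDeviceTable(&table, device);
    return 1;
}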
/**
* Load function pointers using application-created VkDevice into a table.
* Application should use function pointers from that table instead of using global function pointers.
@@ -218,9 +270,27 @@ struct VolkDeviceTable
PFN_vkTrimCommandPool vkTrimCommandPool;
PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;
#endif /* defined(VK_VERSION_1_1) */
#if defined(VK_VERSION_1_2)
PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;
PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;
PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;
PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;
PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;
PFN_vkCreateRenderPass2 vkCreateRenderPass2;
PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;
PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;
PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;
PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;
PFN_vkResetQueryPool vkResetQueryPool;
PFN_vkSignalSemaphore vkSignalSemaphore;
PFN_vkWaitSemaphores vkWaitSemaphores;
#endif /* defined(VK_VERSION_1_2) */
#if defined(VK_AMD_buffer_marker)
PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;
#endif /* defined(VK_AMD_buffer_marker) */
#if defined(VK_AMD_display_native_hdr)
PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;
#endif /* defined(VK_AMD_display_native_hdr) */
#if defined(VK_AMD_draw_indirect_count)
PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;
PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;
@@ -238,6 +308,9 @@ struct VolkDeviceTable
#if defined(VK_EXT_calibrated_timestamps)
PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;
#endif /* defined(VK_EXT_calibrated_timestamps) */
#if defined(VK_EXT_color_write_enable)
PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;
#endif /* defined(VK_EXT_color_write_enable) */
#if defined(VK_EXT_conditional_rendering)
PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
@@ -249,16 +322,6 @@ struct VolkDeviceTable
PFN_vkDebugMarkerSetObjectNameEXT vkDebugMarkerSetObjectNameEXT;
PFN_vkDebugMarkerSetObjectTagEXT vkDebugMarkerSetObjectTagEXT;
#endif /* defined(VK_EXT_debug_marker) */
#if defined(VK_EXT_debug_utils)
PFN_vkCmdBeginDebugUtilsLabelEXT vkCmdBeginDebugUtilsLabelEXT;
PFN_vkCmdEndDebugUtilsLabelEXT vkCmdEndDebugUtilsLabelEXT;
PFN_vkCmdInsertDebugUtilsLabelEXT vkCmdInsertDebugUtilsLabelEXT;
PFN_vkQueueBeginDebugUtilsLabelEXT vkQueueBeginDebugUtilsLabelEXT;
PFN_vkQueueEndDebugUtilsLabelEXT vkQueueEndDebugUtilsLabelEXT;
PFN_vkQueueInsertDebugUtilsLabelEXT vkQueueInsertDebugUtilsLabelEXT;
PFN_vkSetDebugUtilsObjectNameEXT vkSetDebugUtilsObjectNameEXT;
PFN_vkSetDebugUtilsObjectTagEXT vkSetDebugUtilsObjectTagEXT;
#endif /* defined(VK_EXT_debug_utils) */
#if defined(VK_EXT_discard_rectangles)
PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;
#endif /* defined(VK_EXT_discard_rectangles) */
@@ -268,15 +331,56 @@ struct VolkDeviceTable
PFN_vkRegisterDeviceEventEXT vkRegisterDeviceEventEXT;
PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;
#endif /* defined(VK_EXT_display_control) */
#if defined(VK_EXT_extended_dynamic_state)
PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;
PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;
PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;
PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;
PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;
PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;
PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;
PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;
PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;
PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;
PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;
PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;
#endif /* defined(VK_EXT_extended_dynamic_state) */
#if defined(VK_EXT_extended_dynamic_state2)
PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;
PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;
PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;
PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;
PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;
#endif /* defined(VK_EXT_extended_dynamic_state2) */
#if defined(VK_EXT_external_memory_host)
PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
#endif /* defined(VK_EXT_external_memory_host) */
#if defined(VK_EXT_full_screen_exclusive)
PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;
PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;
#endif /* defined(VK_EXT_full_screen_exclusive) */
#if defined(VK_EXT_hdr_metadata)
PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;
#endif /* defined(VK_EXT_hdr_metadata) */
#if defined(VK_EXT_host_query_reset)
PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
#endif /* defined(VK_EXT_host_query_reset) */
#if defined(VK_EXT_image_drm_format_modifier)
PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;
#endif /* defined(VK_EXT_image_drm_format_modifier) */
#if defined(VK_EXT_line_rasterization)
PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;
#endif /* defined(VK_EXT_line_rasterization) */
#if defined(VK_EXT_multi_draw)
PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;
PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;
#endif /* defined(VK_EXT_multi_draw) */
#if defined(VK_EXT_private_data)
PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;
PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;
PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;
PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;
#endif /* defined(VK_EXT_private_data) */
#if defined(VK_EXT_sample_locations)
PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;
#endif /* defined(VK_EXT_sample_locations) */
@@ -294,20 +398,87 @@ struct VolkDeviceTable
PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;
PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;
#endif /* defined(VK_EXT_validation_cache) */
#if defined(VK_EXT_vertex_input_dynamic_state)
PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;
#endif /* defined(VK_EXT_vertex_input_dynamic_state) */
#if defined(VK_FUCHSIA_external_memory)
PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;
PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;
#endif /* defined(VK_FUCHSIA_external_memory) */
#if defined(VK_FUCHSIA_external_semaphore)
PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;
PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;
#endif /* defined(VK_FUCHSIA_external_semaphore) */
#if defined(VK_GOOGLE_display_timing)
PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;
#endif /* defined(VK_GOOGLE_display_timing) */
#if defined(VK_HUAWEI_invocation_mask)
PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;
#endif /* defined(VK_HUAWEI_invocation_mask) */
#if defined(VK_HUAWEI_subpass_shading)
PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;
PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;
#endif /* defined(VK_HUAWEI_subpass_shading) */
#if defined(VK_INTEL_performance_query)
PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;
PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;
PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;
PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;
PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;
PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;
PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;
PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;
PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;
#endif /* defined(VK_INTEL_performance_query) */
#if defined(VK_KHR_acceleration_structure)
PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;
PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;
PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;
PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;
PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;
PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;
PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;
PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;
PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;
PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;
PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;
PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;
PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;
PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;
PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;
PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;
#endif /* defined(VK_KHR_acceleration_structure) */
#if defined(VK_KHR_bind_memory2)
PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif /* defined(VK_KHR_bind_memory2) */
#if defined(VK_KHR_buffer_device_address)
PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;
#endif /* defined(VK_KHR_buffer_device_address) */
#if defined(VK_KHR_copy_commands2)
PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;
PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;
PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;
PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;
PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;
PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;
#endif /* defined(VK_KHR_copy_commands2) */
#if defined(VK_KHR_create_renderpass2)
PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
#endif /* defined(VK_KHR_create_renderpass2) */
#if defined(VK_KHR_deferred_host_operations)
PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;
PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;
PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;
PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;
PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;
#endif /* defined(VK_KHR_deferred_host_operations) */
#if defined(VK_KHR_descriptor_update_template)
PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
@@ -349,6 +520,9 @@ struct VolkDeviceTable
PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;
PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;
#endif /* defined(VK_KHR_external_semaphore_win32) */
#if defined(VK_KHR_fragment_shading_rate)
PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
#endif /* defined(VK_KHR_fragment_shading_rate) */
#if defined(VK_KHR_get_memory_requirements2)
PFN_vkGetBufferMemoryRequirements2KHR vkGetBufferMemoryRequirements2KHR;
PFN_vkGetImageMemoryRequirements2KHR vkGetImageMemoryRequirements2KHR;
@@ -360,9 +534,30 @@ struct VolkDeviceTable
#if defined(VK_KHR_maintenance3)
PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
#endif /* defined(VK_KHR_maintenance3) */
#if defined(VK_KHR_performance_query)
PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;
PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;
#endif /* defined(VK_KHR_performance_query) */
#if defined(VK_KHR_pipeline_executable_properties)
PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;
PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;
PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;
#endif /* defined(VK_KHR_pipeline_executable_properties) */
#if defined(VK_KHR_present_wait)
PFN_vkWaitForPresentKHR vkWaitForPresentKHR;
#endif /* defined(VK_KHR_present_wait) */
#if defined(VK_KHR_push_descriptor)
PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
#endif /* defined(VK_KHR_push_descriptor) */
#if defined(VK_KHR_ray_tracing_pipeline)
PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;
PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;
PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;
PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;
PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;
PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;
#endif /* defined(VK_KHR_ray_tracing_pipeline) */
#if defined(VK_KHR_sampler_ycbcr_conversion)
PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
@@ -377,17 +572,52 @@ struct VolkDeviceTable
PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
PFN_vkQueuePresentKHR vkQueuePresentKHR;
#endif /* defined(VK_KHR_swapchain) */
#if defined(VK_NVX_device_generated_commands)
PFN_vkCmdProcessCommandsNVX vkCmdProcessCommandsNVX;
PFN_vkCmdReserveSpaceForCommandsNVX vkCmdReserveSpaceForCommandsNVX;
PFN_vkCreateIndirectCommandsLayoutNVX vkCreateIndirectCommandsLayoutNVX;
PFN_vkCreateObjectTableNVX vkCreateObjectTableNVX;
PFN_vkDestroyIndirectCommandsLayoutNVX vkDestroyIndirectCommandsLayoutNVX;
PFN_vkDestroyObjectTableNVX vkDestroyObjectTableNVX;
PFN_vkRegisterObjectsNVX vkRegisterObjectsNVX;
PFN_vkUnregisterObjectsNVX vkUnregisterObjectsNVX;
#endif /* defined(VK_NVX_device_generated_commands) */
#if defined(VK_KHR_synchronization2)
PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;
PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;
PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;
PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;
PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;
PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
#endif /* defined(VK_KHR_synchronization2) */
#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker)
PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;
#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */
#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints)
PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;
#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */
#if defined(VK_KHR_timeline_semaphore)
PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
#endif /* defined(VK_KHR_timeline_semaphore) */
#if defined(VK_KHR_video_decode_queue)
PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;
#endif /* defined(VK_KHR_video_decode_queue) */
#if defined(VK_KHR_video_encode_queue)
PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;
#endif /* defined(VK_KHR_video_encode_queue) */
#if defined(VK_KHR_video_queue)
PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;
PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;
PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;
PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;
PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;
PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;
PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;
PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;
PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;
PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;
#endif /* defined(VK_KHR_video_queue) */
#if defined(VK_NVX_binary_import)
PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;
PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;
PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;
PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;
PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;
#endif /* defined(VK_NVX_binary_import) */
#if defined(VK_NVX_image_view_handle)
PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;
PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;
#endif /* defined(VK_NVX_image_view_handle) */
#if defined(VK_NV_clip_space_w_scaling)
@@ -397,9 +627,23 @@ struct VolkDeviceTable
PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
#endif /* defined(VK_NV_device_diagnostic_checkpoints) */
#if defined(VK_NV_device_generated_commands)
PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;
PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;
PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;
PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;
PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;
PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;
#endif /* defined(VK_NV_device_generated_commands) */
#if defined(VK_NV_external_memory_rdma)
PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;
#endif /* defined(VK_NV_external_memory_rdma) */
#if defined(VK_NV_external_memory_win32)
PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;
#endif /* defined(VK_NV_external_memory_win32) */
#if defined(VK_NV_fragment_shading_rate_enums)
PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;
#endif /* defined(VK_NV_fragment_shading_rate_enums) */
#if defined(VK_NV_mesh_shader)
PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;
PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;
@@ -427,9 +671,12 @@ struct VolkDeviceTable
PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;
PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;
#endif /* defined(VK_NV_shading_rate_image) */
#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1))
#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1))
PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;
#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */
#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template))
PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) */
#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */
#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;
PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;
@@ -610,9 +857,27 @@ extern PFN_vkGetPhysicalDeviceSparseImageFormatProperties2 vkGetPhysicalDeviceSp
extern PFN_vkTrimCommandPool vkTrimCommandPool;
extern PFN_vkUpdateDescriptorSetWithTemplate vkUpdateDescriptorSetWithTemplate;
#endif /* defined(VK_VERSION_1_1) */
#if defined(VK_VERSION_1_2)
extern PFN_vkCmdBeginRenderPass2 vkCmdBeginRenderPass2;
extern PFN_vkCmdDrawIndexedIndirectCount vkCmdDrawIndexedIndirectCount;
extern PFN_vkCmdDrawIndirectCount vkCmdDrawIndirectCount;
extern PFN_vkCmdEndRenderPass2 vkCmdEndRenderPass2;
extern PFN_vkCmdNextSubpass2 vkCmdNextSubpass2;
extern PFN_vkCreateRenderPass2 vkCreateRenderPass2;
extern PFN_vkGetBufferDeviceAddress vkGetBufferDeviceAddress;
extern PFN_vkGetBufferOpaqueCaptureAddress vkGetBufferOpaqueCaptureAddress;
extern PFN_vkGetDeviceMemoryOpaqueCaptureAddress vkGetDeviceMemoryOpaqueCaptureAddress;
extern PFN_vkGetSemaphoreCounterValue vkGetSemaphoreCounterValue;
extern PFN_vkResetQueryPool vkResetQueryPool;
extern PFN_vkSignalSemaphore vkSignalSemaphore;
extern PFN_vkWaitSemaphores vkWaitSemaphores;
#endif /* defined(VK_VERSION_1_2) */
#if defined(VK_AMD_buffer_marker)
extern PFN_vkCmdWriteBufferMarkerAMD vkCmdWriteBufferMarkerAMD;
#endif /* defined(VK_AMD_buffer_marker) */
#if defined(VK_AMD_display_native_hdr)
extern PFN_vkSetLocalDimmingAMD vkSetLocalDimmingAMD;
#endif /* defined(VK_AMD_display_native_hdr) */
#if defined(VK_AMD_draw_indirect_count)
extern PFN_vkCmdDrawIndexedIndirectCountAMD vkCmdDrawIndexedIndirectCountAMD;
extern PFN_vkCmdDrawIndirectCountAMD vkCmdDrawIndirectCountAMD;
@@ -624,6 +889,10 @@ extern PFN_vkGetShaderInfoAMD vkGetShaderInfoAMD;
extern PFN_vkGetAndroidHardwareBufferPropertiesANDROID vkGetAndroidHardwareBufferPropertiesANDROID;
extern PFN_vkGetMemoryAndroidHardwareBufferANDROID vkGetMemoryAndroidHardwareBufferANDROID;
#endif /* defined(VK_ANDROID_external_memory_android_hardware_buffer) */
#if defined(VK_EXT_acquire_drm_display)
extern PFN_vkAcquireDrmDisplayEXT vkAcquireDrmDisplayEXT;
extern PFN_vkGetDrmDisplayEXT vkGetDrmDisplayEXT;
#endif /* defined(VK_EXT_acquire_drm_display) */
#if defined(VK_EXT_acquire_xlib_display)
extern PFN_vkAcquireXlibDisplayEXT vkAcquireXlibDisplayEXT;
extern PFN_vkGetRandROutputDisplayEXT vkGetRandROutputDisplayEXT;
@@ -635,6 +904,9 @@ extern PFN_vkGetBufferDeviceAddressEXT vkGetBufferDeviceAddressEXT;
extern PFN_vkGetCalibratedTimestampsEXT vkGetCalibratedTimestampsEXT;
extern PFN_vkGetPhysicalDeviceCalibrateableTimeDomainsEXT vkGetPhysicalDeviceCalibrateableTimeDomainsEXT;
#endif /* defined(VK_EXT_calibrated_timestamps) */
#if defined(VK_EXT_color_write_enable)
extern PFN_vkCmdSetColorWriteEnableEXT vkCmdSetColorWriteEnableEXT;
#endif /* defined(VK_EXT_color_write_enable) */
#if defined(VK_EXT_conditional_rendering)
extern PFN_vkCmdBeginConditionalRenderingEXT vkCmdBeginConditionalRenderingEXT;
extern PFN_vkCmdEndConditionalRenderingEXT vkCmdEndConditionalRenderingEXT;
@@ -667,6 +939,10 @@ extern PFN_vkSubmitDebugUtilsMessageEXT vkSubmitDebugUtilsMessageEXT;
#if defined(VK_EXT_direct_mode_display)
extern PFN_vkReleaseDisplayEXT vkReleaseDisplayEXT;
#endif /* defined(VK_EXT_direct_mode_display) */
#if defined(VK_EXT_directfb_surface)
extern PFN_vkCreateDirectFBSurfaceEXT vkCreateDirectFBSurfaceEXT;
extern PFN_vkGetPhysicalDeviceDirectFBPresentationSupportEXT vkGetPhysicalDeviceDirectFBPresentationSupportEXT;
#endif /* defined(VK_EXT_directfb_surface) */
#if defined(VK_EXT_discard_rectangles)
extern PFN_vkCmdSetDiscardRectangleEXT vkCmdSetDiscardRectangleEXT;
#endif /* defined(VK_EXT_discard_rectangles) */
@@ -679,22 +955,70 @@ extern PFN_vkRegisterDisplayEventEXT vkRegisterDisplayEventEXT;
#if defined(VK_EXT_display_surface_counter)
extern PFN_vkGetPhysicalDeviceSurfaceCapabilities2EXT vkGetPhysicalDeviceSurfaceCapabilities2EXT;
#endif /* defined(VK_EXT_display_surface_counter) */
#if defined(VK_EXT_extended_dynamic_state)
extern PFN_vkCmdBindVertexBuffers2EXT vkCmdBindVertexBuffers2EXT;
extern PFN_vkCmdSetCullModeEXT vkCmdSetCullModeEXT;
extern PFN_vkCmdSetDepthBoundsTestEnableEXT vkCmdSetDepthBoundsTestEnableEXT;
extern PFN_vkCmdSetDepthCompareOpEXT vkCmdSetDepthCompareOpEXT;
extern PFN_vkCmdSetDepthTestEnableEXT vkCmdSetDepthTestEnableEXT;
extern PFN_vkCmdSetDepthWriteEnableEXT vkCmdSetDepthWriteEnableEXT;
extern PFN_vkCmdSetFrontFaceEXT vkCmdSetFrontFaceEXT;
extern PFN_vkCmdSetPrimitiveTopologyEXT vkCmdSetPrimitiveTopologyEXT;
extern PFN_vkCmdSetScissorWithCountEXT vkCmdSetScissorWithCountEXT;
extern PFN_vkCmdSetStencilOpEXT vkCmdSetStencilOpEXT;
extern PFN_vkCmdSetStencilTestEnableEXT vkCmdSetStencilTestEnableEXT;
extern PFN_vkCmdSetViewportWithCountEXT vkCmdSetViewportWithCountEXT;
#endif /* defined(VK_EXT_extended_dynamic_state) */
#if defined(VK_EXT_extended_dynamic_state2)
extern PFN_vkCmdSetDepthBiasEnableEXT vkCmdSetDepthBiasEnableEXT;
extern PFN_vkCmdSetLogicOpEXT vkCmdSetLogicOpEXT;
extern PFN_vkCmdSetPatchControlPointsEXT vkCmdSetPatchControlPointsEXT;
extern PFN_vkCmdSetPrimitiveRestartEnableEXT vkCmdSetPrimitiveRestartEnableEXT;
extern PFN_vkCmdSetRasterizerDiscardEnableEXT vkCmdSetRasterizerDiscardEnableEXT;
#endif /* defined(VK_EXT_extended_dynamic_state2) */
#if defined(VK_EXT_external_memory_host)
extern PFN_vkGetMemoryHostPointerPropertiesEXT vkGetMemoryHostPointerPropertiesEXT;
#endif /* defined(VK_EXT_external_memory_host) */
#if defined(VK_EXT_full_screen_exclusive)
extern PFN_vkAcquireFullScreenExclusiveModeEXT vkAcquireFullScreenExclusiveModeEXT;
extern PFN_vkGetPhysicalDeviceSurfacePresentModes2EXT vkGetPhysicalDeviceSurfacePresentModes2EXT;
extern PFN_vkReleaseFullScreenExclusiveModeEXT vkReleaseFullScreenExclusiveModeEXT;
#endif /* defined(VK_EXT_full_screen_exclusive) */
#if defined(VK_EXT_hdr_metadata)
extern PFN_vkSetHdrMetadataEXT vkSetHdrMetadataEXT;
#endif /* defined(VK_EXT_hdr_metadata) */
#if defined(VK_EXT_headless_surface)
extern PFN_vkCreateHeadlessSurfaceEXT vkCreateHeadlessSurfaceEXT;
#endif /* defined(VK_EXT_headless_surface) */
#if defined(VK_EXT_host_query_reset)
extern PFN_vkResetQueryPoolEXT vkResetQueryPoolEXT;
#endif /* defined(VK_EXT_host_query_reset) */
#if defined(VK_EXT_image_drm_format_modifier)
extern PFN_vkGetImageDrmFormatModifierPropertiesEXT vkGetImageDrmFormatModifierPropertiesEXT;
#endif /* defined(VK_EXT_image_drm_format_modifier) */
#if defined(VK_EXT_line_rasterization)
extern PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT;
#endif /* defined(VK_EXT_line_rasterization) */
#if defined(VK_EXT_metal_surface)
extern PFN_vkCreateMetalSurfaceEXT vkCreateMetalSurfaceEXT;
#endif /* defined(VK_EXT_metal_surface) */
#if defined(VK_EXT_multi_draw)
extern PFN_vkCmdDrawMultiEXT vkCmdDrawMultiEXT;
extern PFN_vkCmdDrawMultiIndexedEXT vkCmdDrawMultiIndexedEXT;
#endif /* defined(VK_EXT_multi_draw) */
#if defined(VK_EXT_private_data)
extern PFN_vkCreatePrivateDataSlotEXT vkCreatePrivateDataSlotEXT;
extern PFN_vkDestroyPrivateDataSlotEXT vkDestroyPrivateDataSlotEXT;
extern PFN_vkGetPrivateDataEXT vkGetPrivateDataEXT;
extern PFN_vkSetPrivateDataEXT vkSetPrivateDataEXT;
#endif /* defined(VK_EXT_private_data) */
#if defined(VK_EXT_sample_locations)
extern PFN_vkCmdSetSampleLocationsEXT vkCmdSetSampleLocationsEXT;
extern PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT;
#endif /* defined(VK_EXT_sample_locations) */
#if defined(VK_EXT_tooling_info)
extern PFN_vkGetPhysicalDeviceToolPropertiesEXT vkGetPhysicalDeviceToolPropertiesEXT;
#endif /* defined(VK_EXT_tooling_info) */
#if defined(VK_EXT_transform_feedback)
extern PFN_vkCmdBeginQueryIndexedEXT vkCmdBeginQueryIndexedEXT;
extern PFN_vkCmdBeginTransformFeedbackEXT vkCmdBeginTransformFeedbackEXT;
@@ -709,13 +1033,63 @@ extern PFN_vkDestroyValidationCacheEXT vkDestroyValidationCacheEXT;
extern PFN_vkGetValidationCacheDataEXT vkGetValidationCacheDataEXT;
extern PFN_vkMergeValidationCachesEXT vkMergeValidationCachesEXT;
#endif /* defined(VK_EXT_validation_cache) */
#if defined(VK_EXT_vertex_input_dynamic_state)
extern PFN_vkCmdSetVertexInputEXT vkCmdSetVertexInputEXT;
#endif /* defined(VK_EXT_vertex_input_dynamic_state) */
#if defined(VK_FUCHSIA_external_memory)
extern PFN_vkGetMemoryZirconHandleFUCHSIA vkGetMemoryZirconHandleFUCHSIA;
extern PFN_vkGetMemoryZirconHandlePropertiesFUCHSIA vkGetMemoryZirconHandlePropertiesFUCHSIA;
#endif /* defined(VK_FUCHSIA_external_memory) */
#if defined(VK_FUCHSIA_external_semaphore)
extern PFN_vkGetSemaphoreZirconHandleFUCHSIA vkGetSemaphoreZirconHandleFUCHSIA;
extern PFN_vkImportSemaphoreZirconHandleFUCHSIA vkImportSemaphoreZirconHandleFUCHSIA;
#endif /* defined(VK_FUCHSIA_external_semaphore) */
#if defined(VK_FUCHSIA_imagepipe_surface)
extern PFN_vkCreateImagePipeSurfaceFUCHSIA vkCreateImagePipeSurfaceFUCHSIA;
#endif /* defined(VK_FUCHSIA_imagepipe_surface) */
#if defined(VK_GGP_stream_descriptor_surface)
extern PFN_vkCreateStreamDescriptorSurfaceGGP vkCreateStreamDescriptorSurfaceGGP;
#endif /* defined(VK_GGP_stream_descriptor_surface) */
#if defined(VK_GOOGLE_display_timing)
extern PFN_vkGetPastPresentationTimingGOOGLE vkGetPastPresentationTimingGOOGLE;
extern PFN_vkGetRefreshCycleDurationGOOGLE vkGetRefreshCycleDurationGOOGLE;
#endif /* defined(VK_GOOGLE_display_timing) */
#if defined(VK_HUAWEI_invocation_mask)
extern PFN_vkCmdBindInvocationMaskHUAWEI vkCmdBindInvocationMaskHUAWEI;
#endif /* defined(VK_HUAWEI_invocation_mask) */
#if defined(VK_HUAWEI_subpass_shading)
extern PFN_vkCmdSubpassShadingHUAWEI vkCmdSubpassShadingHUAWEI;
extern PFN_vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI vkGetDeviceSubpassShadingMaxWorkgroupSizeHUAWEI;
#endif /* defined(VK_HUAWEI_subpass_shading) */
#if defined(VK_INTEL_performance_query)
extern PFN_vkAcquirePerformanceConfigurationINTEL vkAcquirePerformanceConfigurationINTEL;
extern PFN_vkCmdSetPerformanceMarkerINTEL vkCmdSetPerformanceMarkerINTEL;
extern PFN_vkCmdSetPerformanceOverrideINTEL vkCmdSetPerformanceOverrideINTEL;
extern PFN_vkCmdSetPerformanceStreamMarkerINTEL vkCmdSetPerformanceStreamMarkerINTEL;
extern PFN_vkGetPerformanceParameterINTEL vkGetPerformanceParameterINTEL;
extern PFN_vkInitializePerformanceApiINTEL vkInitializePerformanceApiINTEL;
extern PFN_vkQueueSetPerformanceConfigurationINTEL vkQueueSetPerformanceConfigurationINTEL;
extern PFN_vkReleasePerformanceConfigurationINTEL vkReleasePerformanceConfigurationINTEL;
extern PFN_vkUninitializePerformanceApiINTEL vkUninitializePerformanceApiINTEL;
#endif /* defined(VK_INTEL_performance_query) */
#if defined(VK_KHR_acceleration_structure)
extern PFN_vkBuildAccelerationStructuresKHR vkBuildAccelerationStructuresKHR;
extern PFN_vkCmdBuildAccelerationStructuresIndirectKHR vkCmdBuildAccelerationStructuresIndirectKHR;
extern PFN_vkCmdBuildAccelerationStructuresKHR vkCmdBuildAccelerationStructuresKHR;
extern PFN_vkCmdCopyAccelerationStructureKHR vkCmdCopyAccelerationStructureKHR;
extern PFN_vkCmdCopyAccelerationStructureToMemoryKHR vkCmdCopyAccelerationStructureToMemoryKHR;
extern PFN_vkCmdCopyMemoryToAccelerationStructureKHR vkCmdCopyMemoryToAccelerationStructureKHR;
extern PFN_vkCmdWriteAccelerationStructuresPropertiesKHR vkCmdWriteAccelerationStructuresPropertiesKHR;
extern PFN_vkCopyAccelerationStructureKHR vkCopyAccelerationStructureKHR;
extern PFN_vkCopyAccelerationStructureToMemoryKHR vkCopyAccelerationStructureToMemoryKHR;
extern PFN_vkCopyMemoryToAccelerationStructureKHR vkCopyMemoryToAccelerationStructureKHR;
extern PFN_vkCreateAccelerationStructureKHR vkCreateAccelerationStructureKHR;
extern PFN_vkDestroyAccelerationStructureKHR vkDestroyAccelerationStructureKHR;
extern PFN_vkGetAccelerationStructureBuildSizesKHR vkGetAccelerationStructureBuildSizesKHR;
extern PFN_vkGetAccelerationStructureDeviceAddressKHR vkGetAccelerationStructureDeviceAddressKHR;
extern PFN_vkGetDeviceAccelerationStructureCompatibilityKHR vkGetDeviceAccelerationStructureCompatibilityKHR;
extern PFN_vkWriteAccelerationStructuresPropertiesKHR vkWriteAccelerationStructuresPropertiesKHR;
#endif /* defined(VK_KHR_acceleration_structure) */
#if defined(VK_KHR_android_surface)
extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
#endif /* defined(VK_KHR_android_surface) */
@@ -723,12 +1097,32 @@ extern PFN_vkCreateAndroidSurfaceKHR vkCreateAndroidSurfaceKHR;
extern PFN_vkBindBufferMemory2KHR vkBindBufferMemory2KHR;
extern PFN_vkBindImageMemory2KHR vkBindImageMemory2KHR;
#endif /* defined(VK_KHR_bind_memory2) */
#if defined(VK_KHR_buffer_device_address)
extern PFN_vkGetBufferDeviceAddressKHR vkGetBufferDeviceAddressKHR;
extern PFN_vkGetBufferOpaqueCaptureAddressKHR vkGetBufferOpaqueCaptureAddressKHR;
extern PFN_vkGetDeviceMemoryOpaqueCaptureAddressKHR vkGetDeviceMemoryOpaqueCaptureAddressKHR;
#endif /* defined(VK_KHR_buffer_device_address) */
#if defined(VK_KHR_copy_commands2)
extern PFN_vkCmdBlitImage2KHR vkCmdBlitImage2KHR;
extern PFN_vkCmdCopyBuffer2KHR vkCmdCopyBuffer2KHR;
extern PFN_vkCmdCopyBufferToImage2KHR vkCmdCopyBufferToImage2KHR;
extern PFN_vkCmdCopyImage2KHR vkCmdCopyImage2KHR;
extern PFN_vkCmdCopyImageToBuffer2KHR vkCmdCopyImageToBuffer2KHR;
extern PFN_vkCmdResolveImage2KHR vkCmdResolveImage2KHR;
#endif /* defined(VK_KHR_copy_commands2) */
#if defined(VK_KHR_create_renderpass2)
extern PFN_vkCmdBeginRenderPass2KHR vkCmdBeginRenderPass2KHR;
extern PFN_vkCmdEndRenderPass2KHR vkCmdEndRenderPass2KHR;
extern PFN_vkCmdNextSubpass2KHR vkCmdNextSubpass2KHR;
extern PFN_vkCreateRenderPass2KHR vkCreateRenderPass2KHR;
#endif /* defined(VK_KHR_create_renderpass2) */
#if defined(VK_KHR_deferred_host_operations)
extern PFN_vkCreateDeferredOperationKHR vkCreateDeferredOperationKHR;
extern PFN_vkDeferredOperationJoinKHR vkDeferredOperationJoinKHR;
extern PFN_vkDestroyDeferredOperationKHR vkDestroyDeferredOperationKHR;
extern PFN_vkGetDeferredOperationMaxConcurrencyKHR vkGetDeferredOperationMaxConcurrencyKHR;
extern PFN_vkGetDeferredOperationResultKHR vkGetDeferredOperationResultKHR;
#endif /* defined(VK_KHR_deferred_host_operations) */
#if defined(VK_KHR_descriptor_update_template)
extern PFN_vkCreateDescriptorUpdateTemplateKHR vkCreateDescriptorUpdateTemplateKHR;
extern PFN_vkDestroyDescriptorUpdateTemplateKHR vkDestroyDescriptorUpdateTemplateKHR;
@@ -791,6 +1185,10 @@ extern PFN_vkImportSemaphoreFdKHR vkImportSemaphoreFdKHR;
extern PFN_vkGetSemaphoreWin32HandleKHR vkGetSemaphoreWin32HandleKHR;
extern PFN_vkImportSemaphoreWin32HandleKHR vkImportSemaphoreWin32HandleKHR;
#endif /* defined(VK_KHR_external_semaphore_win32) */
#if defined(VK_KHR_fragment_shading_rate)
extern PFN_vkCmdSetFragmentShadingRateKHR vkCmdSetFragmentShadingRateKHR;
extern PFN_vkGetPhysicalDeviceFragmentShadingRatesKHR vkGetPhysicalDeviceFragmentShadingRatesKHR;
#endif /* defined(VK_KHR_fragment_shading_rate) */
#if defined(VK_KHR_get_display_properties2)
extern PFN_vkGetDisplayModeProperties2KHR vkGetDisplayModeProperties2KHR;
extern PFN_vkGetDisplayPlaneCapabilities2KHR vkGetDisplayPlaneCapabilities2KHR;
@@ -821,9 +1219,32 @@ extern PFN_vkTrimCommandPoolKHR vkTrimCommandPoolKHR;
#if defined(VK_KHR_maintenance3)
extern PFN_vkGetDescriptorSetLayoutSupportKHR vkGetDescriptorSetLayoutSupportKHR;
#endif /* defined(VK_KHR_maintenance3) */
#if defined(VK_KHR_performance_query)
extern PFN_vkAcquireProfilingLockKHR vkAcquireProfilingLockKHR;
extern PFN_vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR vkEnumeratePhysicalDeviceQueueFamilyPerformanceQueryCountersKHR;
extern PFN_vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR vkGetPhysicalDeviceQueueFamilyPerformanceQueryPassesKHR;
extern PFN_vkReleaseProfilingLockKHR vkReleaseProfilingLockKHR;
#endif /* defined(VK_KHR_performance_query) */
#if defined(VK_KHR_pipeline_executable_properties)
extern PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR;
extern PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR;
extern PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR;
#endif /* defined(VK_KHR_pipeline_executable_properties) */
#if defined(VK_KHR_present_wait)
extern PFN_vkWaitForPresentKHR vkWaitForPresentKHR;
#endif /* defined(VK_KHR_present_wait) */
#if defined(VK_KHR_push_descriptor)
extern PFN_vkCmdPushDescriptorSetKHR vkCmdPushDescriptorSetKHR;
#endif /* defined(VK_KHR_push_descriptor) */
#if defined(VK_KHR_ray_tracing_pipeline)
extern PFN_vkCmdSetRayTracingPipelineStackSizeKHR vkCmdSetRayTracingPipelineStackSizeKHR;
extern PFN_vkCmdTraceRaysIndirectKHR vkCmdTraceRaysIndirectKHR;
extern PFN_vkCmdTraceRaysKHR vkCmdTraceRaysKHR;
extern PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR;
extern PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR;
extern PFN_vkGetRayTracingShaderGroupHandlesKHR vkGetRayTracingShaderGroupHandlesKHR;
extern PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR;
#endif /* defined(VK_KHR_ray_tracing_pipeline) */
#if defined(VK_KHR_sampler_ycbcr_conversion)
extern PFN_vkCreateSamplerYcbcrConversionKHR vkCreateSamplerYcbcrConversionKHR;
extern PFN_vkDestroySamplerYcbcrConversionKHR vkDestroySamplerYcbcrConversionKHR;
@@ -845,6 +1266,45 @@ extern PFN_vkDestroySwapchainKHR vkDestroySwapchainKHR;
extern PFN_vkGetSwapchainImagesKHR vkGetSwapchainImagesKHR;
extern PFN_vkQueuePresentKHR vkQueuePresentKHR;
#endif /* defined(VK_KHR_swapchain) */
#if defined(VK_KHR_synchronization2)
extern PFN_vkCmdPipelineBarrier2KHR vkCmdPipelineBarrier2KHR;
extern PFN_vkCmdResetEvent2KHR vkCmdResetEvent2KHR;
extern PFN_vkCmdSetEvent2KHR vkCmdSetEvent2KHR;
extern PFN_vkCmdWaitEvents2KHR vkCmdWaitEvents2KHR;
extern PFN_vkCmdWriteTimestamp2KHR vkCmdWriteTimestamp2KHR;
extern PFN_vkQueueSubmit2KHR vkQueueSubmit2KHR;
#endif /* defined(VK_KHR_synchronization2) */
#if defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker)
extern PFN_vkCmdWriteBufferMarker2AMD vkCmdWriteBufferMarker2AMD;
#endif /* defined(VK_KHR_synchronization2) && defined(VK_AMD_buffer_marker) */
#if defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints)
extern PFN_vkGetQueueCheckpointData2NV vkGetQueueCheckpointData2NV;
#endif /* defined(VK_KHR_synchronization2) && defined(VK_NV_device_diagnostic_checkpoints) */
#if defined(VK_KHR_timeline_semaphore)
extern PFN_vkGetSemaphoreCounterValueKHR vkGetSemaphoreCounterValueKHR;
extern PFN_vkSignalSemaphoreKHR vkSignalSemaphoreKHR;
extern PFN_vkWaitSemaphoresKHR vkWaitSemaphoresKHR;
#endif /* defined(VK_KHR_timeline_semaphore) */
#if defined(VK_KHR_video_decode_queue)
extern PFN_vkCmdDecodeVideoKHR vkCmdDecodeVideoKHR;
#endif /* defined(VK_KHR_video_decode_queue) */
#if defined(VK_KHR_video_encode_queue)
extern PFN_vkCmdEncodeVideoKHR vkCmdEncodeVideoKHR;
#endif /* defined(VK_KHR_video_encode_queue) */
#if defined(VK_KHR_video_queue)
extern PFN_vkBindVideoSessionMemoryKHR vkBindVideoSessionMemoryKHR;
extern PFN_vkCmdBeginVideoCodingKHR vkCmdBeginVideoCodingKHR;
extern PFN_vkCmdControlVideoCodingKHR vkCmdControlVideoCodingKHR;
extern PFN_vkCmdEndVideoCodingKHR vkCmdEndVideoCodingKHR;
extern PFN_vkCreateVideoSessionKHR vkCreateVideoSessionKHR;
extern PFN_vkCreateVideoSessionParametersKHR vkCreateVideoSessionParametersKHR;
extern PFN_vkDestroyVideoSessionKHR vkDestroyVideoSessionKHR;
extern PFN_vkDestroyVideoSessionParametersKHR vkDestroyVideoSessionParametersKHR;
extern PFN_vkGetPhysicalDeviceVideoCapabilitiesKHR vkGetPhysicalDeviceVideoCapabilitiesKHR;
extern PFN_vkGetPhysicalDeviceVideoFormatPropertiesKHR vkGetPhysicalDeviceVideoFormatPropertiesKHR;
extern PFN_vkGetVideoSessionMemoryRequirementsKHR vkGetVideoSessionMemoryRequirementsKHR;
extern PFN_vkUpdateVideoSessionParametersKHR vkUpdateVideoSessionParametersKHR;
#endif /* defined(VK_KHR_video_queue) */
#if defined(VK_KHR_wayland_surface)
extern PFN_vkCreateWaylandSurfaceKHR vkCreateWaylandSurfaceKHR;
extern PFN_vkGetPhysicalDeviceWaylandPresentationSupportKHR vkGetPhysicalDeviceWaylandPresentationSupportKHR;
@@ -870,36 +1330,54 @@ extern PFN_vkCreateMacOSSurfaceMVK vkCreateMacOSSurfaceMVK;
#if defined(VK_NN_vi_surface)
extern PFN_vkCreateViSurfaceNN vkCreateViSurfaceNN;
#endif /* defined(VK_NN_vi_surface) */
#if defined(VK_NVX_device_generated_commands)
extern PFN_vkCmdProcessCommandsNVX vkCmdProcessCommandsNVX;
extern PFN_vkCmdReserveSpaceForCommandsNVX vkCmdReserveSpaceForCommandsNVX;
extern PFN_vkCreateIndirectCommandsLayoutNVX vkCreateIndirectCommandsLayoutNVX;
extern PFN_vkCreateObjectTableNVX vkCreateObjectTableNVX;
extern PFN_vkDestroyIndirectCommandsLayoutNVX vkDestroyIndirectCommandsLayoutNVX;
extern PFN_vkDestroyObjectTableNVX vkDestroyObjectTableNVX;
extern PFN_vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX vkGetPhysicalDeviceGeneratedCommandsPropertiesNVX;
extern PFN_vkRegisterObjectsNVX vkRegisterObjectsNVX;
extern PFN_vkUnregisterObjectsNVX vkUnregisterObjectsNVX;
#endif /* defined(VK_NVX_device_generated_commands) */
#if defined(VK_NVX_binary_import)
extern PFN_vkCmdCuLaunchKernelNVX vkCmdCuLaunchKernelNVX;
extern PFN_vkCreateCuFunctionNVX vkCreateCuFunctionNVX;
extern PFN_vkCreateCuModuleNVX vkCreateCuModuleNVX;
extern PFN_vkDestroyCuFunctionNVX vkDestroyCuFunctionNVX;
extern PFN_vkDestroyCuModuleNVX vkDestroyCuModuleNVX;
#endif /* defined(VK_NVX_binary_import) */
#if defined(VK_NVX_image_view_handle)
extern PFN_vkGetImageViewAddressNVX vkGetImageViewAddressNVX;
extern PFN_vkGetImageViewHandleNVX vkGetImageViewHandleNVX;
#endif /* defined(VK_NVX_image_view_handle) */
#if defined(VK_NV_acquire_winrt_display)
extern PFN_vkAcquireWinrtDisplayNV vkAcquireWinrtDisplayNV;
extern PFN_vkGetWinrtDisplayNV vkGetWinrtDisplayNV;
#endif /* defined(VK_NV_acquire_winrt_display) */
#if defined(VK_NV_clip_space_w_scaling)
extern PFN_vkCmdSetViewportWScalingNV vkCmdSetViewportWScalingNV;
#endif /* defined(VK_NV_clip_space_w_scaling) */
#if defined(VK_NV_cooperative_matrix)
extern PFN_vkGetPhysicalDeviceCooperativeMatrixPropertiesNV vkGetPhysicalDeviceCooperativeMatrixPropertiesNV;
#endif /* defined(VK_NV_cooperative_matrix) */
#if defined(VK_NV_coverage_reduction_mode)
extern PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV;
#endif /* defined(VK_NV_coverage_reduction_mode) */
#if defined(VK_NV_device_diagnostic_checkpoints)
extern PFN_vkCmdSetCheckpointNV vkCmdSetCheckpointNV;
extern PFN_vkGetQueueCheckpointDataNV vkGetQueueCheckpointDataNV;
#endif /* defined(VK_NV_device_diagnostic_checkpoints) */
#if defined(VK_NV_device_generated_commands)
extern PFN_vkCmdBindPipelineShaderGroupNV vkCmdBindPipelineShaderGroupNV;
extern PFN_vkCmdExecuteGeneratedCommandsNV vkCmdExecuteGeneratedCommandsNV;
extern PFN_vkCmdPreprocessGeneratedCommandsNV vkCmdPreprocessGeneratedCommandsNV;
extern PFN_vkCreateIndirectCommandsLayoutNV vkCreateIndirectCommandsLayoutNV;
extern PFN_vkDestroyIndirectCommandsLayoutNV vkDestroyIndirectCommandsLayoutNV;
extern PFN_vkGetGeneratedCommandsMemoryRequirementsNV vkGetGeneratedCommandsMemoryRequirementsNV;
#endif /* defined(VK_NV_device_generated_commands) */
#if defined(VK_NV_external_memory_capabilities)
extern PFN_vkGetPhysicalDeviceExternalImageFormatPropertiesNV vkGetPhysicalDeviceExternalImageFormatPropertiesNV;
#endif /* defined(VK_NV_external_memory_capabilities) */
#if defined(VK_NV_external_memory_rdma)
extern PFN_vkGetMemoryRemoteAddressNV vkGetMemoryRemoteAddressNV;
#endif /* defined(VK_NV_external_memory_rdma) */
#if defined(VK_NV_external_memory_win32)
extern PFN_vkGetMemoryWin32HandleNV vkGetMemoryWin32HandleNV;
#endif /* defined(VK_NV_external_memory_win32) */
#if defined(VK_NV_fragment_shading_rate_enums)
extern PFN_vkCmdSetFragmentShadingRateEnumNV vkCmdSetFragmentShadingRateEnumNV;
#endif /* defined(VK_NV_fragment_shading_rate_enums) */
#if defined(VK_NV_mesh_shader)
extern PFN_vkCmdDrawMeshTasksIndirectCountNV vkCmdDrawMeshTasksIndirectCountNV;
extern PFN_vkCmdDrawMeshTasksIndirectNV vkCmdDrawMeshTasksIndirectNV;
@@ -927,9 +1405,16 @@ extern PFN_vkCmdBindShadingRateImageNV vkCmdBindShadingRateImageNV;
extern PFN_vkCmdSetCoarseSampleOrderNV vkCmdSetCoarseSampleOrderNV;
extern PFN_vkCmdSetViewportShadingRatePaletteNV vkCmdSetViewportShadingRatePaletteNV;
#endif /* defined(VK_NV_shading_rate_image) */
#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1))
#if defined(VK_QNX_screen_surface)
extern PFN_vkCreateScreenSurfaceQNX vkCreateScreenSurfaceQNX;
extern PFN_vkGetPhysicalDeviceScreenPresentationSupportQNX vkGetPhysicalDeviceScreenPresentationSupportQNX;
#endif /* defined(VK_QNX_screen_surface) */
#if (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1))
extern PFN_vkGetDeviceGroupSurfacePresentModes2EXT vkGetDeviceGroupSurfacePresentModes2EXT;
#endif /* (defined(VK_EXT_full_screen_exclusive) && defined(VK_KHR_device_group)) || (defined(VK_EXT_full_screen_exclusive) && defined(VK_VERSION_1_1)) */
#if (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template))
extern PFN_vkCmdPushDescriptorSetWithTemplateKHR vkCmdPushDescriptorSetWithTemplateKHR;
#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) */
#endif /* (defined(VK_KHR_descriptor_update_template) && defined(VK_KHR_push_descriptor)) || (defined(VK_KHR_push_descriptor) && defined(VK_VERSION_1_1)) || (defined(VK_KHR_push_descriptor) && defined(VK_KHR_descriptor_update_template)) */
#if (defined(VK_KHR_device_group) && defined(VK_KHR_surface)) || (defined(VK_KHR_swapchain) && defined(VK_VERSION_1_1))
extern PFN_vkGetDeviceGroupPresentCapabilitiesKHR vkGetDeviceGroupPresentCapabilitiesKHR;
extern PFN_vkGetDeviceGroupSurfacePresentModesKHR vkGetDeviceGroupSurfacePresentModesKHR;
@@ -946,6 +1431,14 @@ extern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;
#endif
#ifdef VOLK_IMPLEMENTATION
#undef VOLK_IMPLEMENTATION
// Prevent tools like dependency checkers that don't evaluate
// macros from detecting a cyclic dependency.
#define VOLK_SOURCE "volk.c"
#include VOLK_SOURCE
#endif
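// Added usage note (illustrative sketch, not part of volk itself): an application
// that wants a header-only setup defines VOLK_IMPLEMENTATION in exactly one
// translation unit before including this header; the block above then pulls in
// volk.c through the VOLK_SOURCE macro indirection. The file name below is only
// an example.
//
//   // volk_impl.cpp
//   #define VOLK_IMPLEMENTATION
//   #include "volk/volk.h"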
/**
* Copyright (c) 2018-2019 Arseny Kapoulkine
*
@@ -967,3 +1460,4 @@ extern PFN_vkAcquireNextImage2KHR vkAcquireNextImage2KHR;
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* clang-format on */
View file
@@ -0,0 +1,519 @@
#pragma once
#include "vulkanobjects.h"
#include <cassert>
#include <set>
class VulkanCompatibleDevice;
class VulkanInstanceBuilder
{
public:
VulkanInstanceBuilder();
VulkanInstanceBuilder& ApiVersionsToTry(const std::vector<uint32_t>& versions);
VulkanInstanceBuilder& RequireExtension(const std::string& extensionName);
VulkanInstanceBuilder& RequireSurfaceExtensions(bool enable = true);
VulkanInstanceBuilder& OptionalExtension(const std::string& extensionName);
VulkanInstanceBuilder& DebugLayer(bool enable = true);
std::shared_ptr<VulkanInstance> Create();
private:
std::vector<uint32_t> apiVersionsToTry;
std::set<std::string> requiredExtensions;
std::set<std::string> optionalExtensions;
bool debugLayer = false;
};
#ifdef VK_USE_PLATFORM_WIN32_KHR
class VulkanSurfaceBuilder
{
public:
VulkanSurfaceBuilder();
VulkanSurfaceBuilder& Win32Window(HWND handle);
std::shared_ptr<VulkanSurface> Create(std::shared_ptr<VulkanInstance> instance);
private:
HWND hwnd = {};
};
#endif
class VulkanDeviceBuilder
{
public:
VulkanDeviceBuilder();
VulkanDeviceBuilder& RequireExtension(const std::string& extensionName);
VulkanDeviceBuilder& OptionalExtension(const std::string& extensionName);
VulkanDeviceBuilder& OptionalRayQuery();
VulkanDeviceBuilder& OptionalDescriptorIndexing();
VulkanDeviceBuilder& Surface(std::shared_ptr<VulkanSurface> surface);
VulkanDeviceBuilder& SelectDevice(int index);
std::vector<VulkanCompatibleDevice> FindDevices(const std::shared_ptr<VulkanInstance>& instance);
std::shared_ptr<VulkanDevice> Create(std::shared_ptr<VulkanInstance> instance);
private:
std::set<std::string> requiredDeviceExtensions;
std::set<std::string> optionalDeviceExtensions;
std::shared_ptr<VulkanSurface> surface;
int deviceIndex = 0;
};
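// Added usage sketch (not part of the original header): shows how the builders
// above are intended to chain together. The ExampleCreateDevice helper, the
// 'hwnd' parameter and the specific optional features are illustrative choices,
// not requirements of the API.
#ifdef VK_USE_PLATFORM_WIN32_KHR
inline std::shared_ptr<VulkanDevice> ExampleCreateDevice(HWND hwnd)
{
	// Instance with the platform surface extensions and no debug layer.
	auto instance = VulkanInstanceBuilder()
		.RequireSurfaceExtensions()
		.DebugLayer(false)
		.Create();

	// Presentation surface for the given window handle.
	auto surface = VulkanSurfaceBuilder()
		.Win32Window(hwnd)
		.Create(instance);

	// First compatible device, with a couple of optional features requested.
	return VulkanDeviceBuilder()
		.OptionalDescriptorIndexing()
		.OptionalRayQuery()
		.Surface(surface)
		.SelectDevice(0)
		.Create(instance);
}
#endif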
class VulkanSwapChainBuilder
{
public:
VulkanSwapChainBuilder();
std::shared_ptr<VulkanSwapChain> Create(VulkanDevice* device);
};
class CommandPoolBuilder
{
public:
CommandPoolBuilder();
CommandPoolBuilder& QueueFamily(int index);
CommandPoolBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanCommandPool> Create(VulkanDevice* device);
private:
const char* debugName = nullptr;
int queueFamilyIndex = -1;
};
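// Added usage sketch (not part of the original header): creates a command pool on
// the device's graphics queue family and allocates one command buffer from it.
// The helper name and debug names are placeholders.
inline std::unique_ptr<VulkanCommandBuffer> ExampleCreateCommands(VulkanDevice* device, std::unique_ptr<VulkanCommandPool>& pool)
{
	pool = CommandPoolBuilder()
		.QueueFamily(device->GraphicsFamily)
		.DebugName("ExampleCommandPool")
		.Create(device);

	auto cmdbuffer = pool->createBuffer();
	cmdbuffer->SetDebugName("ExampleCommands");
	return cmdbuffer;
}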
class SemaphoreBuilder
{
public:
SemaphoreBuilder();
SemaphoreBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanSemaphore> Create(VulkanDevice* device);
private:
const char* debugName = nullptr;
};
class FenceBuilder
{
public:
FenceBuilder();
FenceBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanFence> Create(VulkanDevice* device);
private:
const char* debugName = nullptr;
};
class ImageBuilder
{
public:
ImageBuilder();
ImageBuilder& Size(int width, int height, int miplevels = 1, int arrayLayers = 1);
ImageBuilder& Samples(VkSampleCountFlagBits samples);
ImageBuilder& Format(VkFormat format);
ImageBuilder& Usage(VkImageUsageFlags imageUsage, VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_GPU_ONLY, VmaAllocationCreateFlags allocFlags = 0);
ImageBuilder& MemoryType(VkMemoryPropertyFlags requiredFlags, VkMemoryPropertyFlags preferredFlags, uint32_t memoryTypeBits = 0);
ImageBuilder& LinearTiling();
ImageBuilder& DebugName(const char* name) { debugName = name; return *this; }
bool IsFormatSupported(VulkanDevice *device, VkFormatFeatureFlags bufferFeatures = 0);
std::unique_ptr<VulkanImage> Create(VulkanDevice *device, VkDeviceSize* allocatedBytes = nullptr);
std::unique_ptr<VulkanImage> TryCreate(VulkanDevice *device);
private:
VkImageCreateInfo imageInfo = {};
VmaAllocationCreateInfo allocInfo = {};
const char* debugName = nullptr;
};
class ImageViewBuilder
{
public:
ImageViewBuilder();
ImageViewBuilder& Type(VkImageViewType type);
ImageViewBuilder& Image(VulkanImage *image, VkFormat format, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT);
ImageViewBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanImageView> Create(VulkanDevice *device);
private:
VkImageViewCreateInfo viewInfo = {};
const char* debugName = nullptr;
};
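// Added usage sketch (not part of the original header): allocates a GPU-only
// color buffer and a matching 2D view. The resolution, format and debug names are
// illustrative values.
inline std::unique_ptr<VulkanImageView> ExampleCreateColorBuffer(VulkanDevice* device, std::unique_ptr<VulkanImage>& image)
{
	image = ImageBuilder()
		.Size(1920, 1080)
		.Format(VK_FORMAT_R16G16B16A16_SFLOAT)
		.Usage(VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT)
		.DebugName("ExampleColorBuffer")
		.Create(device);

	return ImageViewBuilder()
		.Type(VK_IMAGE_VIEW_TYPE_2D)
		.Image(image.get(), VK_FORMAT_R16G16B16A16_SFLOAT)
		.DebugName("ExampleColorBufferView")
		.Create(device);
}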
class SamplerBuilder
{
public:
SamplerBuilder();
SamplerBuilder& AddressMode(VkSamplerAddressMode addressMode);
SamplerBuilder& AddressMode(VkSamplerAddressMode u, VkSamplerAddressMode v, VkSamplerAddressMode w);
SamplerBuilder& MinFilter(VkFilter minFilter);
SamplerBuilder& MagFilter(VkFilter magFilter);
SamplerBuilder& MipmapMode(VkSamplerMipmapMode mode);
SamplerBuilder& Anisotropy(float maxAnisotropy);
SamplerBuilder& MipLodBias(float bias);
SamplerBuilder& MaxLod(float value);
SamplerBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanSampler> Create(VulkanDevice *device);
private:
VkSamplerCreateInfo samplerInfo = {};
const char* debugName = nullptr;
};
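// Added usage sketch (not part of the original header): a basic trilinear repeat
// sampler. The filter and anisotropy values are illustrative defaults.
inline std::unique_ptr<VulkanSampler> ExampleCreateSampler(VulkanDevice* device)
{
	return SamplerBuilder()
		.AddressMode(VK_SAMPLER_ADDRESS_MODE_REPEAT)
		.MinFilter(VK_FILTER_LINEAR)
		.MagFilter(VK_FILTER_LINEAR)
		.MipmapMode(VK_SAMPLER_MIPMAP_MODE_LINEAR)
		.Anisotropy(8.0f)
		.DebugName("ExampleSampler")
		.Create(device);
}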
class BufferBuilder
{
public:
BufferBuilder();
BufferBuilder& Size(size_t size);
BufferBuilder& Usage(VkBufferUsageFlags bufferUsage, VmaMemoryUsage memoryUsage = VMA_MEMORY_USAGE_GPU_ONLY, VmaAllocationCreateFlags allocFlags = 0);
BufferBuilder& MemoryType(VkMemoryPropertyFlags requiredFlags, VkMemoryPropertyFlags preferredFlags, uint32_t memoryTypeBits = 0);
BufferBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanBuffer> Create(VulkanDevice *device);
private:
VkBufferCreateInfo bufferInfo = {};
VmaAllocationCreateInfo allocInfo = {};
const char* debugName = nullptr;
};
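// Added usage sketch (not part of the original header): a CPU-side staging buffer
// used as a transfer source. The usage flags and memory type are one reasonable
// choice, not the only one.
inline std::unique_ptr<VulkanBuffer> ExampleCreateUploadBuffer(VulkanDevice* device, size_t size)
{
	return BufferBuilder()
		.Size(size)
		.Usage(VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VMA_MEMORY_USAGE_CPU_ONLY)
		.DebugName("ExampleUploadBuffer")
		.Create(device);
}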
class ShaderBuilder
{
public:
ShaderBuilder();
static void Init();
static void Deinit();
ShaderBuilder& VertexShader(const std::string &code);
ShaderBuilder& FragmentShader(const std::string&code);
ShaderBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanShader> Create(const char *shadername, VulkanDevice *device);
private:
std::string code;
int stage = 0;
const char* debugName = nullptr;
};
class AccelerationStructureBuilder
{
public:
AccelerationStructureBuilder();
AccelerationStructureBuilder& Type(VkAccelerationStructureTypeKHR type);
AccelerationStructureBuilder& Buffer(VulkanBuffer* buffer, VkDeviceSize size);
AccelerationStructureBuilder& Buffer(VulkanBuffer* buffer, VkDeviceSize offset, VkDeviceSize size);
AccelerationStructureBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanAccelerationStructure> Create(VulkanDevice* device);
private:
VkAccelerationStructureCreateInfoKHR createInfo = { VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_CREATE_INFO_KHR };
const char* debugName = nullptr;
};
class ComputePipelineBuilder
{
public:
ComputePipelineBuilder();
ComputePipelineBuilder& Cache(VulkanPipelineCache* cache);
ComputePipelineBuilder& Layout(VulkanPipelineLayout *layout);
ComputePipelineBuilder& ComputeShader(VulkanShader *shader);
ComputePipelineBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanPipeline> Create(VulkanDevice *device);
private:
VkComputePipelineCreateInfo pipelineInfo = {};
VkPipelineShaderStageCreateInfo stageInfo = {};
VulkanPipelineCache* cache = nullptr;
const char* debugName = nullptr;
};
class DescriptorSetLayoutBuilder
{
public:
DescriptorSetLayoutBuilder();
DescriptorSetLayoutBuilder& Flags(VkDescriptorSetLayoutCreateFlags flags);
DescriptorSetLayoutBuilder& AddBinding(int binding, VkDescriptorType type, int arrayCount, VkShaderStageFlags stageFlags, VkDescriptorBindingFlags flags = 0);
DescriptorSetLayoutBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanDescriptorSetLayout> Create(VulkanDevice *device);
private:
VkDescriptorSetLayoutCreateInfo layoutInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO };
VkDescriptorSetLayoutBindingFlagsCreateInfoEXT bindingFlagsInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO_EXT };
std::vector<VkDescriptorSetLayoutBinding> bindings;
std::vector<VkDescriptorBindingFlags> bindingFlags;
const char* debugName = nullptr;
};
class DescriptorPoolBuilder
{
public:
DescriptorPoolBuilder();
DescriptorPoolBuilder& Flags(VkDescriptorPoolCreateFlags flags);
DescriptorPoolBuilder& MaxSets(int value);
DescriptorPoolBuilder& AddPoolSize(VkDescriptorType type, int count);
DescriptorPoolBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanDescriptorPool> Create(VulkanDevice *device);
private:
std::vector<VkDescriptorPoolSize> poolSizes;
VkDescriptorPoolCreateInfo poolInfo = {};
const char* debugName = nullptr;
};
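// Added usage sketch (not part of the original header): one combined-image-sampler
// layout, a small pool sized for it, and a set allocated from that pool.
// VulkanDescriptorPool::allocate comes from vulkanobjects.h; the counts and names
// are illustrative.
inline std::unique_ptr<VulkanDescriptorSet> ExampleCreateTextureSet(VulkanDevice* device,
	std::unique_ptr<VulkanDescriptorSetLayout>& layout, std::unique_ptr<VulkanDescriptorPool>& pool)
{
	layout = DescriptorSetLayoutBuilder()
		.AddBinding(0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT)
		.DebugName("ExampleTextureSetLayout")
		.Create(device);

	pool = DescriptorPoolBuilder()
		.MaxSets(100)
		.AddPoolSize(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 100)
		.DebugName("ExampleTexturePool")
		.Create(device);

	return pool->allocate(layout.get());
}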
class QueryPoolBuilder
{
public:
QueryPoolBuilder();
QueryPoolBuilder& QueryType(VkQueryType type, int count, VkQueryPipelineStatisticFlags pipelineStatistics = 0);
QueryPoolBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanQueryPool> Create(VulkanDevice *device);
private:
VkQueryPoolCreateInfo poolInfo = {};
const char* debugName = nullptr;
};
class FramebufferBuilder
{
public:
FramebufferBuilder();
FramebufferBuilder& RenderPass(VulkanRenderPass *renderPass);
FramebufferBuilder& AddAttachment(VulkanImageView *view);
FramebufferBuilder& AddAttachment(VkImageView view);
FramebufferBuilder& Size(int width, int height, int layers = 1);
FramebufferBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanFramebuffer> Create(VulkanDevice *device);
private:
VkFramebufferCreateInfo framebufferInfo = {};
std::vector<VkImageView> attachments;
const char* debugName = nullptr;
};
class GraphicsPipelineBuilder
{
public:
GraphicsPipelineBuilder();
GraphicsPipelineBuilder& Cache(VulkanPipelineCache* cache);
GraphicsPipelineBuilder& Subpass(int subpass);
GraphicsPipelineBuilder& Layout(VulkanPipelineLayout *layout);
GraphicsPipelineBuilder& RenderPass(VulkanRenderPass *renderPass);
GraphicsPipelineBuilder& Topology(VkPrimitiveTopology topology);
GraphicsPipelineBuilder& Viewport(float x, float y, float width, float height, float minDepth = 0.0f, float maxDepth = 1.0f);
GraphicsPipelineBuilder& Scissor(int x, int y, int width, int height);
GraphicsPipelineBuilder& RasterizationSamples(VkSampleCountFlagBits samples);
GraphicsPipelineBuilder& Cull(VkCullModeFlags cullMode, VkFrontFace frontFace);
GraphicsPipelineBuilder& DepthStencilEnable(bool test, bool write, bool stencil);
GraphicsPipelineBuilder& DepthFunc(VkCompareOp func);
GraphicsPipelineBuilder& DepthClampEnable(bool value);
GraphicsPipelineBuilder& DepthBias(bool enable, float biasConstantFactor, float biasClamp, float biasSlopeFactor);
GraphicsPipelineBuilder& ColorWriteMask(VkColorComponentFlags mask);
GraphicsPipelineBuilder& Stencil(VkStencilOp failOp, VkStencilOp passOp, VkStencilOp depthFailOp, VkCompareOp compareOp, uint32_t compareMask, uint32_t writeMask, uint32_t reference);
GraphicsPipelineBuilder& AdditiveBlendMode();
GraphicsPipelineBuilder& AlphaBlendMode();
GraphicsPipelineBuilder& BlendMode(VkBlendOp op, VkBlendFactor src, VkBlendFactor dst);
GraphicsPipelineBuilder& SubpassColorAttachmentCount(int count);
GraphicsPipelineBuilder& AddVertexShader(VulkanShader *shader);
GraphicsPipelineBuilder& AddFragmentShader(VulkanShader *shader);
GraphicsPipelineBuilder& AddVertexBufferBinding(int index, size_t stride);
GraphicsPipelineBuilder& AddVertexAttribute(int location, int binding, VkFormat format, size_t offset);
GraphicsPipelineBuilder& AddDynamicState(VkDynamicState state);
GraphicsPipelineBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanPipeline> Create(VulkanDevice *device);
private:
VkGraphicsPipelineCreateInfo pipelineInfo = { };
VkPipelineVertexInputStateCreateInfo vertexInputInfo = { };
VkPipelineInputAssemblyStateCreateInfo inputAssembly = { };
VkViewport viewport = { };
VkRect2D scissor = { };
VkPipelineViewportStateCreateInfo viewportState = { };
VkPipelineRasterizationStateCreateInfo rasterizer = { };
VkPipelineMultisampleStateCreateInfo multisampling = { };
VkPipelineColorBlendAttachmentState colorBlendAttachment = { };
VkPipelineColorBlendStateCreateInfo colorBlending = { };
VkPipelineDepthStencilStateCreateInfo depthStencil = { };
VkPipelineDynamicStateCreateInfo dynamicState = {};
std::vector<VkPipelineShaderStageCreateInfo> shaderStages;
std::vector<VkPipelineColorBlendAttachmentState> colorBlendAttachments;
std::vector<VkVertexInputBindingDescription> vertexInputBindings;
std::vector<VkVertexInputAttributeDescription> vertexInputAttributes;
std::vector<VkDynamicState> dynamicStates;
VulkanPipelineCache* cache = nullptr;
const char* debugName = nullptr;
};
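// Added usage sketch (not part of the original header): a minimal alpha-blended
// pipeline with a single vec4 position stream and dynamic viewport/scissor.
// The vertex layout, shaders, render pass and layout are assumed to be supplied
// by the caller; the concrete values are illustrative.
inline std::unique_ptr<VulkanPipeline> ExampleCreatePipeline(VulkanDevice* device, VulkanPipelineLayout* layout,
	VulkanRenderPass* renderPass, VulkanShader* vertShader, VulkanShader* fragShader)
{
	return GraphicsPipelineBuilder()
		.Layout(layout)
		.RenderPass(renderPass)
		.Topology(VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
		.AddVertexShader(vertShader)
		.AddFragmentShader(fragShader)
		.AddVertexBufferBinding(0, sizeof(float) * 4)
		.AddVertexAttribute(0, 0, VK_FORMAT_R32G32B32A32_SFLOAT, 0)
		.AddDynamicState(VK_DYNAMIC_STATE_VIEWPORT)
		.AddDynamicState(VK_DYNAMIC_STATE_SCISSOR)
		.DepthStencilEnable(false, false, false)
		.AlphaBlendMode()
		.DebugName("ExamplePipeline")
		.Create(device);
}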
class PipelineLayoutBuilder
{
public:
PipelineLayoutBuilder();
PipelineLayoutBuilder& AddSetLayout(VulkanDescriptorSetLayout *setLayout);
PipelineLayoutBuilder& AddPushConstantRange(VkShaderStageFlags stageFlags, size_t offset, size_t size);
PipelineLayoutBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanPipelineLayout> Create(VulkanDevice *device);
private:
VkPipelineLayoutCreateInfo pipelineLayoutInfo = {};
std::vector<VkDescriptorSetLayout> setLayouts;
std::vector<VkPushConstantRange> pushConstantRanges;
const char* debugName = nullptr;
};
class PipelineCacheBuilder
{
public:
PipelineCacheBuilder();
PipelineCacheBuilder& InitialData(const void* data, size_t size);
PipelineCacheBuilder& Flags(VkPipelineCacheCreateFlags flags);
PipelineCacheBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanPipelineCache> Create(VulkanDevice* device);
private:
VkPipelineCacheCreateInfo pipelineCacheInfo = {};
std::vector<uint8_t> initData;
const char* debugName = nullptr;
};
class RenderPassBuilder
{
public:
RenderPassBuilder();
RenderPassBuilder& AddAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load, VkAttachmentStoreOp store, VkImageLayout initialLayout, VkImageLayout finalLayout);
RenderPassBuilder& AddDepthStencilAttachment(VkFormat format, VkSampleCountFlagBits samples, VkAttachmentLoadOp load, VkAttachmentStoreOp store, VkAttachmentLoadOp stencilLoad, VkAttachmentStoreOp stencilStore, VkImageLayout initialLayout, VkImageLayout finalLayout);
RenderPassBuilder& AddExternalSubpassDependency(VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
RenderPassBuilder& AddSubpass();
RenderPassBuilder& AddSubpassColorAttachmentRef(uint32_t index, VkImageLayout layout);
RenderPassBuilder& AddSubpassDepthStencilAttachmentRef(uint32_t index, VkImageLayout layout);
RenderPassBuilder& DebugName(const char* name) { debugName = name; return *this; }
std::unique_ptr<VulkanRenderPass> Create(VulkanDevice *device);
private:
VkRenderPassCreateInfo renderPassInfo = { };
std::vector<VkAttachmentDescription> attachments;
std::vector<VkSubpassDependency> dependencies;
std::vector<VkSubpassDescription> subpasses;
struct SubpassData
{
std::vector<VkAttachmentReference> colorRefs;
VkAttachmentReference depthRef = { };
};
std::vector<std::unique_ptr<SubpassData>> subpassData;
const char* debugName = nullptr;
};
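// Added usage sketch (not part of the original header): a single-subpass render
// pass with one cleared color attachment. Load/store ops and layouts are one
// common configuration, not the only valid one.
inline std::unique_ptr<VulkanRenderPass> ExampleCreateRenderPass(VulkanDevice* device, VkFormat colorFormat)
{
	return RenderPassBuilder()
		.AddAttachment(colorFormat, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_CLEAR, VK_ATTACHMENT_STORE_OP_STORE,
			VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
		.AddSubpass()
		.AddSubpassColorAttachmentRef(0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL)
		.DebugName("ExampleRenderPass")
		.Create(device);
}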
class PipelineBarrier
{
public:
PipelineBarrier& AddMemory(VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
PipelineBarrier& AddBuffer(VulkanBuffer *buffer, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
PipelineBarrier& AddBuffer(VulkanBuffer *buffer, VkDeviceSize offset, VkDeviceSize size, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
PipelineBarrier& AddImage(VulkanImage *image, VkImageLayout oldLayout, VkImageLayout newLayout, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, int baseMipLevel = 0, int levelCount = 1);
PipelineBarrier& AddImage(VkImage image, VkImageLayout oldLayout, VkImageLayout newLayout, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, int baseMipLevel = 0, int levelCount = 1);
PipelineBarrier& AddQueueTransfer(int srcFamily, int dstFamily, VulkanBuffer *buffer, VkAccessFlags srcAccessMask, VkAccessFlags dstAccessMask);
PipelineBarrier& AddQueueTransfer(int srcFamily, int dstFamily, VulkanImage *image, VkImageLayout layout, VkImageAspectFlags aspectMask = VK_IMAGE_ASPECT_COLOR_BIT, int baseMipLevel = 0, int levelCount = 1);
void Execute(VulkanCommandBuffer *commandBuffer, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, VkDependencyFlags dependencyFlags = 0);
private:
std::vector<VkMemoryBarrier> memoryBarriers;
std::vector<VkBufferMemoryBarrier> bufferMemoryBarriers;
std::vector<VkImageMemoryBarrier> imageMemoryBarriers;
};
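// Added usage sketch (not part of the original header): transitions a freshly
// created image into TRANSFER_DST layout before an upload, using the usual
// stage/access masks for that transition.
inline void ExampleTransitionForUpload(VulkanCommandBuffer* cmdbuffer, VulkanImage* image)
{
	PipelineBarrier()
		.AddImage(image, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 0, VK_ACCESS_TRANSFER_WRITE_BIT)
		.Execute(cmdbuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT);
}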
class QueueSubmit
{
public:
QueueSubmit();
QueueSubmit& AddCommandBuffer(VulkanCommandBuffer *buffer);
QueueSubmit& AddWait(VkPipelineStageFlags waitStageMask, VulkanSemaphore *semaphore);
QueueSubmit& AddSignal(VulkanSemaphore *semaphore);
void Execute(VulkanDevice *device, VkQueue queue, VulkanFence *fence = nullptr);
private:
VkSubmitInfo submitInfo = {};
std::vector<VkSemaphore> waitSemaphores;
std::vector<VkPipelineStageFlags> waitStages;
std::vector<VkSemaphore> signalSemaphores;
std::vector<VkCommandBuffer> commandBuffers;
};
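// Added usage sketch (not part of the original header): submits one command
// buffer on the graphics queue, waiting on an image-acquire semaphore and
// signalling a render-finished one. Semaphore roles are illustrative.
inline void ExampleSubmit(VulkanDevice* device, VulkanCommandBuffer* cmdbuffer,
	VulkanSemaphore* imageAvailable, VulkanSemaphore* renderFinished, VulkanFence* fence)
{
	QueueSubmit()
		.AddCommandBuffer(cmdbuffer)
		.AddWait(VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, imageAvailable)
		.AddSignal(renderFinished)
		.Execute(device, device->GraphicsQueue, fence);
}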
class WriteDescriptors
{
public:
WriteDescriptors& AddBuffer(VulkanDescriptorSet *descriptorSet, int binding, VkDescriptorType type, VulkanBuffer *buffer);
WriteDescriptors& AddBuffer(VulkanDescriptorSet *descriptorSet, int binding, VkDescriptorType type, VulkanBuffer *buffer, size_t offset, size_t range);
WriteDescriptors& AddStorageImage(VulkanDescriptorSet *descriptorSet, int binding, VulkanImageView *view, VkImageLayout imageLayout);
WriteDescriptors& AddCombinedImageSampler(VulkanDescriptorSet *descriptorSet, int binding, VulkanImageView *view, VulkanSampler *sampler, VkImageLayout imageLayout);
WriteDescriptors& AddCombinedImageSampler(VulkanDescriptorSet* descriptorSet, int binding, int arrayIndex, VulkanImageView* view, VulkanSampler* sampler, VkImageLayout imageLayout);
WriteDescriptors& AddAccelerationStructure(VulkanDescriptorSet* descriptorSet, int binding, VulkanAccelerationStructure* accelStruct);
void Execute(VulkanDevice *device);
private:
struct WriteExtra
{
VkDescriptorImageInfo imageInfo;
VkDescriptorBufferInfo bufferInfo;
VkBufferView bufferView;
VkWriteDescriptorSetAccelerationStructureKHR accelStruct;
};
std::vector<VkWriteDescriptorSet> writes;
std::vector<std::unique_ptr<WriteExtra>> writeExtras;
};
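// Added usage sketch (not part of the original header): binds a uniform buffer
// and a sampled texture to an already allocated set. The binding numbers are
// illustrative and must match the set layout.
inline void ExampleWriteDescriptors(VulkanDevice* device, VulkanDescriptorSet* set,
	VulkanBuffer* uniforms, VulkanImageView* view, VulkanSampler* sampler)
{
	WriteDescriptors()
		.AddBuffer(set, 0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, uniforms)
		.AddCombinedImageSampler(set, 1, view, sampler, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL)
		.Execute(device);
}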
View file
@@ -0,0 +1,19 @@
#pragma once
#include "vulkaninstance.h"
class VulkanSurface;
class VulkanCompatibleDevice
{
public:
VulkanPhysicalDevice* Device = nullptr;
int GraphicsFamily = -1;
int PresentFamily = -1;
bool GraphicsTimeQueries = false;
std::set<std::string> EnabledDeviceExtensions;
VulkanDeviceFeatures EnabledFeatures;
};
View file
@@ -0,0 +1,52 @@
#pragma once
#include "vulkaninstance.h"
#include <functional>
#include <mutex>
#include <vector>
#include <algorithm>
#include <memory>
class VulkanSwapChain;
class VulkanSemaphore;
class VulkanFence;
class VulkanPhysicalDevice;
class VulkanSurface;
class VulkanCompatibleDevice;
class VulkanDevice
{
public:
VulkanDevice(std::shared_ptr<VulkanInstance> instance, std::shared_ptr<VulkanSurface> surface, const VulkanCompatibleDevice& selectedDevice);
~VulkanDevice();
std::set<std::string> EnabledDeviceExtensions;
VulkanDeviceFeatures EnabledFeatures;
VulkanPhysicalDevice PhysicalDevice;
std::shared_ptr<VulkanInstance> Instance;
std::shared_ptr<VulkanSurface> Surface;
VkDevice device = VK_NULL_HANDLE;
VmaAllocator allocator = VK_NULL_HANDLE;
VkQueue GraphicsQueue = VK_NULL_HANDLE;
VkQueue PresentQueue = VK_NULL_HANDLE;
int GraphicsFamily = -1;
int PresentFamily = -1;
bool GraphicsTimeQueries = false;
bool SupportsDeviceExtension(const char* ext) const;
void SetObjectName(const char* name, uint64_t handle, VkObjectType type);
private:
bool DebugLayerActive = false;
void CreateDevice();
void CreateAllocator();
void ReleaseResources();
};
View file
@@ -0,0 +1,94 @@
#pragma once
#if defined(_WIN32)
#define VK_USE_PLATFORM_WIN32_KHR
#elif defined(__APPLE__)
#define VK_USE_PLATFORM_MACOS_MVK
#define VK_USE_PLATFORM_METAL_EXT
#endif
#include "volk/volk.h"
#include "vk_mem_alloc/vk_mem_alloc.h"
#if defined(_WIN32)
#undef min
#undef max
#endif
#include <functional>
#include <memory>
#include <string>
#include <vector>
#include <set>
class VulkanDeviceFeatures
{
public:
VkPhysicalDeviceFeatures Features = {};
VkPhysicalDeviceBufferDeviceAddressFeatures BufferDeviceAddress = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BUFFER_DEVICE_ADDRESS_FEATURES };
VkPhysicalDeviceAccelerationStructureFeaturesKHR AccelerationStructure = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ACCELERATION_STRUCTURE_FEATURES_KHR };
VkPhysicalDeviceRayQueryFeaturesKHR RayQuery = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_QUERY_FEATURES_KHR };
VkPhysicalDeviceDescriptorIndexingFeatures DescriptorIndexing = { VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT };
};
class VulkanPhysicalDevice
{
public:
VkPhysicalDevice Device = VK_NULL_HANDLE;
std::vector<VkExtensionProperties> Extensions;
std::vector<VkQueueFamilyProperties> QueueFamilies;
VkPhysicalDeviceProperties Properties = {};
VkPhysicalDeviceMemoryProperties MemoryProperties = {};
VulkanDeviceFeatures Features;
};
class VulkanInstance
{
public:
VulkanInstance(std::vector<uint32_t> apiVersionsToTry, std::set<std::string> requiredExtensions, std::set<std::string> optionalExtensions, bool wantDebugLayer);
~VulkanInstance();
std::vector<uint32_t> ApiVersionsToTry;
std::set<std::string> RequiredExtensions;
std::set<std::string> OptionalExtensions;
std::vector<VkLayerProperties> AvailableLayers;
std::vector<VkExtensionProperties> AvailableExtensions;
std::set<std::string> EnabledValidationLayers;
std::set<std::string> EnabledExtensions;
std::vector<VulkanPhysicalDevice> PhysicalDevices;
uint32_t ApiVersion = {};
VkInstance Instance = VK_NULL_HANDLE;
bool DebugLayerActive = false;
private:
bool WantDebugLayer = false;
VkDebugUtilsMessengerEXT debugMessenger = VK_NULL_HANDLE;
void CreateInstance();
void ReleaseResources();
static void InitVolk();
static std::vector<VkLayerProperties> GetAvailableLayers();
static std::vector<VkExtensionProperties> GetExtensions();
static std::vector<VulkanPhysicalDevice> GetPhysicalDevices(VkInstance instance, uint32_t apiVersion);
static VKAPI_ATTR VkBool32 VKAPI_CALL DebugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, void* pUserData);
static std::vector<std::string> SplitString(const std::string& s, const std::string& seperator);
};
std::string VkResultToString(VkResult result);
void VulkanPrintLog(const char* typestr, const std::string& msg);
void VulkanError(const char* text);
inline void CheckVulkanError(VkResult result, const char* text)
{
if (result >= VK_SUCCESS) return;
VulkanError((text + std::string(": ") + VkResultToString(result)).c_str());
}
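// Added usage sketch (not part of the original header): wraps a raw Vulkan call
// so a failure is reported through VulkanError with a readable result string.
// The helper name is a placeholder.
inline void ExampleWaitIdle(VkDevice device)
{
	CheckVulkanError(vkDeviceWaitIdle(device), "vkDeviceWaitIdle failed");
}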
View file
@@ -1,10 +1,10 @@
#pragma once
#include "vk_device.h"
#include "engineerrors.h"
#include "vulkandevice.h"
class VulkanCommandPool;
class VulkanDescriptorPool;
class VulkanCommandBuffer;
class VulkanSemaphore
{
@@ -12,7 +12,7 @@ public:
VulkanSemaphore(VulkanDevice *device);
~VulkanSemaphore();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)semaphore, VK_OBJECT_TYPE_SEMAPHORE); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)semaphore, VK_OBJECT_TYPE_SEMAPHORE); }
VulkanDevice *device = nullptr;
VkSemaphore semaphore = VK_NULL_HANDLE;
@@ -28,7 +28,7 @@ public:
VulkanFence(VulkanDevice *device);
~VulkanFence();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)fence, VK_OBJECT_TYPE_FENCE); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)fence, VK_OBJECT_TYPE_FENCE); }
VulkanDevice *device = nullptr;
VkFence fence = VK_NULL_HANDLE;
@@ -44,7 +44,19 @@ public:
VulkanBuffer(VulkanDevice *device, VkBuffer buffer, VmaAllocation allocation, size_t size);
~VulkanBuffer();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)buffer, VK_OBJECT_TYPE_BUFFER); }
VkDeviceAddress GetDeviceAddress()
{
VkBufferDeviceAddressInfo info = { VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO };
info.buffer = buffer;
return vkGetBufferDeviceAddress(device->device, &info);
}
#ifdef _DEBUG
void SetDebugName(const char* name) { debugName = name; device->SetObjectName(name, (uint64_t)buffer, VK_OBJECT_TYPE_BUFFER); }
std::string debugName;
#else
void SetDebugName(const char* name) { device->SetObjectName(name, (uint64_t)buffer, VK_OBJECT_TYPE_BUFFER); }
#endif
VulkanDevice *device = nullptr;
@@ -66,7 +78,7 @@ public:
VulkanFramebuffer(VulkanDevice *device, VkFramebuffer framebuffer);
~VulkanFramebuffer();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)framebuffer, VK_OBJECT_TYPE_FRAMEBUFFER); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)framebuffer, VK_OBJECT_TYPE_FRAMEBUFFER); }
VulkanDevice *device;
VkFramebuffer framebuffer;
@@ -79,15 +91,16 @@ private:
class VulkanImage
{
public:
VulkanImage(VulkanDevice *device, VkImage image, VmaAllocation allocation, int width, int height, int mipLevels);
VulkanImage(VulkanDevice *device, VkImage image, VmaAllocation allocation, int width, int height, int mipLevels, int layerCount);
~VulkanImage();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)image, VK_OBJECT_TYPE_IMAGE); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)image, VK_OBJECT_TYPE_IMAGE); }
VkImage image = VK_NULL_HANDLE;
int width = 0;
int height = 0;
int mipLevels = 1;
int layerCount = 1;
void *Map(size_t offset, size_t size);
void Unmap();
@@ -106,7 +119,7 @@ public:
VulkanImageView(VulkanDevice *device, VkImageView view);
~VulkanImageView();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)view, VK_OBJECT_TYPE_IMAGE_VIEW); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)view, VK_OBJECT_TYPE_IMAGE_VIEW); }
VkImageView view = VK_NULL_HANDLE;
@@ -123,7 +136,7 @@ public:
VulkanSampler(VulkanDevice *device, VkSampler sampler);
~VulkanSampler();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)sampler, VK_OBJECT_TYPE_SAMPLER); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)sampler, VK_OBJECT_TYPE_SAMPLER); }
VkSampler sampler = VK_NULL_HANDLE;
@@ -140,7 +153,7 @@ public:
VulkanShader(VulkanDevice *device, VkShaderModule module);
~VulkanShader();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)module, VK_OBJECT_TYPE_SHADER_MODULE); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)module, VK_OBJECT_TYPE_SHADER_MODULE); }
VkShaderModule module = VK_NULL_HANDLE;
@@ -157,7 +170,7 @@ public:
VulkanDescriptorSetLayout(VulkanDevice *device, VkDescriptorSetLayout layout);
~VulkanDescriptorSetLayout();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)layout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)layout, VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT); }
VulkanDevice *device;
VkDescriptorSetLayout layout;
@@ -173,7 +186,12 @@ public:
VulkanDescriptorSet(VulkanDevice *device, VulkanDescriptorPool *pool, VkDescriptorSet set);
~VulkanDescriptorSet();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)set, VK_OBJECT_TYPE_DESCRIPTOR_SET); }
#ifdef _DEBUG
void SetDebugName(const char* name) { debugName = name; device->SetObjectName(name, (uint64_t)set, VK_OBJECT_TYPE_DESCRIPTOR_SET); }
std::string debugName;
#else
void SetDebugName(const char* name) { device->SetObjectName(name, (uint64_t)set, VK_OBJECT_TYPE_DESCRIPTOR_SET); }
#endif
VulkanDevice *device;
VulkanDescriptorPool *pool;
@@ -190,15 +208,26 @@ public:
VulkanDescriptorPool(VulkanDevice *device, VkDescriptorPool pool);
~VulkanDescriptorPool();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_DESCRIPTOR_POOL); }
#ifdef _DEBUG
void SetDebugName(const char* name) { debugName = name; device->SetObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_DESCRIPTOR_POOL); }
std::string debugName;
#else
void SetDebugName(const char* name) { device->SetObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_DESCRIPTOR_POOL); }
#endif
std::unique_ptr<VulkanDescriptorSet> tryAllocate(VulkanDescriptorSetLayout *layout);
std::unique_ptr<VulkanDescriptorSet> tryAllocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount);
std::unique_ptr<VulkanDescriptorSet> allocate(VulkanDescriptorSetLayout *layout);
std::unique_ptr<VulkanDescriptorSet> allocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount);
VulkanDevice *device;
VkDescriptorPool pool;
private:
enum class AllocType { TryAllocate, AlwaysAllocate };
std::unique_ptr<VulkanDescriptorSet> allocate(VulkanDescriptorSetLayout* layout, AllocType allocType);
std::unique_ptr<VulkanDescriptorSet> allocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount, AllocType allocType);
VulkanDescriptorPool(const VulkanDescriptorPool &) = delete;
VulkanDescriptorPool &operator=(const VulkanDescriptorPool &) = delete;
};
@@ -209,7 +238,7 @@ public:
VulkanQueryPool(VulkanDevice *device, VkQueryPool pool);
~VulkanQueryPool();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_QUERY_POOL); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_QUERY_POOL); }
bool getResults(uint32_t firstQuery, uint32_t queryCount, size_t dataSize, void *data, VkDeviceSize stride, VkQueryResultFlags flags);
@@ -221,13 +250,36 @@ private:
VulkanQueryPool &operator=(const VulkanQueryPool &) = delete;
};
class VulkanAccelerationStructure
{
public:
VulkanAccelerationStructure(VulkanDevice* device, VkAccelerationStructureKHR accelstruct);
~VulkanAccelerationStructure();
VkDeviceAddress GetDeviceAddress()
{
VkAccelerationStructureDeviceAddressInfoKHR addressInfo = { VK_STRUCTURE_TYPE_ACCELERATION_STRUCTURE_DEVICE_ADDRESS_INFO_KHR };
addressInfo.accelerationStructure = accelstruct;
return vkGetAccelerationStructureDeviceAddressKHR(device->device, &addressInfo);
}
void SetDebugName(const char* name) { device->SetObjectName(name, (uint64_t)accelstruct, VK_OBJECT_TYPE_ACCELERATION_STRUCTURE_KHR); }
VulkanDevice* device;
VkAccelerationStructureKHR accelstruct;
private:
VulkanAccelerationStructure(const VulkanAccelerationStructure&) = delete;
VulkanAccelerationStructure& operator=(const VulkanAccelerationStructure&) = delete;
};
class VulkanPipeline
{
public:
VulkanPipeline(VulkanDevice *device, VkPipeline pipeline);
~VulkanPipeline();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)pipeline, VK_OBJECT_TYPE_PIPELINE); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)pipeline, VK_OBJECT_TYPE_PIPELINE); }
VulkanDevice *device;
VkPipeline pipeline;
@@ -243,7 +295,7 @@ public:
VulkanPipelineLayout(VulkanDevice *device, VkPipelineLayout layout);
~VulkanPipelineLayout();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)layout, VK_OBJECT_TYPE_PIPELINE_LAYOUT); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)layout, VK_OBJECT_TYPE_PIPELINE_LAYOUT); }
VulkanDevice *device;
VkPipelineLayout layout;
@@ -253,13 +305,31 @@ private:
VulkanPipelineLayout &operator=(const VulkanPipelineLayout &) = delete;
};
class VulkanPipelineCache
{
public:
VulkanPipelineCache(VulkanDevice* device, VkPipelineCache cache);
~VulkanPipelineCache();
void SetDebugName(const char* name) { device->SetObjectName(name, (uint64_t)cache, VK_OBJECT_TYPE_PIPELINE_CACHE); }
std::vector<uint8_t> GetCacheData();
VulkanDevice* device;
VkPipelineCache cache;
private:
VulkanPipelineCache(const VulkanPipelineCache&) = delete;
VulkanPipelineCache& operator=(const VulkanPipelineCache&) = delete;
};
class VulkanRenderPass
{
public:
VulkanRenderPass(VulkanDevice *device, VkRenderPass renderPass);
~VulkanRenderPass();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)renderPass, VK_OBJECT_TYPE_RENDER_PASS); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)renderPass, VK_OBJECT_TYPE_RENDER_PASS); }
VulkanDevice *device;
VkRenderPass renderPass;
@@ -274,13 +344,15 @@ class RenderPassBegin
public:
RenderPassBegin();
void setRenderPass(VulkanRenderPass *renderpass);
void setRenderArea(int x, int y, int width, int height);
void setFramebuffer(VulkanFramebuffer *framebuffer);
void addClearColor(float r, float g, float b, float a);
void addClearDepth(float value);
void addClearStencil(int value);
void addClearDepthStencil(float depthValue, int stencilValue);
RenderPassBegin& RenderPass(VulkanRenderPass* renderpass);
RenderPassBegin& RenderArea(int x, int y, int width, int height);
RenderPassBegin& Framebuffer(VulkanFramebuffer* framebuffer);
RenderPassBegin& AddClearColor(float r, float g, float b, float a);
RenderPassBegin& AddClearDepth(float value);
RenderPassBegin& AddClearStencil(int value);
RenderPassBegin& AddClearDepthStencil(float depthValue, int stencilValue);
void Execute(VulkanCommandBuffer* cmdbuffer, VkSubpassContents contents = VK_SUBPASS_CONTENTS_INLINE);
VkRenderPassBeginInfo renderPassInfo = {};
@@ -354,6 +426,10 @@ public:
void endRenderPass();
void executeCommands(uint32_t commandBufferCount, const VkCommandBuffer* pCommandBuffers);
void buildAccelerationStructures(uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos);
void traceRays(const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth);
void writeAccelerationStructuresProperties(uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery);
void debugFullPipelineBarrier();
VkCommandBuffer buffer = nullptr;
@@ -371,7 +447,7 @@ public:
VulkanCommandPool(VulkanDevice *device, int queueFamilyIndex);
~VulkanCommandPool();
void SetDebugName(const char *name) { device->SetDebugObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_COMMAND_POOL); }
void SetDebugName(const char *name) { device->SetObjectName(name, (uint64_t)pool, VK_OBJECT_TYPE_COMMAND_POOL); }
std::unique_ptr<VulkanCommandBuffer> createBuffer();
@@ -469,35 +545,39 @@ inline RenderPassBegin::RenderPassBegin()
renderPassInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
}
inline void RenderPassBegin::setRenderPass(VulkanRenderPass *renderPass)
inline RenderPassBegin& RenderPassBegin::RenderPass(VulkanRenderPass* renderPass)
{
renderPassInfo.renderPass = renderPass->renderPass;
return *this;
}
inline void RenderPassBegin::setRenderArea(int x, int y, int width, int height)
inline RenderPassBegin& RenderPassBegin::RenderArea(int x, int y, int width, int height)
{
renderPassInfo.renderArea.offset.x = x;
renderPassInfo.renderArea.offset.y = y;
renderPassInfo.renderArea.extent.width = width;
renderPassInfo.renderArea.extent.height = height;
return *this;
}
inline void RenderPassBegin::setFramebuffer(VulkanFramebuffer *framebuffer)
inline RenderPassBegin& RenderPassBegin::Framebuffer(VulkanFramebuffer* framebuffer)
{
renderPassInfo.framebuffer = framebuffer->framebuffer;
return *this;
}
inline void RenderPassBegin::addClearColor(float r, float g, float b, float a)
inline RenderPassBegin& RenderPassBegin::AddClearColor(float r, float g, float b, float a)
{
VkClearValue clearValue = { };
clearValue.color = { {r, g, b, a} };
clearValue.color = { r, g, b, a };
clearValues.push_back(clearValue);
renderPassInfo.clearValueCount = (uint32_t)clearValues.size();
renderPassInfo.pClearValues = clearValues.data();
return *this;
}
inline void RenderPassBegin::addClearDepth(float value)
inline RenderPassBegin& RenderPassBegin::AddClearDepth(float value)
{
VkClearValue clearValue = { };
clearValue.depthStencil.depth = value;
@@ -505,9 +585,10 @@ inline void RenderPassBegin::addClearDepth(float value)
renderPassInfo.clearValueCount = (uint32_t)clearValues.size();
renderPassInfo.pClearValues = clearValues.data();
return *this;
}
inline void RenderPassBegin::addClearStencil(int value)
inline RenderPassBegin& RenderPassBegin::AddClearStencil(int value)
{
VkClearValue clearValue = { };
clearValue.depthStencil.stencil = value;
@@ -515,9 +596,10 @@ inline void RenderPassBegin::addClearStencil(int value)
renderPassInfo.clearValueCount = (uint32_t)clearValues.size();
renderPassInfo.pClearValues = clearValues.data();
return *this;
}
inline void RenderPassBegin::addClearDepthStencil(float depthValue, int stencilValue)
inline RenderPassBegin& RenderPassBegin::AddClearDepthStencil(float depthValue, int stencilValue)
{
VkClearValue clearValue = { };
clearValue.depthStencil.depth = depthValue;
@@ -526,6 +608,12 @@ inline void RenderPassBegin::addClearDepthStencil(float depthValue, int stencilV
renderPassInfo.clearValueCount = (uint32_t)clearValues.size();
renderPassInfo.pClearValues = clearValues.data();
return *this;
}
inline void RenderPassBegin::Execute(VulkanCommandBuffer* cmdbuffer, VkSubpassContents contents)
{
cmdbuffer->beginRenderPass(&renderPassInfo, contents);
}
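// Added usage sketch (not part of the original header): the fluent RenderPassBegin
// interface introduced above, beginning a pass that clears color and depth.
// Clear values and the helper name are illustrative.
inline void ExampleBeginPass(VulkanCommandBuffer* cmdbuffer, VulkanRenderPass* renderPass,
	VulkanFramebuffer* framebuffer, int width, int height)
{
	RenderPassBegin()
		.RenderPass(renderPass)
		.Framebuffer(framebuffer)
		.RenderArea(0, 0, width, height)
		.AddClearColor(0.0f, 0.0f, 0.0f, 1.0f)
		.AddClearDepthStencil(1.0f, 0)
		.Execute(cmdbuffer);
}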
/////////////////////////////////////////////////////////////////////////////
@@ -878,9 +966,24 @@ inline void VulkanCommandBuffer::executeCommands(uint32_t commandBufferCount, co
vkCmdExecuteCommands(buffer, commandBufferCount, pCommandBuffers);
}
inline void VulkanCommandBuffer::buildAccelerationStructures(uint32_t infoCount, const VkAccelerationStructureBuildGeometryInfoKHR* pInfos, const VkAccelerationStructureBuildRangeInfoKHR* const* ppBuildRangeInfos)
{
vkCmdBuildAccelerationStructuresKHR(buffer, infoCount, pInfos, ppBuildRangeInfos);
}
inline void VulkanCommandBuffer::traceRays(const VkStridedDeviceAddressRegionKHR* pRaygenShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pMissShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pHitShaderBindingTable, const VkStridedDeviceAddressRegionKHR* pCallableShaderBindingTable, uint32_t width, uint32_t height, uint32_t depth)
{
vkCmdTraceRaysKHR(buffer, pRaygenShaderBindingTable, pMissShaderBindingTable, pHitShaderBindingTable, pCallableShaderBindingTable, width, height, depth);
}
inline void VulkanCommandBuffer::writeAccelerationStructuresProperties(uint32_t accelerationStructureCount, const VkAccelerationStructureKHR* pAccelerationStructures, VkQueryType queryType, VkQueryPool queryPool, uint32_t firstQuery)
{
vkCmdWriteAccelerationStructuresPropertiesKHR(buffer, accelerationStructureCount, pAccelerationStructures, queryType, queryPool, firstQuery);
}
inline void VulkanCommandBuffer::SetDebugName(const char *name)
{
pool->device->SetDebugObjectName(name, (uint64_t)buffer, VK_OBJECT_TYPE_COMMAND_BUFFER);
pool->device->SetObjectName(name, (uint64_t)buffer, VK_OBJECT_TYPE_COMMAND_BUFFER);
}
/////////////////////////////////////////////////////////////////////////////
@ -927,35 +1030,60 @@ inline VulkanDescriptorPool::~VulkanDescriptorPool()
vkDestroyDescriptorPool(device->device, pool, nullptr);
}
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::tryAllocate(VulkanDescriptorSetLayout *layout)
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::allocate(VulkanDescriptorSetLayout* layout, AllocType allocType)
{
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
VkDescriptorSetAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
allocInfo.descriptorPool = pool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &layout->layout;
VkDescriptorSet descriptorSet;
VkResult result = vkAllocateDescriptorSets(device->device, &allocInfo, &descriptorSet);
if (result != VK_SUCCESS)
if (allocType == AllocType::TryAllocate && result != VK_SUCCESS)
return nullptr;
else
CheckVulkanError(result, "Could not allocate descriptor sets");
return std::make_unique<VulkanDescriptorSet>(device, this, descriptorSet);
}
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::allocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount, AllocType allocType)
{
VkDescriptorSetAllocateInfo allocInfo = { VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO };
VkDescriptorSetVariableDescriptorCountAllocateInfoEXT countInfo{ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_VARIABLE_DESCRIPTOR_COUNT_ALLOCATE_INFO_EXT };
allocInfo.descriptorPool = pool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &layout->layout;
allocInfo.pNext = &countInfo;
countInfo.descriptorSetCount = 1;
countInfo.pDescriptorCounts = &bindlessCount;
VkDescriptorSet descriptorSet;
VkResult result = vkAllocateDescriptorSets(device->device, &allocInfo, &descriptorSet);
if (allocType == AllocType::TryAllocate && result != VK_SUCCESS)
return nullptr;
else
CheckVulkanError(result, "Could not allocate descriptor sets");
return std::make_unique<VulkanDescriptorSet>(device, this, descriptorSet);
}
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::tryAllocate(VulkanDescriptorSetLayout *layout)
{
return allocate(layout, AllocType::TryAllocate);
}
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::allocate(VulkanDescriptorSetLayout *layout)
{
VkDescriptorSetAllocateInfo allocInfo = {};
allocInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
allocInfo.descriptorPool = pool;
allocInfo.descriptorSetCount = 1;
allocInfo.pSetLayouts = &layout->layout;
return allocate(layout, AllocType::AlwaysAllocate);
}
VkDescriptorSet descriptorSet;
VkResult result = vkAllocateDescriptorSets(device->device, &allocInfo, &descriptorSet);
CheckVulkanError(result, "Could not allocate descriptor sets");
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::tryAllocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount)
{
return allocate(layout, bindlessCount, AllocType::TryAllocate);
}
return std::make_unique<VulkanDescriptorSet>(device, this, descriptorSet);
inline std::unique_ptr<VulkanDescriptorSet> VulkanDescriptorPool::allocate(VulkanDescriptorSetLayout* layout, uint32_t bindlessCount)
{
return allocate(layout, bindlessCount, AllocType::AlwaysAllocate);
}
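
A hedged sketch of the two allocation paths wrapped above: tryAllocate maps to AllocType::TryAllocate and returns nullptr when the pool is exhausted, allocate throws through CheckVulkanError, and the bindless overload feeds the variable descriptor count through pNext. The pools, layouts and the count below are assumptions.

// Sketch only; descriptorPool, fallbackPool, setLayout and bindlessLayout are assumed to exist.
std::unique_ptr<VulkanDescriptorSet> set = descriptorPool->tryAllocate(setLayout);
if (!set)
    set = fallbackPool->allocate(setLayout);   // throwing variant as a last resort

// Bindless path: the count becomes VkDescriptorSetVariableDescriptorCountAllocateInfoEXT::pDescriptorCounts[0].
auto bindlessSet = descriptorPool->allocate(bindlessLayout, 4096 /* assumed table size */);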
/////////////////////////////////////////////////////////////////////////////
@ -989,13 +1117,14 @@ inline VulkanFramebuffer::~VulkanFramebuffer()
/////////////////////////////////////////////////////////////////////////////
inline VulkanImage::VulkanImage(VulkanDevice *device, VkImage image, VmaAllocation allocation, int width, int height, int mipLevels) : image(image), width(width), height(height), mipLevels(mipLevels), device(device), allocation(allocation)
inline VulkanImage::VulkanImage(VulkanDevice *device, VkImage image, VmaAllocation allocation, int width, int height, int mipLevels, int layerCount) : image(image), width(width), height(height), mipLevels(mipLevels), layerCount(layerCount), device(device), allocation(allocation)
{
}
inline VulkanImage::~VulkanImage()
{
vmaDestroyImage(device->allocator, image, allocation);
if (allocation)
vmaDestroyImage(device->allocator, image, allocation);
}
inline void *VulkanImage::Map(size_t offset, size_t size)
@ -1034,6 +1163,18 @@ inline VulkanSampler::~VulkanSampler()
/////////////////////////////////////////////////////////////////////////////
inline VulkanAccelerationStructure::VulkanAccelerationStructure(VulkanDevice* device, VkAccelerationStructureKHR accelstruct)
: device(device), accelstruct(accelstruct)
{
}
inline VulkanAccelerationStructure::~VulkanAccelerationStructure()
{
vkDestroyAccelerationStructureKHR(device->device, accelstruct, nullptr);
}
/////////////////////////////////////////////////////////////////////////////
inline VulkanPipeline::VulkanPipeline(VulkanDevice *device, VkPipeline pipeline) : device(device), pipeline(pipeline)
{
}
@ -1056,6 +1197,33 @@ inline VulkanPipelineLayout::~VulkanPipelineLayout()
/////////////////////////////////////////////////////////////////////////////
inline VulkanPipelineCache::VulkanPipelineCache(VulkanDevice* device, VkPipelineCache cache) : device(device), cache(cache)
{
}
inline VulkanPipelineCache::~VulkanPipelineCache()
{
vkDestroyPipelineCache(device->device, cache, nullptr);
}
inline std::vector<uint8_t> VulkanPipelineCache::GetCacheData()
{
size_t dataSize = 0;
VkResult result = vkGetPipelineCacheData(device->device, cache, &dataSize, nullptr);
CheckVulkanError(result, "Could not get cache data size");
std::vector<uint8_t> buffer;
buffer.resize(dataSize);
result = vkGetPipelineCacheData(device->device, cache, &dataSize, buffer.data());
if (result == VK_INCOMPLETE)
VulkanError("Could not get cache data (incomplete)");
CheckVulkanError(result, "Could not get cache data");
buffer.resize(dataSize);
return buffer;
}
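
GetCacheData sizes the blob, fetches it and truncates to what the driver actually wrote; a small sketch of persisting it follows, with the file name purely illustrative (needs <fstream>).

// Sketch: write the pipeline cache blob to disk so a later run can prime pipeline cache creation with it.
std::vector<uint8_t> blob = pipelineCache->GetCacheData();   // pipelineCache: assumed VulkanPipelineCache*
std::ofstream out("vulkan_pipeline.cache", std::ios::binary);
out.write(reinterpret_cast<const char*>(blob.data()), (std::streamsize)blob.size());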
/////////////////////////////////////////////////////////////////////////////
inline VulkanRenderPass::VulkanRenderPass(VulkanDevice *device, VkRenderPass renderPass) : device(device), renderPass(renderPass)
{
}

View file

@ -0,0 +1,20 @@
#pragma once
#include "vulkaninstance.h"
class VulkanSurface
{
public:
VulkanSurface(std::shared_ptr<VulkanInstance> instance, VkSurfaceKHR surface);
~VulkanSurface();
std::shared_ptr<VulkanInstance> Instance;
VkSurfaceKHR Surface = VK_NULL_HANDLE;
#ifdef VK_USE_PLATFORM_WIN32_KHR
VulkanSurface(std::shared_ptr<VulkanInstance> instance, HWND window);
HWND Window = 0;
#endif
};

View file

@ -0,0 +1,50 @@
#pragma once
#include "vulkandevice.h"
#include "vulkanobjects.h"
class VulkanSemaphore;
class VulkanFence;
class VulkanSwapChain
{
public:
VulkanSwapChain(VulkanDevice* device);
~VulkanSwapChain();
void Create(int width, int height, int imageCount, bool vsync, bool hdr, bool exclusivefullscreen);
bool Lost() const { return lost; }
int Width() const { return actualExtent.width; }
int Height() const { return actualExtent.height; }
VkSurfaceFormatKHR Format() const { return format; }
int ImageCount() const { return (int)images.size(); }
VulkanImage* GetImage(int index) { return images[index].get(); }
VulkanImageView* GetImageView(int index) { return views[index].get(); }
int AcquireImage(VulkanSemaphore* semaphore = nullptr, VulkanFence* fence = nullptr);
void QueuePresent(int imageIndex, VulkanSemaphore* semaphore = nullptr);
private:
void SelectFormat(bool hdr);
void SelectPresentMode(bool vsync, bool exclusivefullscreen);
bool CreateSwapchain(int width, int height, int imageCount, bool exclusivefullscreen, VkSwapchainKHR oldSwapChain = VK_NULL_HANDLE);
std::vector<VkSurfaceFormatKHR> GetSurfaceFormats();
std::vector<VkPresentModeKHR> GetPresentModes(bool exclusivefullscreen);
VulkanDevice* device = nullptr;
bool lost = true;
VkExtent2D actualExtent = {};
VkSwapchainKHR swapchain = VK_NULL_HANDLE;
VkSurfaceFormatKHR format = {};
VkPresentModeKHR presentMode;
std::vector<std::unique_ptr<VulkanImage>> images;
std::vector<std::unique_ptr<VulkanImageView>> views;
VulkanSwapChain(const VulkanSwapChain&) = delete;
VulkanSwapChain& operator=(const VulkanSwapChain&) = delete;
};
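
A rough per-frame flow against the interface declared above; the device, semaphores and dimensions are assumptions, and AcquireImage is assumed to report failure with a negative index.

// Sketch of a frame loop (all surrounding objects assumed; error handling trimmed).
VulkanSwapChain swapchain(device);
swapchain.Create(width, height, 3, /*vsync*/ true, /*hdr*/ false, /*exclusivefullscreen*/ false);

int imageIndex = swapchain.AcquireImage(imageAvailableSemaphore);
if (imageIndex >= 0)
{
    // record and submit work that renders into swapchain.GetImageView(imageIndex) ...
    swapchain.QueuePresent(imageIndex, renderFinishedSemaphore);
}
else if (swapchain.Lost())
{
    swapchain.Create(width, height, 3, true, false, false);  // recreate after loss or resize
}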

File diff suppressed because it is too large

View file

@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
@ -61,8 +62,13 @@ enum TBasicType {
EbtSampler,
EbtStruct,
EbtBlock,
EbtAccStructNV,
EbtAccStruct,
EbtReference,
EbtRayQuery,
#ifndef GLSLANG_WEB
// SPIR-V type defined by spirv_type
EbtSpirvType,
#endif
// HLSL types that live only temporarily.
EbtString,
@ -89,12 +95,15 @@ enum TStorageQualifier {
EvqUniform, // read only, shared with app
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
#ifndef GLSLANG_WEB
EvqSpirvStorageClass, // spirv_storage_class
#endif
EvqPayloadNV,
EvqPayloadInNV,
EvqHitAttrNV,
EvqCallableDataNV,
EvqCallableDataInNV,
EvqPayload,
EvqPayloadIn,
EvqHitAttr,
EvqCallableData,
EvqCallableDataIn,
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
@ -226,9 +235,15 @@ enum TBuiltInVariable {
EbvViewIndex,
EbvDeviceIndex,
EbvShadingRateKHR,
EbvPrimitiveShadingRateKHR,
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
EbvSecondaryFragDataEXT,
EbvSecondaryFragColorEXT,
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
@ -238,20 +253,24 @@ enum TBuiltInVariable {
EbvFragmentSizeNV,
EbvInvocationsPerPixelNV,
// ray tracing
EbvLaunchIdNV,
EbvLaunchSizeNV,
EbvInstanceCustomIndexNV,
EbvWorldRayOriginNV,
EbvWorldRayDirectionNV,
EbvObjectRayOriginNV,
EbvObjectRayDirectionNV,
EbvRayTminNV,
EbvRayTmaxNV,
EbvHitTNV,
EbvHitKindNV,
EbvObjectToWorldNV,
EbvWorldToObjectNV,
EbvIncomingRayFlagsNV,
EbvLaunchId,
EbvLaunchSize,
EbvInstanceCustomIndex,
EbvGeometryIndex,
EbvWorldRayOrigin,
EbvWorldRayDirection,
EbvObjectRayOrigin,
EbvObjectRayDirection,
EbvRayTmin,
EbvRayTmax,
EbvHitT,
EbvHitKind,
EbvObjectToWorld,
EbvObjectToWorld3x4,
EbvWorldToObject,
EbvWorldToObject3x4,
EbvIncomingRayFlags,
EbvCurrentRayTimeNV,
// barycentrics
EbvBaryCoordNV,
EbvBaryCoordNoPerspNV,
@ -310,6 +329,9 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqGlobal: return "global"; break;
case EvqConst: return "const"; break;
case EvqConstReadOnly: return "const (read only)"; break;
#ifndef GLSLANG_WEB
case EvqSpirvStorageClass: return "spirv_storage_class"; break;
#endif
case EvqVaryingIn: return "in"; break;
case EvqVaryingOut: return "out"; break;
case EvqUniform: return "uniform"; break;
@ -328,11 +350,11 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
case EvqPayloadNV: return "rayPayloadNV"; break;
case EvqPayloadInNV: return "rayPayloadInNV"; break;
case EvqHitAttrNV: return "hitAttributeNV"; break;
case EvqCallableDataNV: return "callableDataNV"; break;
case EvqCallableDataInNV: return "callableDataInNV"; break;
case EvqPayload: return "rayPayloadNV"; break;
case EvqPayloadIn: return "rayPayloadInNV"; break;
case EvqHitAttr: return "hitAttributeNV"; break;
case EvqCallableData: return "callableDataNV"; break;
case EvqCallableDataIn: return "callableDataInNV"; break;
default: return "unknown qualifier";
}
}
@ -428,6 +450,9 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
case EbvSecondaryFragDataEXT: return "SecondaryFragDataEXT";
case EbvSecondaryFragColorEXT: return "SecondaryFragColorEXT";
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
@ -436,20 +461,22 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
case EbvFragmentSizeNV: return "FragmentSizeNV";
case EbvInvocationsPerPixelNV: return "InvocationsPerPixelNV";
case EbvLaunchIdNV: return "LaunchIdNV";
case EbvLaunchSizeNV: return "LaunchSizeNV";
case EbvInstanceCustomIndexNV: return "InstanceCustomIndexNV";
case EbvWorldRayOriginNV: return "WorldRayOriginNV";
case EbvWorldRayDirectionNV: return "WorldRayDirectionNV";
case EbvObjectRayOriginNV: return "ObjectRayOriginNV";
case EbvObjectRayDirectionNV: return "ObjectRayDirectionNV";
case EbvRayTminNV: return "ObjectRayTminNV";
case EbvRayTmaxNV: return "ObjectRayTmaxNV";
case EbvHitTNV: return "HitTNV";
case EbvHitKindNV: return "HitKindNV";
case EbvIncomingRayFlagsNV: return "IncomingRayFlagsNV";
case EbvObjectToWorldNV: return "ObjectToWorldNV";
case EbvWorldToObjectNV: return "WorldToObjectNV";
case EbvLaunchId: return "LaunchIdNV";
case EbvLaunchSize: return "LaunchSizeNV";
case EbvInstanceCustomIndex: return "InstanceCustomIndexNV";
case EbvGeometryIndex: return "GeometryIndexEXT";
case EbvWorldRayOrigin: return "WorldRayOriginNV";
case EbvWorldRayDirection: return "WorldRayDirectionNV";
case EbvObjectRayOrigin: return "ObjectRayOriginNV";
case EbvObjectRayDirection: return "ObjectRayDirectionNV";
case EbvRayTmin: return "ObjectRayTminNV";
case EbvRayTmax: return "ObjectRayTmaxNV";
case EbvHitT: return "HitTNV";
case EbvHitKind: return "HitKindNV";
case EbvIncomingRayFlags: return "IncomingRayFlagsNV";
case EbvObjectToWorld: return "ObjectToWorldNV";
case EbvWorldToObject: return "WorldToObjectNV";
case EbvCurrentRayTimeNV: return "CurrentRayTimeNV";
case EbvBaryCoordNV: return "BaryCoordNV";
case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
@ -468,6 +495,9 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
case EbvShadingRateKHR: return "ShadingRateKHR";
case EbvPrimitiveShadingRateKHR: return "PrimitiveShadingRateKHR";
default: return "unknown built-in variable";
}
}

View file

@ -37,6 +37,17 @@
#ifndef _COMMON_INCLUDED_
#define _COMMON_INCLUDED_
#include <algorithm>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <list>
#include <map>
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
#include <sstream>
@ -94,18 +105,6 @@ std::string to_string(const T& val) {
#pragma warning(disable : 4201) // nameless union
#endif
#include <set>
#include <unordered_set>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include "PoolAlloc.h"
//
@ -196,6 +195,10 @@ template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_t
class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
};
template <class K, class CMP = std::less<K> >
class TSet : public std::set<K, CMP, pool_allocator<K> > {
};
//
// Persistent string memory. Should only be used for strings that survive
// across compiles/links.
@ -288,6 +291,18 @@ template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
return ! (number & (powerOf2 - 1));
}
// Returns log2 of an integer power of 2.
// T should be integral.
template <class T> int IntLog2(T n)
{
assert(IsPow2(n));
int result = 0;
while ((T(1) << result) != n) {
result++;
}
return result;
}
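
A few hand-checked values for the helper above (illustration, not part of the diff):

// IntLog2(1) == 0, IntLog2(8) == 3, IntLog2(1024) == 10;
// a non-power-of-two argument such as 12 trips the assert in debug builds.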
} // end namespace glslang
#endif // _COMMON_INCLUDED_

View file

@ -921,7 +921,7 @@ public:
else
unionArray = new TConstUnionVector(size);
}
TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
TConstUnionArray(const TConstUnionArray& a) = default;
TConstUnionArray(const TConstUnionArray& a, int start, int size)
{
unionArray = new TConstUnionVector(size);

View file

@ -142,6 +142,7 @@ struct TBuiltInResource {
int maxTaskWorkGroupSizeY_NV;
int maxTaskWorkGroupSizeZ_NV;
int maxMeshViewCountNV;
int maxDualSourceDrawBuffersEXT;
TLimits limits;
};

View file

@ -0,0 +1,136 @@
//
// Copyright(C) 2021 Advanced Micro Devices, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#ifndef GLSLANG_WEB
//
// GL_EXT_spirv_intrinsics
//
#include "Common.h"
namespace glslang {
class TIntermTyped;
class TIntermConstantUnion;
class TType;
// SPIR-V requirements
struct TSpirvRequirement {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// extension = [..]
TSet<TString> extensions;
// capability = [..]
TSet<int> capabilities;
};
// SPIR-V execution modes
struct TSpirvExecutionMode {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// spirv_execution_mode
TMap<int, TVector<const TIntermConstantUnion*>> modes;
// spirv_execution_mode_id
TMap<int, TVector<const TIntermConstantUnion*> > modeIds;
};
// SPIR-V decorations
struct TSpirvDecorate {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
// spirv_decorate
TMap<int, TVector<const TIntermConstantUnion*> > decorates;
// spirv_decorate_id
TMap<int, TVector<const TIntermConstantUnion*> > decorateIds;
// spirv_decorate_string
TMap<int, TVector<const TIntermConstantUnion*> > decorateStrings;
};
// SPIR-V instruction
struct TSpirvInstruction {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSpirvInstruction() { set = ""; id = -1; }
bool operator==(const TSpirvInstruction& rhs) const { return set == rhs.set && id == rhs.id; }
bool operator!=(const TSpirvInstruction& rhs) const { return !operator==(rhs); }
// spirv_instruction
TString set;
int id;
};
// SPIR-V type parameter
struct TSpirvTypeParameter {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSpirvTypeParameter(const TIntermConstantUnion* arg) { isConstant = true; constant = arg; }
TSpirvTypeParameter(const TType* arg) { isConstant = false; type = arg; }
bool operator==(const TSpirvTypeParameter& rhs) const
{
return isConstant == rhs.isConstant && ((isConstant && constant == rhs.constant) || (!isConstant && type == rhs.type));
}
bool operator!=(const TSpirvTypeParameter& rhs) const { return !operator==(rhs); }
bool isConstant;
union {
const TIntermConstantUnion* constant;
const TType* type;
};
};
typedef TVector<TSpirvTypeParameter> TSpirvTypeParameters;
// SPIR-V type
struct TSpirvType {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
bool operator==(const TSpirvType& rhs) const
{
return spirvInst == rhs.spirvInst && typeParams == rhs.typeParams;
}
bool operator!=(const TSpirvType& rhs) const { return !operator==(rhs); }
// spirv_type
TSpirvInstruction spirvInst;
TSpirvTypeParameters typeParams;
};
} // end namespace glslang
#endif // GLSLANG_WEB

View file

@ -3,6 +3,7 @@
// Copyright (C) 2012-2016 LunarG, Inc.
// Copyright (C) 2015-2016 Google, Inc.
// Copyright (C) 2017 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
@ -43,11 +44,14 @@
#include "../Include/BaseTypes.h"
#include "../Public/ShaderLang.h"
#include "arrays.h"
#include "SpirvIntrinsics.h"
#include <algorithm>
namespace glslang {
class TIntermAggregate;
const int GlslangMaxTypeLength = 200; // TODO: need to print block/struct one member per line, so this can stay bounded
const char* const AnonymousPrefix = "anon@"; // for something like a block whose members can be directly accessed
@ -114,6 +118,7 @@ struct TSampler { // misnomer now; includes images, textures without sampler,
#endif
bool is1D() const { return dim == Esd1D; }
bool is2D() const { return dim == Esd2D; }
bool isBuffer() const { return dim == EsdBuffer; }
bool isRect() const { return dim == EsdRect; }
bool isSubpass() const { return dim == EsdSubpass; }
@ -405,6 +410,7 @@ enum TLayoutFormat {
ElfRg8i,
ElfR16i,
ElfR8i,
ElfR64i,
ElfIntGuard, // to help with comparisons
@ -422,6 +428,7 @@ enum TLayoutFormat {
ElfRg8ui,
ElfR16ui,
ElfR8ui,
ElfR64ui,
ElfCount
};
@ -472,6 +479,17 @@ enum TInterlockOrdering {
EioCount,
};
enum TShaderInterface
{
// Includes both uniform blocks and buffer blocks
EsiUniform = 0,
EsiInput,
EsiOutput,
EsiNone,
EsiCount
};
class TQualifier {
public:
static const int layoutNotSet = -1;
@ -484,7 +502,11 @@ public:
declaredBuiltIn = EbvNone;
#ifndef GLSLANG_WEB
noContraction = false;
nullInit = false;
spirvByReference = false;
spirvLiteral = false;
#endif
defaultBlock = false;
}
// drop qualifiers that don't belong in a temporary variable
@ -497,7 +519,15 @@ public:
clearMemory();
specConstant = false;
nonUniform = false;
nullInit = false;
defaultBlock = false;
clearLayout();
#ifndef GLSLANG_WEB
spirvStorageClass = -1;
spirvDecorate = nullptr;
spirvByReference = false;
spirvLiteral = false;
#endif
}
void clearInterstage()
@ -532,6 +562,7 @@ public:
queuefamilycoherent = false;
workgroupcoherent = false;
subgroupcoherent = false;
shadercallcoherent = false;
nonprivate = false;
volatil = false;
restrict = false;
@ -553,6 +584,8 @@ public:
// having a constant_id is not sufficient: expressions have no id, but are still specConstant
bool specConstant : 1;
bool nonUniform : 1;
bool explicitOffset : 1;
bool defaultBlock : 1; // default blocks with matching names have structures merged when linking
#ifdef GLSLANG_WEB
bool isWriteOnly() const { return false; }
@ -571,6 +604,12 @@ public:
bool isNoContraction() const { return false; }
void setNoContraction() { }
bool isPervertexNV() const { return false; }
void setNullInit() { }
bool isNullInit() const { return false; }
void setSpirvByReference() { }
bool isSpirvByReference() { return false; }
void setSpirvLiteral() { }
bool isSpirvLiteral() { return false; }
#else
bool noContraction: 1; // prevent contraction and reassociation, e.g., for 'precise' keyword, and expressions it affects
bool nopersp : 1;
@ -590,7 +629,11 @@ public:
bool queuefamilycoherent : 1;
bool workgroupcoherent : 1;
bool subgroupcoherent : 1;
bool shadercallcoherent : 1;
bool nonprivate : 1;
bool nullInit : 1;
bool spirvByReference : 1;
bool spirvLiteral : 1;
bool isWriteOnly() const { return writeonly; }
bool isReadOnly() const { return readonly; }
bool isRestrict() const { return restrict; }
@ -599,11 +642,11 @@ public:
bool isSample() const { return sample; }
bool isMemory() const
{
return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly || nonprivate;
}
bool isMemoryQualifierImageAndSSBOOnly() const
{
return subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
return shadercallcoherent || subgroupcoherent || workgroupcoherent || queuefamilycoherent || devicecoherent || coherent || volatil || restrict || readonly || writeonly;
}
bool bufferReferenceNeedsVulkanMemoryModel() const
{
@ -626,6 +669,12 @@ public:
bool isNoContraction() const { return noContraction; }
void setNoContraction() { noContraction = true; }
bool isPervertexNV() const { return pervertexNV; }
void setNullInit() { nullInit = true; }
bool isNullInit() const { return nullInit; }
void setSpirvByReference() { spirvByReference = true; }
bool isSpirvByReference() const { return spirvByReference; }
void setSpirvLiteral() { spirvLiteral = true; }
bool isSpirvLiteral() const { return spirvLiteral; }
#endif
bool isPipeInput() const
@ -731,6 +780,46 @@ public:
}
}
TBlockStorageClass getBlockStorage() const {
if (storage == EvqUniform && !isPushConstant()) {
return EbsUniform;
}
else if (storage == EvqUniform) {
return EbsPushConstant;
}
else if (storage == EvqBuffer) {
return EbsStorageBuffer;
}
return EbsNone;
}
void setBlockStorage(TBlockStorageClass newBacking) {
#ifndef GLSLANG_WEB
layoutPushConstant = (newBacking == EbsPushConstant);
#endif
switch (newBacking) {
case EbsUniform :
if (layoutPacking == ElpStd430) {
// std430 would not be valid
layoutPacking = ElpStd140;
}
storage = EvqUniform;
break;
case EbsStorageBuffer :
storage = EvqBuffer;
break;
#ifndef GLSLANG_WEB
case EbsPushConstant :
storage = EvqUniform;
layoutSet = TQualifier::layoutSetEnd;
layoutBinding = TQualifier::layoutBindingEnd;
break;
#endif
default:
break;
}
}
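// Illustration (added, not in the diff): remapping a block to push-constant backing would be
//   qualifier.setBlockStorage(EbsPushConstant);
// which keeps storage EvqUniform, sets layoutPushConstant, and parks layoutSet/layoutBinding at their
// "end" sentinels; EbsUniform additionally downgrades std430 packing to std140, since std430 is not
// valid for uniform blocks.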
#ifdef GLSLANG_WEB
bool isPerView() const { return false; }
bool isTaskMemory() const { return false; }
@ -739,6 +828,12 @@ public:
bool isPerPrimitive() const { return perPrimitiveNV; }
bool isPerView() const { return perViewNV; }
bool isTaskMemory() const { return perTaskNV; }
bool isAnyPayload() const {
return storage == EvqPayload || storage == EvqPayloadIn;
}
bool isAnyCallable() const {
return storage == EvqCallableData || storage == EvqCallableDataIn;
}
// True if this type of IO is supposed to be arrayed with extra level for per-vertex data
bool isArrayedIo(EShLanguage language) const
@ -773,7 +868,7 @@ public:
layoutViewportRelative = false;
// -2048 as the default value indicating layoutSecondaryViewportRelative is not set
layoutSecondaryViewportRelativeOffset = -2048;
layoutShaderRecordNV = false;
layoutShaderRecord = false;
layoutBufferReferenceAlign = layoutBufferReferenceAlignEnd;
layoutFormat = ElfNone;
#endif
@ -812,7 +907,7 @@ public:
hasAnyLocation() ||
hasStream() ||
hasFormat() ||
isShaderRecordNV() ||
isShaderRecord() ||
isPushConstant() ||
hasBufferReference();
}
@ -821,6 +916,7 @@ public:
return hasNonXfbLayout() ||
hasXfb();
}
TLayoutMatrix layoutMatrix : 3;
TLayoutPacking layoutPacking : 4;
int layoutOffset;
@ -871,7 +967,11 @@ public:
bool layoutPassthrough;
bool layoutViewportRelative;
int layoutSecondaryViewportRelativeOffset;
bool layoutShaderRecordNV;
bool layoutShaderRecord;
// GL_EXT_spirv_intrinsics
int spirvStorageClass;
TSpirvDecorate* spirvDecorate;
#endif
bool hasUniformLayout() const
@ -942,7 +1042,7 @@ public:
bool hasAttachment() const { return false; }
TLayoutFormat getFormat() const { return ElfNone; }
bool isPushConstant() const { return false; }
bool isShaderRecordNV() const { return false; }
bool isShaderRecord() const { return false; }
bool hasBufferReference() const { return false; }
bool hasBufferReferenceAlign() const { return false; }
bool isNonUniform() const { return false; }
@ -993,7 +1093,7 @@ public:
}
TLayoutFormat getFormat() const { return layoutFormat; }
bool isPushConstant() const { return layoutPushConstant; }
bool isShaderRecordNV() const { return layoutShaderRecordNV; }
bool isShaderRecord() const { return layoutShaderRecord; }
bool hasBufferReference() const { return layoutBufferReference; }
bool hasBufferReferenceAlign() const
{
@ -1003,6 +1103,15 @@ public:
{
return nonUniform;
}
// GL_EXT_spirv_intrinsics
bool hasSprivDecorate() const { return spirvDecorate != nullptr; }
void setSpirvDecorate(int decoration, const TIntermAggregate* args = nullptr);
void setSpirvDecorateId(int decoration, const TIntermAggregate* args);
void setSpirvDecorateString(int decoration, const TIntermAggregate* args);
const TSpirvDecorate& getSpirvDecorate() const { assert(spirvDecorate); return *spirvDecorate; }
TSpirvDecorate& getSpirvDecorate() { assert(spirvDecorate); return *spirvDecorate; }
TString getSpirvDecorateQualifierString() const;
#endif
bool hasSpecConstantId() const
{
@ -1101,6 +1210,8 @@ public:
case ElfR32ui: return "r32ui";
case ElfR16ui: return "r16ui";
case ElfR8ui: return "r8ui";
case ElfR64ui: return "r64ui";
case ElfR64i: return "r64i";
default: return "none";
}
}
@ -1219,6 +1330,7 @@ struct TShaderQualifiers {
bool layoutDerivativeGroupQuads; // true if layout derivative_group_quadsNV set
bool layoutDerivativeGroupLinear; // true if layout derivative_group_linearNV set
int primitives; // mesh shader "max_primitives"
bool layoutPrimitiveCulling; // true if layout primitive_culling set
TLayoutDepth getDepth() const { return layoutDepth; }
#else
TLayoutDepth getDepth() const { return EldNone; }
@ -1252,6 +1364,7 @@ struct TShaderQualifiers {
layoutOverrideCoverage = false;
layoutDerivativeGroupQuads = false;
layoutDerivativeGroupLinear = false;
layoutPrimitiveCulling = false;
primitives = TQualifier::layoutNotSet;
interlockOrdering = EioNone;
#endif
@ -1315,6 +1428,8 @@ struct TShaderQualifiers {
primitives = src.primitives;
if (src.interlockOrdering != EioNone)
interlockOrdering = src.interlockOrdering;
if (src.layoutPrimitiveCulling)
layoutPrimitiveCulling = src.layoutPrimitiveCulling;
#endif
}
};
@ -1341,6 +1456,10 @@ public:
const TType* userDef;
TSourceLoc loc;
TArraySizes* typeParameters;
#ifndef GLSLANG_WEB
// SPIR-V type defined by spirv_type directive
TSpirvType* spirvType;
#endif
#ifdef GLSLANG_WEB
bool isCoopmat() const { return false; }
@ -1359,6 +1478,9 @@ public:
loc = l;
typeParameters = nullptr;
coopmat = false;
#ifndef GLSLANG_WEB
spirvType = nullptr;
#endif
}
void initQualifiers(bool global = false)
@ -1395,6 +1517,11 @@ public:
return matrixCols == 0 && vectorSize == 1 && arraySizes == nullptr && userDef == nullptr;
}
#ifndef GLSLANG_WEB
// GL_EXT_spirv_intrinsics
void setSpirvType(const TSpirvInstruction& spirvInst, const TSpirvTypeParameters* typeParams = nullptr);
#endif
// "Image" is a superset of "Subpass"
bool isImage() const { return basicType == EbtSampler && sampler.isImage(); }
bool isSubpass() const { return basicType == EbtSampler && sampler.isSubpass(); }
@ -1412,6 +1539,9 @@ public:
bool isVector = false) :
basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
sampler.clear();
qualifier.clear();
@ -1423,6 +1553,9 @@ public:
bool isVector = false) :
basicType(t), vectorSize(vs), matrixCols(mc), matrixRows(mr), vector1(isVector && vs == 1), coopmat(false),
arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
sampler.clear();
qualifier.clear();
@ -1436,6 +1569,9 @@ public:
basicType(p.basicType),
vectorSize(p.vectorSize), matrixCols(p.matrixCols), matrixRows(p.matrixRows), vector1(false), coopmat(p.coopmat),
arraySizes(p.arraySizes), structure(nullptr), fieldName(nullptr), typeName(nullptr), typeParameters(p.typeParameters)
#ifndef GLSLANG_WEB
, spirvType(p.spirvType)
#endif
{
if (basicType == EbtSampler)
sampler = p.sampler;
@ -1470,6 +1606,9 @@ public:
basicType(EbtSampler), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
arraySizes(as), structure(nullptr), fieldName(nullptr), typeName(nullptr),
sampler(sampler), typeParameters(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
qualifier.clear();
qualifier.storage = q;
@ -1520,6 +1659,9 @@ public:
TType(TTypeList* userDef, const TString& n) :
basicType(EbtStruct), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
sampler.clear();
qualifier.clear();
@ -1529,6 +1671,9 @@ public:
TType(TTypeList* userDef, const TString& n, const TQualifier& q) :
basicType(EbtBlock), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false), coopmat(false),
qualifier(q), arraySizes(nullptr), structure(userDef), fieldName(nullptr), typeParameters(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
sampler.clear();
typeName = NewPoolTString(n.c_str());
@ -1537,6 +1682,9 @@ public:
explicit TType(TBasicType t, const TType &p, const TString& n) :
basicType(t), vectorSize(1), matrixCols(0), matrixRows(0), vector1(false),
arraySizes(nullptr), structure(nullptr), fieldName(nullptr), typeName(nullptr)
#ifndef GLSLANG_WEB
, spirvType(nullptr)
#endif
{
assert(t == EbtReference);
typeName = NewPoolTString(n.c_str());
@ -1567,6 +1715,9 @@ public:
referentType = copyOf.referentType;
}
typeParameters = copyOf.typeParameters;
#ifndef GLSLANG_WEB
spirvType = copyOf.spirvType;
#endif
coopmat = copyOf.isCoopMat();
}
@ -1612,6 +1763,23 @@ public:
assert(fieldName);
return *fieldName;
}
TShaderInterface getShaderInterface() const
{
if (basicType != EbtBlock)
return EsiNone;
switch (qualifier.storage) {
default:
return EsiNone;
case EvqVaryingIn:
return EsiInput;
case EvqVaryingOut:
return EsiOutput;
case EvqUniform:
case EvqBuffer:
return EsiUniform;
}
}
virtual TBasicType getBasicType() const { return basicType; }
virtual const TSampler& getSampler() const { return sampler; }
@ -1640,6 +1808,7 @@ public:
virtual bool isScalar() const { return ! isVector() && ! isMatrix() && ! isStruct() && ! isArray(); }
virtual bool isScalarOrVec1() const { return isScalar() || vector1; }
virtual bool isScalarOrVector() const { return !isMatrix() && !isStruct() && !isArray(); }
virtual bool isVector() const { return vectorSize > 1 || vector1; }
virtual bool isMatrix() const { return matrixCols ? true : false; }
virtual bool isArray() const { return arraySizes != nullptr; }
@ -1670,7 +1839,7 @@ public:
}
virtual bool isOpaque() const { return basicType == EbtSampler
#ifndef GLSLANG_WEB
|| basicType == EbtAtomicUint || basicType == EbtAccStructNV
|| basicType == EbtAtomicUint || basicType == EbtAccStruct || basicType == EbtRayQuery
#endif
; }
virtual bool isBuiltIn() const { return getQualifier().builtIn != EbvNone; }
@ -1918,8 +2087,6 @@ public:
}
}
const char* getBasicString() const
{
return TType::getBasicString(basicType);
@ -1946,8 +2113,11 @@ public:
case EbtAtomicUint: return "atomic_uint";
case EbtStruct: return "structure";
case EbtBlock: return "block";
case EbtAccStructNV: return "accelerationStructureNV";
case EbtAccStruct: return "accelerationStructureNV";
case EbtRayQuery: return "rayQueryEXT";
case EbtReference: return "reference";
case EbtString: return "string";
case EbtSpirvType: return "spirv_type";
#endif
default: return "unknown type";
}
@ -1968,6 +2138,9 @@ public:
const auto appendUint = [&](unsigned int u) { typeString.append(std::to_string(u).c_str()); };
const auto appendInt = [&](int i) { typeString.append(std::to_string(i).c_str()); };
if (qualifier.hasSprivDecorate())
appendStr(qualifier.getSpirvDecorateQualifierString().c_str());
if (qualifier.hasLayout()) {
// To reduce noise, skip this if the only layout is an xfb_buffer
// with no triggering xfb_offset.
@ -2056,7 +2229,7 @@ public:
appendStr(" layoutSecondaryViewportRelativeOffset=");
appendInt(qualifier.layoutSecondaryViewportRelativeOffset);
}
if (qualifier.layoutShaderRecordNV)
if (qualifier.layoutShaderRecord)
appendStr(" shaderRecordNV");
appendStr(")");
@ -2099,6 +2272,8 @@ public:
appendStr(" workgroupcoherent");
if (qualifier.subgroupcoherent)
appendStr(" subgroupcoherent");
if (qualifier.shadercallcoherent)
appendStr(" shadercallcoherent");
if (qualifier.nonprivate)
appendStr(" nonprivate");
if (qualifier.volatil)
@ -2113,6 +2288,12 @@ public:
appendStr(" specialization-constant");
if (qualifier.nonUniform)
appendStr(" nonuniform");
if (qualifier.isNullInit())
appendStr(" null-init");
if (qualifier.isSpirvByReference())
appendStr(" spirv_by_reference");
if (qualifier.isSpirvLiteral())
appendStr(" spirv_literal");
appendStr(" ");
appendStr(getStorageQualifierString());
if (isArray()) {
@ -2232,6 +2413,17 @@ public:
name += ';' ;
}
// These variables are inconsistently declared inside and outside of gl_PerVertex in glslang right now.
// They are declared inside of 'in gl_PerVertex', but sitting as standalone when they are 'out'puts.
bool isInconsistentGLPerVertexMember(const TString& name) const
{
if (name == "gl_SecondaryPositionNV" ||
name == "gl_PositionPerViewNV")
return true;
return false;
}
// Do two structure types match? They could be declared independently,
// in different places, but still might satisfy the definition of matching.
// From the spec:
@ -2247,22 +2439,48 @@ public:
(isStruct() && right.isStruct() && structure == right.structure))
return true;
// Both being nullptr was caught above, now they both have to be structures of the same number of elements
if (!isStruct() || !right.isStruct() ||
structure->size() != right.structure->size())
return false;
// Structure names have to match
if (*typeName != *right.typeName)
return false;
// Compare the names and types of all the members, which have to match
for (unsigned int i = 0; i < structure->size(); ++i) {
if ((*structure)[i].type->getFieldName() != (*right.structure)[i].type->getFieldName())
return false;
// There are inconsistencies with how gl_PerVertex is setup. For now ignore those as errors if they
// are known inconsistencies.
bool isGLPerVertex = *typeName == "gl_PerVertex";
if (*(*structure)[i].type != *(*right.structure)[i].type)
return false;
// Both being nullptr was caught above, now they both have to be structures of the same number of elements
if (!isStruct() || !right.isStruct() ||
(structure->size() != right.structure->size() && !isGLPerVertex))
return false;
// Compare the names and types of all the members, which have to match
for (size_t li = 0, ri = 0; li < structure->size() || ri < right.structure->size(); ++li, ++ri) {
if (li < structure->size() && ri < right.structure->size()) {
if ((*structure)[li].type->getFieldName() == (*right.structure)[ri].type->getFieldName()) {
if (*(*structure)[li].type != *(*right.structure)[ri].type)
return false;
} else {
// If one of the members is something that's inconsistently declared, skip over it
// for now.
if (isGLPerVertex) {
if (isInconsistentGLPerVertexMember((*structure)[li].type->getFieldName())) {
ri--;
continue;
} else if (isInconsistentGLPerVertexMember((*right.structure)[ri].type->getFieldName())) {
li--;
continue;
}
} else {
return false;
}
}
// If we get here, then there should only be inconsistently declared members left
} else if (li < structure->size()) {
if (!isInconsistentGLPerVertexMember((*structure)[li].type->getFieldName()))
return false;
} else {
if (!isInconsistentGLPerVertexMember((*right.structure)[ri].type->getFieldName()))
return false;
}
}
return true;
@ -2312,6 +2530,15 @@ public:
(typeParameters != nullptr && right.typeParameters != nullptr && *typeParameters == *right.typeParameters));
}
#ifndef GLSLANG_WEB
// See if two type's SPIR-V type contents match
bool sameSpirvType(const TType& right) const
{
return ((spirvType == nullptr && right.spirvType == nullptr) ||
(spirvType != nullptr && right.spirvType != nullptr && *spirvType == *right.spirvType));
}
#endif
// See if two type's elements match in all ways except basic type
bool sameElementShape(const TType& right) const
{
@ -2350,7 +2577,11 @@ public:
// See if two types match in all ways (just the actual type, not qualification)
bool operator==(const TType& right) const
{
#ifndef GLSLANG_WEB
return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right) && sameSpirvType(right);
#else
return sameElementType(right) && sameArrayness(right) && sameTypeParameters(right);
#endif
}
bool operator!=(const TType& right) const
@ -2369,6 +2600,10 @@ public:
return 0;
}
#ifndef GLSLANG_WEB
const TSpirvType& getSpirvType() const { assert(spirvType); return *spirvType; }
#endif
protected:
// Require consumer to pick between deep copy and shallow copy.
TType(const TType& type);
@ -2381,6 +2616,19 @@ protected:
{
shallowCopy(copyOf);
#ifndef GLSLANG_WEB
// GL_EXT_spirv_intrinsics
if (copyOf.qualifier.spirvDecorate) {
qualifier.spirvDecorate = new TSpirvDecorate;
*qualifier.spirvDecorate = *copyOf.qualifier.spirvDecorate;
}
if (copyOf.spirvType) {
spirvType = new TSpirvType;
*spirvType = *copyOf.spirvType;
}
#endif
if (copyOf.arraySizes) {
arraySizes = new TArraySizes;
*arraySizes = *copyOf.arraySizes;
@ -2440,6 +2688,9 @@ protected:
TString *typeName; // for structure type name
TSampler sampler;
TArraySizes* typeParameters;// nullptr unless a parameterized type; can be shared across types
#ifndef GLSLANG_WEB
TSpirvType* spirvType; // SPIR-V type defined by spirv_type directive
#endif
};
} // end namespace glslang

View file

@ -1,5 +1,5 @@
// Copyright (C) 2020 The Khronos Group Inc.
//
// Copyright (C) 2018 The Khronos Group Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
@ -14,7 +14,7 @@
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// Neither the name of The Khronos Group Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
@ -30,6 +30,33 @@
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "pch.h"
#ifndef GLSLANG_BUILD_INFO
#define GLSLANG_BUILD_INFO
#define GLSLANG_VERSION_MAJOR 11
#define GLSLANG_VERSION_MINOR 6
#define GLSLANG_VERSION_PATCH 0
#define GLSLANG_VERSION_FLAVOR ""
#define GLSLANG_VERSION_GREATER_THAN(major, minor, patch) \
(((major) > GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \
(((minor) > GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \
((patch) > GLSLANG_VERSION_PATCH)))))
#define GLSLANG_VERSION_GREATER_OR_EQUAL_TO(major, minor, patch) \
(((major) > GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \
(((minor) > GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \
((patch) >= GLSLANG_VERSION_PATCH)))))
#define GLSLANG_VERSION_LESS_THAN(major, minor, patch) \
(((major) < GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \
(((minor) < GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \
((patch) < GLSLANG_VERSION_PATCH)))))
#define GLSLANG_VERSION_LESS_OR_EQUAL_TO(major, minor, patch) \
(((major) < GLSLANG_VERSION_MAJOR) || ((major) == GLSLANG_VERSION_MAJOR && \
(((minor) < GLSLANG_VERSION_MINOR) || ((minor) == GLSLANG_VERSION_MINOR && \
((patch) <= GLSLANG_VERSION_PATCH)))))
#endif // GLSLANG_BUILD_INFO
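
The comparison macros above are plain lexicographic major/minor/patch checks against the defined constants; a hedged sketch of guarding on them, where the guarded behaviour is invented for illustration:

#if GLSLANG_VERSION_GREATER_OR_EQUAL_TO(11, 6, 0)
// code that relies on behaviour present since glslang 11.6.0 (example condition only)
#endif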

View file

@ -2,6 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2016 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
//
// All rights reserved.
//
@ -70,6 +71,9 @@ enum TOperator {
EOpFunctionCall,
EOpFunction, // For function definition
EOpParameters, // an aggregate listing the parameters to a function
#ifndef GLSLANG_WEB
EOpSpirvInst,
#endif
//
// Unary operators
@ -279,6 +283,12 @@ enum TOperator {
EOpConvUvec2ToPtr,
EOpConvPtrToUvec2,
// uint64_t -> accelerationStructureEXT
EOpConvUint64ToAccStruct,
// uvec2 -> accelerationStructureEXT
EOpConvUvec2ToAccStruct,
//
// binary operations
//
@ -586,6 +596,7 @@ enum TOperator {
EOpTime,
EOpAtomicAdd,
EOpAtomicSubtract,
EOpAtomicMin,
EOpAtomicMax,
EOpAtomicAnd,
@ -621,17 +632,22 @@ enum TOperator {
EOpIsHelperInvocation,
EOpDebugPrintf,
//
// Branch
//
EOpKill, // Fragment only
EOpKill, // Fragment only
EOpTerminateInvocation, // Fragment only
EOpDemote, // Fragment only
EOpTerminateRayKHR, // Any-hit only
EOpIgnoreIntersectionKHR, // Any-hit only
EOpReturn,
EOpBreak,
EOpContinue,
EOpCase,
EOpDefault,
EOpDemote, // Fragment only
//
// Constructors
@ -748,6 +764,7 @@ enum TOperator {
EOpConstructNonuniform, // expected to be transformed away, not present in final AST
EOpConstructReference,
EOpConstructCooperativeMatrix,
EOpConstructAccStruct,
EOpConstructGuardEnd,
//
@ -909,11 +926,43 @@ enum TOperator {
EOpMul32x16,
EOpTraceNV,
EOpReportIntersectionNV,
EOpTraceRayMotionNV,
EOpTraceKHR,
EOpReportIntersection,
EOpIgnoreIntersectionNV,
EOpTerminateRayNV,
EOpExecuteCallableNV,
EOpExecuteCallableKHR,
EOpWritePackedPrimitiveIndices4x8NV,
//
// GL_EXT_ray_query operations
//
EOpRayQueryInitialize,
EOpRayQueryTerminate,
EOpRayQueryGenerateIntersection,
EOpRayQueryConfirmIntersection,
EOpRayQueryProceed,
EOpRayQueryGetIntersectionType,
EOpRayQueryGetRayTMin,
EOpRayQueryGetRayFlags,
EOpRayQueryGetIntersectionT,
EOpRayQueryGetIntersectionInstanceCustomIndex,
EOpRayQueryGetIntersectionInstanceId,
EOpRayQueryGetIntersectionInstanceShaderBindingTableRecordOffset,
EOpRayQueryGetIntersectionGeometryIndex,
EOpRayQueryGetIntersectionPrimitiveIndex,
EOpRayQueryGetIntersectionBarycentrics,
EOpRayQueryGetIntersectionFrontFace,
EOpRayQueryGetIntersectionCandidateAABBOpaque,
EOpRayQueryGetIntersectionObjectRayDirection,
EOpRayQueryGetIntersectionObjectRayOrigin,
EOpRayQueryGetWorldRayDirection,
EOpRayQueryGetWorldRayOrigin,
EOpRayQueryGetIntersectionObjectToWorld,
EOpRayQueryGetIntersectionWorldToObject,
//
// HLSL operations
//
@ -1091,6 +1140,8 @@ public:
virtual TBasicType getBasicType() const { return type.getBasicType(); }
virtual TQualifier& getQualifier() { return type.getQualifier(); }
virtual const TQualifier& getQualifier() const { return type.getQualifier(); }
virtual TArraySizes* getArraySizes() { return type.getArraySizes(); }
virtual const TArraySizes* getArraySizes() const { return type.getArraySizes(); }
virtual void propagatePrecision(TPrecisionQualifier);
virtual int getVectorSize() const { return type.getVectorSize(); }
virtual int getMatrixCols() const { return type.getMatrixCols(); }
@ -1199,6 +1250,7 @@ public:
TOperator getFlowOp() const { return flowOp; }
TIntermTyped* getExpression() const { return expression; }
void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
void updatePrecision(TPrecisionQualifier parentPrecision);
protected:
TOperator flowOp;
TIntermTyped* expression;
@ -1230,15 +1282,15 @@ public:
// if symbol is initialized as symbol(sym), the memory comes from the pool allocator of sym. If sym comes from
// per process threadPoolAllocator, then it causes increased memory usage per compile
// it is essential to use "symbol = sym" to assign to symbol
TIntermSymbol(int i, const TString& n, const TType& t)
TIntermSymbol(long long i, const TString& n, const TType& t)
: TIntermTyped(t), id(i),
#ifndef GLSLANG_WEB
flattenSubset(-1),
#endif
constSubtree(nullptr)
{ name = n; }
virtual int getId() const { return id; }
virtual void changeId(int i) { id = i; }
virtual long long getId() const { return id; }
virtual void changeId(long long i) { id = i; }
virtual const TString& getName() const { return name; }
virtual void traverse(TIntermTraverser*);
virtual TIntermSymbol* getAsSymbolNode() { return this; }
@ -1249,15 +1301,17 @@ public:
TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifndef GLSLANG_WEB
void setFlattenSubset(int subset) { flattenSubset = subset; }
virtual const TString& getAccessName() const;
int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif
// This is meant for cases where a node has already been constructed, and
// later on, it becomes necessary to switch to a different symbol.
virtual void switchId(int newId) { id = newId; }
virtual void switchId(long long newId) { id = newId; }
protected:
int id; // the unique id of the symbol this node represents
long long id; // the unique id of the symbol this node represents
#ifndef GLSLANG_WEB
int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
#endif
@ -1566,8 +1620,15 @@ public:
virtual TIntermUnary* getAsUnaryNode() { return this; }
virtual const TIntermUnary* getAsUnaryNode() const { return this; }
virtual void updatePrecision();
#ifndef GLSLANG_WEB
void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
#endif
protected:
TIntermTyped* operand;
#ifndef GLSLANG_WEB
TSpirvInstruction spirvInst;
#endif
};
typedef TVector<TIntermNode*> TIntermSequence;
@ -1598,6 +1659,10 @@ public:
bool getDebug() const { return debug; }
void setPragmaTable(const TPragmaTable& pTable);
const TPragmaTable& getPragmaTable() const { return *pragmaTable; }
#ifndef GLSLANG_WEB
void setSpirvInstruction(const TSpirvInstruction& inst) { spirvInst = inst; }
const TSpirvInstruction& getSpirvInstruction() const { return spirvInst; }
#endif
protected:
TIntermAggregate(const TIntermAggregate&); // disallow copy constructor
TIntermAggregate& operator=(const TIntermAggregate&); // disallow assignment operator
@ -1608,6 +1673,9 @@ protected:
bool optimize;
bool debug;
TPragmaTable* pragmaTable;
#ifndef GLSLANG_WEB
TSpirvInstruction spirvInst;
#endif
};
//
@ -1625,8 +1693,11 @@ public:
flatten(false), dontFlatten(false) {}
virtual void traverse(TIntermTraverser*);
virtual TIntermTyped* getCondition() const { return condition; }
virtual void setCondition(TIntermTyped* c) { condition = c; }
virtual TIntermNode* getTrueBlock() const { return trueBlock; }
virtual void setTrueBlock(TIntermTyped* tb) { trueBlock = tb; }
virtual TIntermNode* getFalseBlock() const { return falseBlock; }
virtual void setFalseBlock(TIntermTyped* fb) { falseBlock = fb; }
virtual TIntermSelection* getAsSelectionNode() { return this; }
virtual const TIntermSelection* getAsSelectionNode() const { return this; }

View file

@ -2,7 +2,7 @@
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// Copyright (C) 2018 Google, Inc.
// Copyright (C) 2018-2020 Google, Inc.
//
// All rights reserved.
//
@ -42,6 +42,10 @@
#include <cstdlib>
#include <climits>
#ifdef _MSC_VER
#pragma warning(disable: 4146) // warning C4146: unary minus operator applied to unsigned type, result still unsigned
#endif
namespace {
using namespace glslang;
@ -529,7 +533,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EbtDouble:
case EbtFloat16:
case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
// Note: avoid UBSAN error regarding negating 0x80000000
case EbtInt: newConstArray[i].setIConst(
unionArray[i].getIConst() == 0x80000000
? -0x7FFFFFFF - 1
: -unionArray[i].getIConst());
break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
#ifndef GLSLANG_WEB
case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
@ -599,17 +608,11 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setDConst(log(unionArray[i].getDConst()));
break;
case EOpExp2:
{
const double inv_log2_e = 0.69314718055994530941723212145818;
newConstArray[i].setDConst(exp(unionArray[i].getDConst() * inv_log2_e));
break;
}
newConstArray[i].setDConst(exp2(unionArray[i].getDConst()));
break;
case EOpLog2:
{
const double log2_e = 1.4426950408889634073599246810019;
newConstArray[i].setDConst(log2_e * log(unionArray[i].getDConst()));
break;
}
newConstArray[i].setDConst(log2(unionArray[i].getDConst()));
break;
case EOpSqrt:
newConstArray[i].setDConst(sqrt(unionArray[i].getDConst()));
break;
@ -1012,6 +1015,7 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EOpMin:
case EOpMax:
case EOpMix:
case EOpMod:
case EOpClamp:
case EOpLessThan:
case EOpGreaterThan:
@ -1074,6 +1078,14 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EOpPow:
newConstArray[comp].setDConst(pow(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
break;
case EOpMod:
{
double arg0 = childConstUnions[0][arg0comp].getDConst();
double arg1 = childConstUnions[1][arg1comp].getDConst();
double result = arg0 - arg1 * floor(arg0 / arg1);
newConstArray[comp].setDConst(result);
break;
}
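// Added illustration: worked values for the mod() identity above, x - y*floor(x/y):
//   mod( 5.3, 2.0) =  5.3 - 2.0*floor( 2.65) =  5.3 - 4.0 = 1.3
//   mod(-1.5, 2.0) = -1.5 - 2.0*floor(-0.75) = -1.5 + 2.0 = 0.5   (result carries the sign of y)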
case EOpMin:
switch(children[0]->getAsTyped()->getBasicType()) {
case EbtFloat16:

View file

@ -71,6 +71,13 @@ void TIntermConstantUnion::traverse(TIntermTraverser *it)
it->visitConstantUnion(this);
}
const TString& TIntermSymbol::getAccessName() const {
if (getBasicType() == EbtBlock)
return getType().getTypeName();
else
return getName();
}
//
// Traverse a binary node.
//

View file

@ -1,7 +1,7 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2015 LunarG, Inc.
// Copyright (C) 2015-2018 Google, Inc.
// Copyright (C) 2015-2020 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
@ -65,7 +65,7 @@ namespace glslang {
// Returns the added node.
//
TIntermSymbol* TIntermediate::addSymbol(int id, const TString& name, const TType& type, const TConstUnionArray& constArray,
TIntermSymbol* TIntermediate::addSymbol(long long id, const TString& name, const TType& type, const TConstUnionArray& constArray,
TIntermTyped* constSubtree, const TSourceLoc& loc)
{
TIntermSymbol* node = new TIntermSymbol(id, name, type);
@ -113,14 +113,14 @@ TIntermSymbol* TIntermediate::addSymbol(const TType& type, const TSourceLoc& loc
//
// Returns nullptr if the working conversions and promotions could not be found.
//
TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIntermTyped* right, const TSourceLoc& loc)
{
// No operations work on blocks
if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
return nullptr;
// Convert "reference +/- int" and "reference - reference" to integer math
if ((op == EOpAdd || op == EOpSub) && extensionRequested(E_GL_EXT_buffer_reference2)) {
if (op == EOpAdd || op == EOpSub) {
// No addressing math on struct with unsized array.
if ((left->isReference() && left->getType().getReferentType()->containsUnsizedArray()) ||
@ -140,43 +140,44 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
return node;
}
if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
const TType& referenceType = right->getType();
TIntermConstantUnion* size = addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
left = createConversion(EbtInt64, left);
left = addBinaryMath(EOpMul, left, size, loc);
TIntermTyped *node = addBinaryMath(op, left, right, loc);
node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
return node;
}
if (op == EOpSub && left->isReference() && right->isReference()) {
TIntermConstantUnion* size = addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
left = addBinaryMath(EOpSub, left, right, loc);
TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
return node;
}
// No other math operators supported on references
if (left->isReference() || right->isReference()) {
return nullptr;
}
}
if (op == EOpAdd && right->isReference() && isTypeInt(left->getBasicType())) {
const TType& referenceType = right->getType();
TIntermConstantUnion* size =
addConstantUnion((unsigned long long)computeBufferReferenceTypeSize(right->getType()), loc, true);
right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
left = createConversion(EbtInt64, left);
left = addBinaryMath(EOpMul, left, size, loc);
TIntermTyped *node = addBinaryMath(op, left, right, loc);
node = addBuiltInFunctionCall(loc, EOpConvUint64ToPtr, true, node, referenceType);
return node;
}
if (op == EOpSub && left->isReference() && right->isReference()) {
TIntermConstantUnion* size =
addConstantUnion((long long)computeBufferReferenceTypeSize(left->getType()), loc, true);
left = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, left, TType(EbtUint64));
right = addBuiltInFunctionCall(loc, EOpConvPtrToUint64, true, right, TType(EbtUint64));
left = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, left, TType(EbtInt64));
right = addBuiltInFunctionCall(loc, EOpConvUint64ToInt64, true, right, TType(EbtInt64));
left = addBinaryMath(EOpSub, left, right, loc);
TIntermTyped *node = addBinaryMath(EOpDiv, left, size, loc);
return node;
}
// No other math operators supported on references
if (left->isReference() || right->isReference())
return nullptr;
// Try converting the children's base types to compatible types.
auto children = addConversion(op, left, right);
auto children = addPairConversion(op, left, right);
left = std::get<0>(children);
right = std::get<1>(children);
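
The buffer-reference handling in this hunk mirrors plain C pointer arithmetic: an integer added to a reference is scaled by the size of the referenced type before the 64-bit addition, and the difference of two references is divided by that size to yield an element count. A hedged sketch of the same arithmetic on raw addresses (refAdd, refDiff and refSize are illustrative names, not glslang API):

    #include <cstdint>

    // Scaling used when lowering "reference + int" to 64-bit integer math.
    uint64_t refAdd(uint64_t addr, int64_t index, uint64_t refSize)
    {
        return addr + static_cast<uint64_t>(index) * refSize;
    }

    // "reference - reference" becomes a signed element count.
    int64_t refDiff(uint64_t a, uint64_t b, uint64_t refSize)
    {
        return (static_cast<int64_t>(a) - static_cast<int64_t>(b)) / static_cast<int64_t>(refSize);
    }

Because the scaled result only becomes a reference again after a conversion, the "+=" and "-=" forms are rewritten as plain assignments in addAssign further below.
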
@ -226,13 +227,12 @@ TIntermTyped* TIntermediate::addBinaryMath(TOperator op, TIntermTyped* left, TIn
//
// Low level: add binary node (no promotions or other argument modifications)
//
TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc) const
TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
const TSourceLoc& loc) const
{
// build the node
TIntermBinary* node = new TIntermBinary(op);
if (loc.line == 0)
loc = left->getLoc();
node->setLoc(loc);
node->setLoc(loc.line != 0 ? loc : left->getLoc());
node->setLeft(left);
node->setRight(right);
@ -242,7 +242,8 @@ TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TI
//
// like non-type form, but sets node's type.
//
TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc, const TType& type) const
TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right,
const TSourceLoc& loc, const TType& type) const
{
TIntermBinary* node = addBinaryNode(op, left, right, loc);
node->setType(type);
@ -252,12 +253,10 @@ TIntermBinary* TIntermediate::addBinaryNode(TOperator op, TIntermTyped* left, TI
//
// Low level: add unary node (no promotions or other argument modifications)
//
TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc) const
TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc) const
{
TIntermUnary* node = new TIntermUnary(op);
if (loc.line == 0)
loc = child->getLoc();
node->setLoc(loc);
node->setLoc(loc.line != 0 ? loc : child->getLoc());
node->setOperand(child);
return node;
@ -266,7 +265,8 @@ TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSo
//
// like non-type form, but sets node's type.
//
TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc loc, const TType& type) const
TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, const TSourceLoc& loc, const TType& type)
const
{
TIntermUnary* node = addUnaryNode(op, child, loc);
node->setType(type);
@ -281,7 +281,8 @@ TIntermUnary* TIntermediate::addUnaryNode(TOperator op, TIntermTyped* child, TSo
// Returns nullptr if the 'right' type could not be converted to match the 'left' type,
// or the resulting operation cannot be properly promoted.
//
TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc loc)
TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right,
const TSourceLoc& loc)
{
// No block assignment
if (left->getType().getBasicType() == EbtBlock || right->getType().getBasicType() == EbtBlock)
@ -290,9 +291,7 @@ TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TInterm
// Convert "reference += int" to "reference = reference + int". We need this because the
// "reference + int" calculation involves a cast back to the original type, which makes it
// not an lvalue.
if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference() &&
extensionRequested(E_GL_EXT_buffer_reference2)) {
if ((op == EOpAddAssign || op == EOpSubAssign) && left->isReference()) {
if (!(right->getType().isScalar() && right->getType().isIntegerDomain()))
return nullptr;
@ -338,7 +337,8 @@ TIntermTyped* TIntermediate::addAssign(TOperator op, TIntermTyped* left, TInterm
// Returns the added node.
// The caller should set the type of the returned node.
//
TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc loc)
TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index,
const TSourceLoc& loc)
{
// caller should set the type
return addBinaryNode(op, base, index, loc);
@ -349,7 +349,8 @@ TIntermTyped* TIntermediate::addIndex(TOperator op, TIntermTyped* base, TIntermT
//
// Returns the added node.
//
TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child, TSourceLoc loc)
TIntermTyped* TIntermediate::addUnaryMath(TOperator op, TIntermTyped* child,
const TSourceLoc& loc)
{
if (child == 0)
return nullptr;
@ -495,7 +496,8 @@ TIntermTyped* TIntermediate::addBuiltInFunctionCall(const TSourceLoc& loc, TOper
// Returns an aggregate node, which could be the one passed in if
// it was already an aggregate.
//
TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type, TSourceLoc loc)
TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator op, const TType& type,
const TSourceLoc& loc)
{
TIntermAggregate* aggNode;
@ -510,8 +512,6 @@ TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator o
//
aggNode = new TIntermAggregate();
aggNode->getSequence().push_back(node);
if (loc.line == 0)
loc = node->getLoc();
}
} else
aggNode = new TIntermAggregate();
@ -520,8 +520,8 @@ TIntermTyped* TIntermediate::setAggregateOperator(TIntermNode* node, TOperator o
// Set the operator.
//
aggNode->setOperator(op);
if (loc.line != 0)
aggNode->setLoc(loc);
if (loc.line != 0 || node != nullptr)
aggNode->setLoc(loc.line != 0 ? loc : node->getLoc());
aggNode->setType(type);
@ -538,7 +538,7 @@ bool TIntermediate::isConversionAllowed(TOperator op, TIntermTyped* node) const
return false;
case EbtAtomicUint:
case EbtSampler:
case EbtAccStructNV:
case EbtAccStruct:
// opaque types can be passed to functions
if (op == EOpFunction)
break;
@ -819,22 +819,25 @@ TIntermTyped* TIntermediate::createConversion(TBasicType convertTo, TIntermTyped
node->getBasicType() == EbtFloat ||
node->getBasicType() == EbtDouble);
if (! getArithemeticInt8Enabled()) {
if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes))
if (((convertTo == EbtInt8 || convertTo == EbtUint8) && ! convertFromIntTypes) ||
((node->getBasicType() == EbtInt8 || node->getBasicType() == EbtUint8) && ! convertToIntTypes)) {
if (! getArithemeticInt8Enabled()) {
return nullptr;
}
}
if (! getArithemeticInt16Enabled()) {
if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes))
if (((convertTo == EbtInt16 || convertTo == EbtUint16) && ! convertFromIntTypes) ||
((node->getBasicType() == EbtInt16 || node->getBasicType() == EbtUint16) && ! convertToIntTypes)) {
if (! getArithemeticInt16Enabled()) {
return nullptr;
}
}
if (! getArithemeticFloat16Enabled()) {
if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
(node->getBasicType() == EbtFloat16 && ! convertToFloatTypes))
if ((convertTo == EbtFloat16 && ! convertFromFloatTypes) ||
(node->getBasicType() == EbtFloat16 && ! convertToFloatTypes)) {
if (! getArithemeticFloat16Enabled()) {
return nullptr;
}
}
#endif
@ -887,7 +890,7 @@ TIntermTyped* TIntermediate::addConversion(TBasicType convertTo, TIntermTyped* n
// Returns the converted pair of nodes.
// Returns <nullptr, nullptr> when there is no conversion.
std::tuple<TIntermTyped*, TIntermTyped*>
TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
TIntermediate::addPairConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1)
{
if (!isConversionAllowed(op, node0) || !isConversionAllowed(op, node1))
return std::make_tuple(nullptr, nullptr);
@ -940,7 +943,7 @@ TIntermediate::addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* no
if (node0->getBasicType() == node1->getBasicType())
return std::make_tuple(node0, node1);
promoteTo = getConversionDestinatonType(node0->getBasicType(), node1->getBasicType(), op);
promoteTo = getConversionDestinationType(node0->getBasicType(), node1->getBasicType(), op);
if (std::get<0>(promoteTo) == EbtNumTypes || std::get<1>(promoteTo) == EbtNumTypes)
return std::make_tuple(nullptr, nullptr);
@ -1040,64 +1043,30 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
// Note: callers are responsible for other aspects of shape,
// like vector and matrix sizes.
TBasicType promoteTo;
// GL_EXT_shader_16bit_storage can't do OpConstantComposite with
// 16-bit types, so disable promotion for those types.
bool canPromoteConstant = true;
switch (op) {
//
// Explicit conversions (unary operations)
//
case EOpConstructBool:
promoteTo = EbtBool;
break;
case EOpConstructFloat:
promoteTo = EbtFloat;
break;
case EOpConstructInt:
promoteTo = EbtInt;
break;
case EOpConstructUint:
promoteTo = EbtUint;
break;
#ifndef GLSLANG_WEB
case EOpConstructDouble:
promoteTo = EbtDouble;
break;
case EOpConstructFloat16:
promoteTo = EbtFloat16;
canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
break;
case EOpConstructInt8:
promoteTo = EbtInt8;
canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
break;
case EOpConstructUint8:
promoteTo = EbtUint8;
canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
break;
case EOpConstructInt16:
promoteTo = EbtInt16;
canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
break;
case EOpConstructUint16:
promoteTo = EbtUint16;
canPromoteConstant = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
break;
case EOpConstructInt64:
promoteTo = EbtInt64;
break;
case EOpConstructUint64:
promoteTo = EbtUint64;
break;
#endif
//
// Implicit conversions
//
case EOpLogicalNot:
case EOpFunctionCall:
@ -1152,9 +1121,7 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
if (type.getBasicType() == node->getType().getBasicType())
return node;
if (canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
promoteTo = type.getBasicType();
else
if (! canImplicitlyPromote(node->getBasicType(), type.getBasicType(), op))
return nullptr;
break;
@ -1164,9 +1131,7 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
case EOpLeftShiftAssign:
case EOpRightShiftAssign:
{
if (getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)
promoteTo = type.getBasicType();
else {
if (!(getSource() == EShSourceHlsl && node->getType().getBasicType() == EbtBool)) {
if (isTypeInt(type.getBasicType()) && isTypeInt(node->getBasicType()))
return node;
else
@ -1184,13 +1149,44 @@ TIntermTyped* TIntermediate::addConversion(TOperator op, const TType& type, TInt
return nullptr;
}
bool canPromoteConstant = true;
#ifndef GLSLANG_WEB
// GL_EXT_shader_16bit_storage can't do OpConstantComposite with
// 16-bit types, so disable promotion for those types.
// Many issues with this, from JohnK:
// - this isn't really right to discuss SPIR-V here
// - this could easily be entirely about scalars, so is overstepping
// - we should be looking at what the shader asked for, and saying whether or
// not it can be done, in the parser, by calling requireExtensions(), not
//    changing language semantics on the fly by asking what extensions are in use
// - at the time of this writing (14-Aug-2020), no test results are changed by this.
switch (op) {
case EOpConstructFloat16:
canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16);
break;
case EOpConstructInt8:
case EOpConstructUint8:
canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8);
break;
case EOpConstructInt16:
case EOpConstructUint16:
canPromoteConstant = numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16);
break;
default:
break;
}
#endif
if (canPromoteConstant && node->getAsConstantUnion())
return promoteConstantUnion(promoteTo, node->getAsConstantUnion());
return promoteConstantUnion(type.getBasicType(), node->getAsConstantUnion());
//
// Add a new newNode for the conversion.
//
TIntermTyped* newNode = createConversion(promoteTo, node);
TIntermTyped* newNode = createConversion(type.getBasicType(), node);
return newNode;
}
@ -1620,7 +1616,7 @@ bool TIntermediate::isFPIntegralConversion(TBasicType from, TBasicType to) const
//
bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op) const
{
if (isEsProfile() || version == 110)
if ((isEsProfile() && version < 310 ) || version == 110)
return false;
if (from == to)
@ -1659,46 +1655,51 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
}
}
bool explicitTypesEnabled = extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int32) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int64) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float32) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float64);
if (explicitTypesEnabled) {
// integral promotions
if (isIntegralPromotion(from, to)) {
if (getSource() == EShSourceHlsl) {
// HLSL
if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
return true;
}
} else {
// GLSL
if (isIntegralPromotion(from, to) ||
isFPPromotion(from, to) ||
isIntegralConversion(from, to) ||
isFPConversion(from, to) ||
isFPIntegralConversion(from, to)) {
// floating-point promotions
if (isFPPromotion(from, to)) {
return true;
}
// integral conversions
if (isIntegralConversion(from, to)) {
return true;
}
// floating-point conversions
if (isFPConversion(from, to)) {
return true;
}
// floating-integral conversions
if (isFPIntegralConversion(from, to)) {
return true;
}
// hlsl supported conversions
if (getSource() == EShSourceHlsl) {
if (from == EbtBool && (to == EbtInt || to == EbtUint || to == EbtFloat))
if (numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int8) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int16) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int32) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_int64) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float16) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float32) ||
numericFeatures.contains(TNumericFeatures::shader_explicit_arithmetic_types_float64)) {
return true;
}
}
}
if (isEsProfile()) {
switch (to) {
case EbtFloat:
switch (from) {
case EbtInt:
case EbtUint:
return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
default:
return false;
}
case EbtUint:
switch (from) {
case EbtInt:
return numericFeatures.contains(TNumericFeatures::shader_implicit_conversions);
default:
return false;
}
default:
return false;
}
} else {
switch (to) {
case EbtDouble:
@ -1708,13 +1709,14 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtInt64:
case EbtUint64:
case EbtFloat:
case EbtDouble:
return true;
return version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64);
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
case EbtFloat16:
return extensionRequested(E_GL_AMD_gpu_shader_half_float);
return (version >= 400 || numericFeatures.contains(TNumericFeatures::gpu_shader_fp64)) &&
numericFeatures.contains(TNumericFeatures::gpu_shader_half_float);
default:
return false;
}
@ -1722,16 +1724,14 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
switch (from) {
case EbtInt:
case EbtUint:
case EbtFloat:
return true;
case EbtBool:
return getSource() == EShSourceHlsl;
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
case EbtFloat16:
return
extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
return numericFeatures.contains(TNumericFeatures::gpu_shader_half_float) ||
getSource() == EShSourceHlsl;
default:
return false;
@ -1739,25 +1739,21 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtUint:
switch (from) {
case EbtInt:
return version >= 400 || getSource() == EShSourceHlsl;
case EbtUint:
return true;
return version >= 400 || getSource() == EShSourceHlsl || IsRequestedExtension(E_GL_ARB_gpu_shader5);
case EbtBool:
return getSource() == EShSourceHlsl;
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
return false;
}
case EbtInt:
switch (from) {
case EbtInt:
return true;
case EbtBool:
return getSource() == EShSourceHlsl;
case EbtInt16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
return false;
}
@ -1766,21 +1762,19 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtInt:
case EbtUint:
case EbtInt64:
case EbtUint64:
return true;
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
return false;
}
case EbtInt64:
switch (from) {
case EbtInt:
case EbtInt64:
return true;
case EbtInt16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
return false;
}
@ -1788,9 +1782,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
switch (from) {
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
case EbtFloat16:
return extensionRequested(E_GL_AMD_gpu_shader_half_float);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
break;
}
@ -1798,8 +1790,7 @@ bool TIntermediate::canImplicitlyPromote(TBasicType from, TBasicType to, TOperat
case EbtUint16:
switch (from) {
case EbtInt16:
case EbtUint16:
return extensionRequested(E_GL_AMD_gpu_shader_int16);
return numericFeatures.contains(TNumericFeatures::gpu_shader_int16);
default:
break;
}
@ -1926,12 +1917,14 @@ static TBasicType getCorrespondingUnsignedType(TBasicType type)
// integer type corresponding to the type of the operand with signed
// integer type.
std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const
std::tuple<TBasicType, TBasicType> TIntermediate::getConversionDestinationType(TBasicType type0, TBasicType type1, TOperator op) const
{
TBasicType res0 = EbtNumTypes;
TBasicType res1 = EbtNumTypes;
if (isEsProfile() || version == 110)
if ((isEsProfile() &&
(version < 310 || !numericFeatures.contains(TNumericFeatures::shader_implicit_conversions))) ||
version == 110)
return std::make_tuple(res0, res1);
if (getSource() == EShSourceHlsl) {
@ -2305,6 +2298,10 @@ TOperator TIntermediate::mapTypeToConstructorOp(const TType& type) const
case EbtReference:
op = EOpConstructReference;
break;
case EbtAccStruct:
op = EOpConstructAccStruct;
break;
#endif
default:
break;
@ -2463,7 +2460,7 @@ TIntermTyped* TIntermediate::addSelection(TIntermTyped* cond, TIntermTyped* true
//
// Get compatible types.
//
auto children = addConversion(EOpSequence, trueBlock, falseBlock);
auto children = addPairConversion(EOpSequence, trueBlock, falseBlock);
trueBlock = std::get<0>(children);
falseBlock = std::get<1>(children);
@ -2679,7 +2676,11 @@ TIntermTyped* TIntermediate::addSwizzle(TSwizzleSelectors<selectorType>& selecto
// 'swizzleOkay' says whether or not it is okay to consider a swizzle
// a valid part of the dereference chain.
//
const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay)
// 'bufferReferenceOk' says that if the type is a buffer_reference, the routine stops there instead of continuing to the left-most node.
//
//
const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool swizzleOkay , bool bufferReferenceOk)
{
do {
const TIntermBinary* binary = node->getAsBinaryNode();
@ -2697,6 +2698,8 @@ const TIntermTyped* TIntermediate::findLValueBase(const TIntermTyped* node, bool
return nullptr;
}
node = node->getAsBinaryNode()->getLeft();
if (bufferReferenceOk && node->isReference())
return node;
} while (true);
}
@ -2750,6 +2753,22 @@ TIntermBranch* TIntermediate::addBranch(TOperator branchOp, TIntermTyped* expres
return node;
}
// Propagate precision from formal function return type to actual return type,
// and on to its subtree.
void TIntermBranch::updatePrecision(TPrecisionQualifier parentPrecision)
{
TIntermTyped* exp = getExpression();
if (exp == nullptr)
return;
if (exp->getBasicType() == EbtInt || exp->getBasicType() == EbtUint ||
exp->getBasicType() == EbtFloat || exp->getBasicType() == EbtFloat16) {
if (parentPrecision != EpqNone && exp->getQualifier().precision == EpqNone) {
exp->propagatePrecision(parentPrecision);
}
}
}
//
// This is to be executed after the final root is put on top by the parsing
// process.
@ -2774,6 +2793,9 @@ bool TIntermediate::postProcess(TIntermNode* root, EShLanguage /*language*/)
case EShTexSampTransUpgradeTextureRemoveSampler:
performTextureUpgradeAndSamplerRemovalTransformation(root);
break;
case EShTexSampTransCount:
assert(0);
break;
}
#endif
@ -2854,7 +2876,7 @@ void TIntermediate::addToCallGraph(TInfoSink& /*infoSink*/, const TString& calle
return;
}
callGraph.push_front(TCall(caller, callee));
callGraph.emplace_front(caller, callee);
}
//
@ -3234,10 +3256,17 @@ bool TIntermediate::promoteUnary(TIntermUnary& node)
return false;
break;
default:
if (operand->getBasicType() != EbtFloat)
// HLSL uses this path for initial function signature finding for built-ins
// taking a single argument, which generally don't participate in
// operator-based type promotion (type conversion will occur later).
// For now, scalar argument cases are relying on the setType() call below.
if (getSource() == EShSourceHlsl)
break;
// GLSL only allows integer arguments for the cases identified above in the
// case statements.
if (operand->getBasicType() != EbtFloat)
return false;
}
@ -3247,9 +3276,11 @@ bool TIntermediate::promoteUnary(TIntermUnary& node)
return true;
}
// Propagate precision qualifiers *up* from children to parent.
void TIntermUnary::updatePrecision()
{
if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
if (operand->getQualifier().precision > getQualifier().precision)
getQualifier().precision = operand->getQualifier().precision;
}
@ -3745,20 +3776,33 @@ bool TIntermediate::promoteAggregate(TIntermAggregate& node)
return false;
}
// Propagate precision qualifiers *up* from children to parent, and then
// back *down* again to the children's subtrees.
void TIntermBinary::updatePrecision()
{
if (getBasicType() == EbtInt || getBasicType() == EbtUint || getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
if (getQualifier().precision != EpqNone) {
left->propagatePrecision(getQualifier().precision);
right->propagatePrecision(getQualifier().precision);
}
if (getBasicType() == EbtInt || getBasicType() == EbtUint ||
getBasicType() == EbtFloat || getBasicType() == EbtFloat16) {
if (op == EOpRightShift || op == EOpLeftShift) {
// For shifts get precision from left side only and thus no need to propagate
getQualifier().precision = left->getQualifier().precision;
} else {
getQualifier().precision = std::max(right->getQualifier().precision, left->getQualifier().precision);
if (getQualifier().precision != EpqNone) {
left->propagatePrecision(getQualifier().precision);
right->propagatePrecision(getQualifier().precision);
}
}
}
}
// Recursively propagate precision qualifiers *down* the subtree of the current node,
// until reaching a node that already has a precision qualifier or otherwise does
// not participate in precision propagation.
void TIntermTyped::propagatePrecision(TPrecisionQualifier newPrecision)
{
if (getQualifier().precision != EpqNone || (getBasicType() != EbtInt && getBasicType() != EbtUint && getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
if (getQualifier().precision != EpqNone ||
(getBasicType() != EbtInt && getBasicType() != EbtUint &&
getBasicType() != EbtFloat && getBasicType() != EbtFloat16))
return;
getQualifier().precision = newPrecision;
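
Taken together, the precision changes in this file propagate qualifiers up from operands to the parent node (left operand only for shifts), push the result back down into subtrees that carry no precision of their own, and, via TIntermBranch::updatePrecision above, apply the declared return-type precision to return expressions. A minimal sketch of the up-then-down rule (Node, updateBinary and propagateDown are illustrative, not glslang types; type checks are omitted):

    #include <algorithm>

    enum Precision { None, Low, Medium, High };

    struct Node { Precision prec = None; Node* left = nullptr; Node* right = nullptr; };

    // Downward pass: stop at nodes that already carry a precision qualifier.
    void propagateDown(Node* n, Precision p)
    {
        if (n == nullptr || n->prec != None)
            return;
        n->prec = p;
        propagateDown(n->left, p);
        propagateDown(n->right, p);
    }

    // Upward pass for a binary node; shifts take only the left operand's precision.
    void updateBinary(Node& n, bool isShift)
    {
        if (isShift) {
            n.prec = n.left->prec;
        } else {
            n.prec = std::max(n.left->prec, n.right->prec);
            if (n.prec != None) {
                propagateDown(n.left, n.prec);
                propagateDown(n.right, n.prec);
            }
        }
    }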


@ -74,14 +74,33 @@ public:
for (unsigned int f = 0; f < globals.size(); ++f) {
TIntermAggregate* candidate = globals[f]->getAsAggregate();
if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
functions.push_back(candidate);
destinations.push_back(candidate);
break;
}
}
}
typedef std::list<TIntermAggregate*> TFunctionStack;
TFunctionStack functions;
void pushGlobalReference(const TString& name)
{
TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
for (unsigned int f = 0; f < globals.size(); ++f) {
TIntermAggregate* candidate = globals[f]->getAsAggregate();
if (candidate && candidate->getOp() == EOpSequence &&
candidate->getSequence().size() == 1 &&
candidate->getSequence()[0]->getAsBinaryNode()) {
TIntermBinary* binary = candidate->getSequence()[0]->getAsBinaryNode();
TIntermSymbol* symbol = binary->getLeft()->getAsSymbolNode();
if (symbol && symbol->getQualifier().storage == EvqGlobal &&
symbol->getName() == name) {
destinations.push_back(candidate);
break;
}
}
}
}
typedef std::list<TIntermAggregate*> TDestinationStack;
TDestinationStack destinations;
protected:
// To catch which function calls are not dead, and hence which functions must be visited.
@ -117,16 +136,27 @@ protected:
// and only visit each function once.
void addFunctionCall(TIntermAggregate* call)
{
// // just use the map to ensure we process each function at most once
// just use the map to ensure we process each function at most once
if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
liveFunctions.insert(call->getName());
pushFunction(call->getName());
}
}
void addGlobalReference(const TString& name)
{
// just use the map to ensure we process each global at most once
if (liveGlobals.find(name) == liveGlobals.end()) {
liveGlobals.insert(name);
pushGlobalReference(name);
}
}
const TIntermediate& intermediate;
typedef std::unordered_set<TString> TLiveFunctions;
TLiveFunctions liveFunctions;
typedef std::unordered_set<TString> TLiveGlobals;
TLiveGlobals liveGlobals;
bool traverseAll;
private:


@ -127,22 +127,6 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
{
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect: // fall through
case EOpIndexDirectStruct: // fall through
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
return lValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
error(loc, " l-value required", op, "", "");
return true;
}
const char* symbol = nullptr;
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode != nullptr)
@ -157,11 +141,11 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EvqBuffer:
if (node->getQualifier().isReadOnly())
message = "can't modify a readonly buffer";
if (node->getQualifier().isShaderRecordNV())
if (node->getQualifier().isShaderRecord())
message = "can't modify a shaderrecordnv qualified buffer";
break;
case EvqHitAttrNV:
if (language != EShLangIntersectNV)
case EvqHitAttr:
if (language != EShLangIntersect)
message = "cannot modify hitAttributeNV in this stage";
break;
#endif
@ -181,9 +165,12 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtAccStructNV:
case EbtAccStruct:
message = "can't modify accelerationStructureNV";
break;
case EbtRayQuery:
message = "can't modify rayQueryEXT";
break;
#endif
default:
break;
@ -200,15 +187,40 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
// Everything else is okay, no error.
//
if (message == nullptr)
{
if (binaryNode) {
switch (binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect: // fall through
case EOpIndexDirectStruct: // fall through
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
return lValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
error(loc, " l-value required", op, "", "");
return true;
}
return false;
}
//
// If we get here, we have an error and a message.
//
const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true);
if (symNode)
error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
else
error(loc, " l-value required", op, "(%s)", message);
if (binaryNode && binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct)
if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName()))
error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str(), message);
else
error(loc, " l-value required", op, "\"%s\" (%s)", leftMostTypeNode->getAsSymbolNode()->getName().c_str(), message);
else
error(loc, " l-value required", op, "(%s)", message);
return true;
}
@ -216,28 +228,41 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
// Test for and give an error if the node can't be read from.
void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
TIntermBinary* binaryNode = node->getAsBinaryNode();
const TIntermSymbol* symNode = node->getAsSymbolNode();
if (! node)
return;
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
rValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
if (node->getQualifier().isWriteOnly()) {
const TIntermTyped* leftMostTypeNode = TIntermediate::findLValueBase(node, true);
if (symNode != nullptr)
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
else if (binaryNode &&
(binaryNode->getAsOperator()->getOp() == EOpIndexDirectStruct ||
binaryNode->getAsOperator()->getOp() == EOpIndexDirect))
if(IsAnonymous(leftMostTypeNode->getAsSymbolNode()->getName()))
error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getAccessName().c_str());
else
error(loc, "can't read from writeonly object: ", op, leftMostTypeNode->getAsSymbolNode()->getName().c_str());
else
error(loc, "can't read from writeonly object: ", op, "");
} else {
if (binaryNode) {
switch (binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
rValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
}
return;
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().isWriteOnly())
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
// Add 'symbol' to the list of deferred linkage symbols, which
@ -576,7 +601,6 @@ void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TStrin
selector.push_back(0);
}
#ifdef ENABLE_HLSL
//
// Make the passed-in variable information become a member of the
// global uniform block. If this doesn't exist yet, make it.
@ -621,7 +645,67 @@ void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& mem
++firstNewMember;
}
#endif
void TParseContextBase::growAtomicCounterBlock(int binding, const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList) {
// Make the atomic counter block, if not yet made.
const auto &at = atomicCounterBuffers.find(binding);
if (at == atomicCounterBuffers.end()) {
atomicCounterBuffers.insert({binding, (TVariable*)nullptr });
atomicCounterBlockFirstNewMember.insert({binding, 0});
}
TVariable*& atomicCounterBuffer = atomicCounterBuffers[binding];
int& bufferNewMember = atomicCounterBlockFirstNewMember[binding];
if (atomicCounterBuffer == nullptr) {
TQualifier blockQualifier;
blockQualifier.clear();
blockQualifier.storage = EvqBuffer;
char charBuffer[512];
if (binding != TQualifier::layoutBindingEnd) {
snprintf(charBuffer, 512, "%s_%d", getAtomicCounterBlockName(), binding);
} else {
snprintf(charBuffer, 512, "%s_0", getAtomicCounterBlockName());
}
TType blockType(new TTypeList, *NewPoolTString(charBuffer), blockQualifier);
setUniformBlockDefaults(blockType);
blockType.getQualifier().layoutPacking = ElpStd430;
atomicCounterBuffer = new TVariable(NewPoolTString(""), blockType, true);
// If we aren't auto mapping bindings then set the block to use the same
// binding as what the atomic was set to use
if (!intermediate.getAutoMapBindings()) {
atomicCounterBuffer->getWritableType().getQualifier().layoutBinding = binding;
}
bufferNewMember = 0;
atomicCounterBuffer->getWritableType().getQualifier().layoutSet = atomicCounterBlockSet;
}
// Add the requested member as a member to the global block.
TType* type = new TType;
type->shallowCopy(memberType);
type->setFieldName(memberName);
if (typeList)
type->setStruct(typeList);
TTypeLoc typeLoc = {type, loc};
atomicCounterBuffer->getType().getWritableStruct()->push_back(typeLoc);
// Insert into the symbol table.
if (bufferNewMember == 0) {
// This is the first request; we need a normal symbol table insert
if (symbolTable.insert(*atomicCounterBuffer))
trackLinkage(*atomicCounterBuffer);
else
error(loc, "failed to insert the global constant buffer", "buffer", "");
} else {
// This is a follow-on request; we need to amend the first insert
symbolTable.amend(*atomicCounterBuffer, bufferNewMember);
}
++bufferNewMember;
}
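
growAtomicCounterBlock above lazily creates one std430 buffer block per binding point, named from getAtomicCounterBlockName() and the binding number, and appends each atomic counter to it as a member; the first member for a binding triggers a symbol-table insert, later members amend the existing entry. A simplified sketch of that create-once-then-append pattern (Block and addCounter are illustrative, not glslang types):

    #include <map>
    #include <string>
    #include <vector>

    struct Block { std::string name; std::vector<std::string> members; };

    // One backing block per binding, created on first use.
    void addCounter(std::map<int, Block>& blocks, int binding,
                    const std::string& memberName, const std::string& baseName)
    {
        auto it = blocks.find(binding);
        if (it == blocks.end())
            it = blocks.emplace(binding, Block{ baseName + "_" + std::to_string(binding), {} }).first;
        // First push_back corresponds to the symbol-table insert, later ones to amend().
        it->second.members.push_back(memberName);
    }
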
void TParseContextBase::finish()
{


@ -67,7 +67,8 @@ struct TPragma {
class TScanContext;
class TPpContext;
typedef std::set<int> TIdSetType;
typedef std::set<long long> TIdSetType;
typedef std::map<const TTypeList*, std::map<size_t, const TTypeList*>> TStructRecord;
//
// Sharable code (as well as what's in TParseVersions) across
@ -82,7 +83,8 @@ public:
: TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
scopeMangler("::"),
symbolTable(symbolTable),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), blockNestingLevel(0), controlFlowNestingLevel(0),
currentFunctionType(nullptr),
postEntryPointReturn(false),
contextPragma(true, false),
beginInvocationInterlockCount(0), endInvocationInterlockCount(0),
@ -90,7 +92,8 @@ public:
limits(resources.limits),
globalUniformBlock(nullptr),
globalUniformBinding(TQualifier::layoutBindingEnd),
globalUniformSet(TQualifier::layoutSetEnd)
globalUniformSet(TQualifier::layoutSetEnd),
atomicCounterBlockSet(TQualifier::layoutSetEnd)
{
if (entryPoint != nullptr)
sourceEntryPointName = *entryPoint;
@ -152,10 +155,11 @@ public:
extensionCallback(line, extension, behavior);
}
#ifdef ENABLE_HLSL
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
#endif
// Manage global buffer (used for backing atomic counters in GLSL when using relaxed Vulkan semantics)
virtual void growAtomicCounterBlock(int binding, const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
@ -176,7 +180,8 @@ public:
TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
int statementNestingLevel; // 0 if outside all flow control or compound statements
int loopNestingLevel; // 0 if outside all loops
int structNestingLevel; // 0 if outside blocks and structures
int structNestingLevel; // 0 if outside structures
int blockNestingLevel; // 0 if outside blocks
int controlFlowNestingLevel; // 0 if outside all flow control
const TType* currentFunctionType; // the return type of the function that's currently being parsed
bool functionReturnsValue; // true if a non-void function has a return
@ -227,7 +232,25 @@ protected:
// override this to set the language-specific name
virtual const char* getGlobalUniformBlockName() const { return ""; }
virtual void setUniformBlockDefaults(TType&) const { }
virtual void finalizeGlobalUniformBlockLayout(TVariable&) { }
virtual void finalizeGlobalUniformBlockLayout(TVariable&) {}
// Manage the atomic counter block (used for atomic_uints with Vulkan-Relaxed)
TMap<int, TVariable*> atomicCounterBuffers;
unsigned int atomicCounterBlockSet;
TMap<int, int> atomicCounterBlockFirstNewMember;
// override this to set the language-specific name
virtual const char* getAtomicCounterBlockName() const { return ""; }
virtual void setAtomicCounterBlockDefaults(TType&) const {}
virtual void setInvariant(const TSourceLoc& loc, const char* builtin) {}
virtual void finalizeAtomicCounterBlockLayout(TVariable&) {}
bool isAtomicCounterBlock(const TSymbol& symbol) {
const TVariable* var = symbol.getAsVariable();
if (!var)
return false;
const auto& at = atomicCounterBuffers.find(var->getType().getQualifier().layoutBinding);
return (at != atomicCounterBuffers.end() && (*at).second->getType() == var->getType());
}
virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, TPrefixType prefix,
va_list args);
@ -290,6 +313,9 @@ public:
bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
void parserError(const char* s); // for bison's yyerror
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr) override;
virtual void growAtomicCounterBlock(int binding, const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr) override;
void reservedErrorCheck(const TSourceLoc&, const TString&);
void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override;
bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override;
@ -315,6 +341,7 @@ public:
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
TIntermTyped* handleDotSwizzle(const TSourceLoc&, TIntermTyped* base, const TString& field);
void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, int member, const TString& memberName);
TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
@ -326,6 +353,7 @@ public:
TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*);
void addInputArgumentConversions(const TFunction&, TIntermNode*&) const;
TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const;
TIntermTyped* addAssign(const TSourceLoc&, TOperator op, TIntermTyped* left, TIntermTyped* right);
void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&);
void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&);
@ -335,6 +363,10 @@ public:
void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
void memorySemanticsCheck(const TSourceLoc&, const TFunction&, const TIntermOperator& callNode);
TIntermTyped* vkRelaxedRemapFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
// returns true if the variable was remapped to something else
bool vkRelaxedRemapUniformVariable(const TSourceLoc&, TString&, const TPublicType&, TArraySizes*, TIntermTyped*, TType&);
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
@ -358,10 +390,10 @@ public:
void boolCheck(const TSourceLoc&, const TPublicType&);
void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
void accStructNVCheck(const TSourceLoc & loc, const TType & type, const TString & identifier);
void accStructCheck(const TSourceLoc & loc, const TType & type, const TString & identifier);
void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
void memberQualifierCheck(glslang::TPublicType&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&, bool isMemberCheck = false);
void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);
@ -387,7 +419,7 @@ public:
void arrayLimitCheck(const TSourceLoc&, const TString&, int size);
void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature);
void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&);
void inductiveLoopBodyCheck(TIntermNode*, long long loopIndexId, TSymbolTable&);
void constantIndexExpressionCheck(TIntermNode*);
void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&);
@ -412,17 +444,21 @@ public:
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStorageRemap(const TSourceLoc&, const TString*, TQualifier&);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
void fixXfbOffsets(TQualifier&, TTypeList&);
void fixBlockUniformOffsets(TQualifier&, TTypeList&);
void fixBlockUniformLayoutMatrix(TQualifier&, TTypeList*, TTypeList*);
void fixBlockUniformLayoutPacking(TQualifier&, TTypeList*, TTypeList*);
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
void invariantCheck(const TSourceLoc&, const TQualifier&);
void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
const TTypeList* recordStructCopy(TStructRecord&, const TType*, const TType*);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
@ -435,6 +471,22 @@ public:
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
// Function attributes
void handleFunctionAttributes(const TSourceLoc&, const TAttributes&, TFunction*);
// GL_EXT_spirv_intrinsics
TSpirvRequirement* makeSpirvRequirement(const TSourceLoc& loc, const TString& name,
const TIntermAggregate* extensions, const TIntermAggregate* capabilities);
TSpirvRequirement* mergeSpirvRequirements(const TSourceLoc& loc, TSpirvRequirement* spirvReq1,
TSpirvRequirement* spirvReq2);
TSpirvTypeParameters* makeSpirvTypeParameters(const TSourceLoc& loc, const TIntermConstantUnion* constant);
TSpirvTypeParameters* makeSpirvTypeParameters(const TPublicType& type);
TSpirvTypeParameters* mergeSpirvTypeParameters(TSpirvTypeParameters* spirvTypeParams1,
TSpirvTypeParameters* spirvTypeParams2);
TSpirvInstruction* makeSpirvInstruction(const TSourceLoc& loc, const TString& name, const TString& value);
TSpirvInstruction* makeSpirvInstruction(const TSourceLoc& loc, const TString& name, int value);
TSpirvInstruction* mergeSpirvInstruction(const TSourceLoc& loc, TSpirvInstruction* spirvInst1,
TSpirvInstruction* spirvInst2);
#endif
void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember);
@ -453,6 +505,15 @@ protected:
void finish() override;
#endif
virtual const char* getGlobalUniformBlockName() const override;
virtual void finalizeGlobalUniformBlockLayout(TVariable&) override;
virtual void setUniformBlockDefaults(TType& block) const override;
virtual const char* getAtomicCounterBlockName() const override;
virtual void finalizeAtomicCounterBlockLayout(TVariable&) override;
virtual void setAtomicCounterBlockDefaults(TType& block) const override;
virtual void setInvariant(const TSourceLoc& loc, const char* builtin) override;
public:
//
// Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access
@ -477,12 +538,15 @@ protected:
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
TQualifier globalSharedDefaults;
TString currentCaller; // name of last function body entered (not valid when at global scope)
#ifndef GLSLANG_WEB
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
TStructRecord matrixFixRecord;
TStructRecord packingFixRecord;
//
// Geometry shader input arrays:

Some files were not shown because too many files have changed in this diff.