- add 3rd party vulkan dependencies

- add stubs for a vulkan hw renderer backend
- add RAII wrappers for vulkan object types
- add builder classes to isolate vulkan boilerplate code
- add a swap chain class
This commit is contained in:
Magnus Norddahl 2019-02-20 21:21:57 +01:00
parent 32e65ff11d
commit c6b29846d0
133 changed files with 107089 additions and 2 deletions
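The commit message above mentions RAII wrappers for the Vulkan object types. The wrapper code itself is not part of this excerpt, so the following is only a minimal sketch of the RAII idea applied to a VkSemaphore; the class name VulkanSemaphoreSketch and its layout are illustrative assumptions, not the names used in this commit.

#include <vulkan/vulkan.h>
#include <stdexcept>

// Sketch only: owns a VkSemaphore and releases it in the destructor, so the
// handle cannot leak on early returns or exceptions.
class VulkanSemaphoreSketch
{
public:
    explicit VulkanSemaphoreSketch(VkDevice device) : device(device)
    {
        VkSemaphoreCreateInfo info = {};
        info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        if (vkCreateSemaphore(device, &info, nullptr, &semaphore) != VK_SUCCESS)
            throw std::runtime_error("vkCreateSemaphore failed");
    }

    ~VulkanSemaphoreSketch() { vkDestroySemaphore(device, semaphore, nullptr); }

    // Non-copyable: exactly one owner per Vulkan handle.
    VulkanSemaphoreSketch(const VulkanSemaphoreSketch&) = delete;
    VulkanSemaphoreSketch& operator=(const VulkanSemaphoreSketch&) = delete;

    VkSemaphore semaphore = VK_NULL_HANDLE;

private:
    VkDevice device;
};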

View File

@@ -292,6 +292,10 @@ mark_as_advanced( FORCE_INTERNAL_GME )
option(FORCE_INTERNAL_ASMJIT "Use internal asmjit" ON)
mark_as_advanced( FORCE_INTERNAL_ASMJIT )
add_subdirectory( glslang/glslang)
add_subdirectory( glslang/spirv )
add_subdirectory( glslang/OGLCompilersDLL )
# Fast math flags, required by some subprojects
set( ZD_FASTMATH_FLAG "" )
if( ZD_CMAKE_COMPILER_IS_GNUCXX_COMPATIBLE )

View File

@@ -0,0 +1,14 @@
set(SOURCES InitializeDll.cpp InitializeDll.h)
add_library(OGLCompiler STATIC ${SOURCES})
set_property(TARGET OGLCompiler PROPERTY FOLDER glslang)
set_property(TARGET OGLCompiler PROPERTY POSITION_INDEPENDENT_CODE ON)
if(WIN32)
source_group("Source" FILES ${SOURCES})
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OGLCompiler
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif(ENABLE_GLSLANG_INSTALL)

View File

@@ -0,0 +1,165 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#define SH_EXPORTING
#include <cassert>
#include "InitializeDll.h"
#include "../glslang/Include/InitializeGlobals.h"
#include "../glslang/Public/ShaderLang.h"
#include "../glslang/Include/PoolAlloc.h"
namespace glslang {
OS_TLSIndex ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
// Per-process initialization.
// Needs to be called at least once before parsing, etc. is done.
// Will also do thread initialization for the calling thread; other
// threads will need to do that explicitly.
bool InitProcess()
{
glslang::GetGlobalLock();
if (ThreadInitializeIndex != OS_INVALID_TLS_INDEX) {
//
// Function is re-entrant.
//
glslang::ReleaseGlobalLock();
return true;
}
ThreadInitializeIndex = OS_AllocTLSIndex();
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "InitProcess(): Failed to allocate TLS area for init flag");
glslang::ReleaseGlobalLock();
return false;
}
if (! InitializePoolIndex()) {
assert(0 && "InitProcess(): Failed to initialize global pool");
glslang::ReleaseGlobalLock();
return false;
}
if (! InitThread()) {
assert(0 && "InitProcess(): Failed to initialize thread");
glslang::ReleaseGlobalLock();
return false;
}
glslang::ReleaseGlobalLock();
return true;
}
// Per-thread scoped initialization.
// Must be called at least once by each new thread sharing the
// symbol tables, etc., needed to parse.
bool InitThread()
{
//
// This function is re-entrant
//
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "InitThread(): Process hasn't been initalised.");
return false;
}
if (OS_GetTLSValue(ThreadInitializeIndex) != 0)
return true;
if (! OS_SetTLSValue(ThreadInitializeIndex, (void *)1)) {
assert(0 && "InitThread(): Unable to set init flag.");
return false;
}
glslang::SetThreadPoolAllocator(nullptr);
return true;
}
// Not necessary to call this: InitThread() is reentrant, and the need
// to do per thread tear down has been removed.
//
// This is kept, with memory management removed, to satisfy any exiting
// calls to it that rely on it.
bool DetachThread()
{
bool success = true;
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
return true;
//
// Function is re-entrant and this thread may not have been initialized.
//
if (OS_GetTLSValue(ThreadInitializeIndex) != 0) {
if (!OS_SetTLSValue(ThreadInitializeIndex, (void *)0)) {
assert(0 && "DetachThread(): Unable to clear init flag.");
success = false;
}
}
return success;
}
// Not necessary to call this: InitProcess() is reentrant.
//
// This is kept, with memory management removed, to satisfy any exiting
// calls to it that rely on it.
//
// Users of glslang should call shFinalize() or glslang::FinalizeProcess() for
// process-scoped memory tear down.
bool DetachProcess()
{
bool success = true;
if (ThreadInitializeIndex == OS_INVALID_TLS_INDEX)
return true;
success = DetachThread();
OS_FreeTLSIndex(ThreadInitializeIndex);
ThreadInitializeIndex = OS_INVALID_TLS_INDEX;
return success;
}
} // end namespace glslang
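The comments above spell out the initialization contract: InitProcess() must run once per process before any parsing (it also initializes the calling thread), and InitThread() must run once in every additional thread that shares the symbol tables. A minimal sketch of that call pattern, assuming a worker thread that does the parsing (not code from this commit):

#include "InitializeDll.h"
#include <thread>

static void ParseWorker()
{
    // Additional threads set up their own TLS slot before touching glslang.
    glslang::InitThread();
    // ... parse/compile shaders on this thread ...
}

int main()
{
    glslang::InitProcess();   // also performs InitThread() for the calling thread
    std::thread worker(ParseWorker);
    worker.join();
    glslang::DetachProcess(); // legacy teardown; the comments above recommend FinalizeProcess()
    return 0;
}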

View File

@@ -0,0 +1,49 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef __INITIALIZEDLL_H
#define __INITIALIZEDLL_H
#include "../glslang/OSDependent/osinclude.h"
namespace glslang {
bool InitProcess();
bool InitThread();
bool DetachThread(); // not called from standalone, perhaps other tools rely on parts of it
bool DetachProcess(); // not called from standalone, perhaps other tools rely on parts of it
} // end namespace glslang
#endif // __INITIALIZEDLL_H

View File

@@ -0,0 +1,121 @@
if(WIN32)
add_subdirectory(OSDependent/Windows)
elseif(UNIX)
add_subdirectory(OSDependent/Unix)
else(WIN32)
message("unknown platform")
endif(WIN32)
set(SOURCES
MachineIndependent/glslang.y
MachineIndependent/glslang_tab.cpp
MachineIndependent/attribute.cpp
MachineIndependent/Constant.cpp
MachineIndependent/iomapper.cpp
MachineIndependent/InfoSink.cpp
MachineIndependent/Initialize.cpp
MachineIndependent/IntermTraverse.cpp
MachineIndependent/Intermediate.cpp
MachineIndependent/ParseContextBase.cpp
MachineIndependent/ParseHelper.cpp
MachineIndependent/PoolAlloc.cpp
MachineIndependent/RemoveTree.cpp
MachineIndependent/Scan.cpp
MachineIndependent/ShaderLang.cpp
MachineIndependent/SymbolTable.cpp
MachineIndependent/Versions.cpp
MachineIndependent/intermOut.cpp
MachineIndependent/limits.cpp
MachineIndependent/linkValidate.cpp
MachineIndependent/parseConst.cpp
MachineIndependent/reflection.cpp
MachineIndependent/preprocessor/Pp.cpp
MachineIndependent/preprocessor/PpAtom.cpp
MachineIndependent/preprocessor/PpContext.cpp
MachineIndependent/preprocessor/PpScanner.cpp
MachineIndependent/preprocessor/PpTokens.cpp
MachineIndependent/propagateNoContraction.cpp
GenericCodeGen/CodeGen.cpp
GenericCodeGen/Link.cpp)
set(HEADERS
Public/ShaderLang.h
Include/arrays.h
Include/BaseTypes.h
Include/Common.h
Include/ConstantUnion.h
Include/InfoSink.h
Include/InitializeGlobals.h
Include/intermediate.h
Include/PoolAlloc.h
Include/ResourceLimits.h
Include/revision.h
Include/ShHandle.h
Include/Types.h
MachineIndependent/attribute.h
MachineIndependent/glslang_tab.cpp.h
MachineIndependent/gl_types.h
MachineIndependent/Initialize.h
MachineIndependent/iomapper.h
MachineIndependent/LiveTraverser.h
MachineIndependent/localintermediate.h
MachineIndependent/ParseHelper.h
MachineIndependent/reflection.h
MachineIndependent/RemoveTree.h
MachineIndependent/Scan.h
MachineIndependent/ScanContext.h
MachineIndependent/SymbolTable.h
MachineIndependent/Versions.h
MachineIndependent/parseVersions.h
MachineIndependent/propagateNoContraction.h
MachineIndependent/preprocessor/PpContext.h
MachineIndependent/preprocessor/PpTokens.h)
# This might be useful for making grammar changes:
#
# find_package(BISON)
# add_custom_command(OUTPUT ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp.h
# COMMAND ${BISON_EXECUTABLE} --defines=${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp.h -t ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang.y -o ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp
# MAIN_DEPENDENCY MachineIndependent/glslang.y
# WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
# set(BISON_GLSLParser_OUTPUT_SOURCE ${CMAKE_CURRENT_SOURCE_DIR}/MachineIndependent/glslang_tab.cpp)
add_library(glslang ${LIB_TYPE} ${BISON_GLSLParser_OUTPUT_SOURCE} ${SOURCES} ${HEADERS})
set_property(TARGET glslang PROPERTY FOLDER glslang)
set_property(TARGET glslang PROPERTY POSITION_INDEPENDENT_CODE ON)
target_link_libraries(glslang OGLCompiler OSDependent)
target_include_directories(glslang PUBLIC ..)
if(WIN32 AND BUILD_SHARED_LIBS)
set_target_properties(glslang PROPERTIES PREFIX "")
endif()
if(ENABLE_HLSL)
target_link_libraries(glslang HLSL)
endif()
if(WIN32)
source_group("Public" REGULAR_EXPRESSION "Public/*")
source_group("MachineIndependent" REGULAR_EXPRESSION "MachineIndependent/[^/]*")
source_group("Include" REGULAR_EXPRESSION "Include/[^/]*")
source_group("GenericCodeGen" REGULAR_EXPRESSION "GenericCodeGen/*")
source_group("MachineIndependent\\Preprocessor" REGULAR_EXPRESSION "MachineIndependent/preprocessor/*")
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
if(BUILD_SHARED_LIBS)
install(TARGETS glslang
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
else()
install(TARGETS glslang
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
endif(ENABLE_GLSLANG_INSTALL)
if(ENABLE_GLSLANG_INSTALL)
foreach(file ${HEADERS})
get_filename_component(dir ${file} DIRECTORY)
install(FILES ${file} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/glslang/${dir})
endforeach()
endif(ENABLE_GLSLANG_INSTALL)

View File

@@ -0,0 +1,76 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/Common.h"
#include "../Include/ShHandle.h"
#include "../MachineIndependent/Versions.h"
//
// Here is where real machine specific high-level data would be defined.
//
class TGenericCompiler : public TCompiler {
public:
TGenericCompiler(EShLanguage l, int dOptions) : TCompiler(l, infoSink), debugOptions(dOptions) { }
virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile);
TInfoSink infoSink;
int debugOptions;
};
//
// This function must be provided to create the actual
// compile object used by higher level code. It returns
// a subclass of TCompiler.
//
TCompiler* ConstructCompiler(EShLanguage language, int debugOptions)
{
return new TGenericCompiler(language, debugOptions);
}
//
// Delete the compiler made by ConstructCompiler
//
void DeleteCompiler(TCompiler* compiler)
{
delete compiler;
}
//
// Generate code from the given parse tree
//
bool TGenericCompiler::compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/)
{
haveValidObjectCode = true;
return haveValidObjectCode;
}
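As the comment above notes, ConstructCompiler() is the factory that higher-level code calls to obtain a TCompiler, and DeleteCompiler() disposes of it. A hedged sketch of that pattern; the stage and debug-option values are arbitrary, and the declarations are assumed to come from the same headers this file includes (not code from this commit):

#include "../Include/ShHandle.h"

static void BuildVertexCompiler()
{
    // Create the machine-specific compiler object for a vertex shader.
    TCompiler* compiler = ConstructCompiler(EShLangVertex, /*debugOptions*/ 0);
    // ... the ShaderLang front end would drive compile() here ...
    DeleteCompiler(compiler);
}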

View File

@@ -0,0 +1,91 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// The top level algorithms for linking multiple
// shaders together.
//
#include "../Include/Common.h"
#include "../Include/ShHandle.h"
//
// Actual link object, derived from the shader handle base classes.
//
class TGenericLinker : public TLinker {
public:
TGenericLinker(EShExecutable e, int dOptions) : TLinker(e, infoSink), debugOptions(dOptions) { }
bool link(TCompilerList&, TUniformMap*) { return true; }
void getAttributeBindings(ShBindingTable const **) const { }
TInfoSink infoSink;
int debugOptions;
};
//
// The internal view of a uniform/float object exchanged with the driver.
//
class TUniformLinkedMap : public TUniformMap {
public:
TUniformLinkedMap() { }
virtual int getLocation(const char*) { return 0; }
};
TShHandleBase* ConstructLinker(EShExecutable executable, int debugOptions)
{
return new TGenericLinker(executable, debugOptions);
}
void DeleteLinker(TShHandleBase* linker)
{
delete linker;
}
TUniformMap* ConstructUniformMap()
{
return new TUniformLinkedMap();
}
void DeleteUniformMap(TUniformMap* map)
{
delete map;
}
TShHandleBase* ConstructBindings()
{
return 0;
}
void DeleteBindingList(TShHandleBase* bindingList)
{
delete bindingList;
}
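ConstructUniformMap() and its companions follow the same factory pattern as the compiler factory above. A small sketch using only functions defined in this file, with the declarations assumed to come from ShHandle.h as included above (not code from this commit):

#include "../Include/ShHandle.h"

static void QueryUniforms()
{
    TUniformMap* uniforms = ConstructUniformMap();
    int location = uniforms->getLocation("someUniform"); // the generic map always returns 0
    (void)location;
    DeleteUniformMap(uniforms);
}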

View File

@@ -0,0 +1,464 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _BASICTYPES_INCLUDED_
#define _BASICTYPES_INCLUDED_
namespace glslang {
//
// Basic type. Arrays, vectors, sampler details, etc., are orthogonal to this.
//
enum TBasicType {
EbtVoid,
EbtFloat,
EbtDouble,
EbtFloat16,
EbtInt8,
EbtUint8,
EbtInt16,
EbtUint16,
EbtInt,
EbtUint,
EbtInt64,
EbtUint64,
EbtBool,
EbtAtomicUint,
EbtSampler,
EbtStruct,
EbtBlock,
// HLSL types that live only temporarily.
EbtString,
EbtNumTypes
};
//
// Storage qualifiers. Should align with different kinds of storage or
// resource or GLSL storage qualifier. Expansion is deprecated.
//
// N.B.: You probably DON'T want to add anything here, but rather just add it
// to the built-in variables. See the comment above TBuiltInVariable.
//
// A new built-in variable will normally be an existing qualifier, like 'in', 'out', etc.
// DO NOT follow the design pattern of, say EvqInstanceId, etc.
//
enum TStorageQualifier {
EvqTemporary, // For temporaries (within a function), read/write
EvqGlobal, // For globals read/write
EvqConst, // User-defined constant values, will be semantically constant and constant folded
EvqVaryingIn, // pipeline input, read only, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqVaryingOut, // pipeline output, read/write, also supercategory for all built-ins not included in this enum (see TBuiltInVariable)
EvqUniform, // read only, shared with app
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
EvqOut, // also, for 'out' in the grammar before we know if it's a pipeline output or an 'out' parameter
EvqInOut,
EvqConstReadOnly, // input; also other read-only types having neither a constant value nor constant-value semantics
// built-ins read by vertex shader
EvqVertexId,
EvqInstanceId,
// built-ins written by vertex shader
EvqPosition,
EvqPointSize,
EvqClipVertex,
// built-ins read by fragment shader
EvqFace,
EvqFragCoord,
EvqPointCoord,
// built-ins written by fragment shader
EvqFragColor,
EvqFragDepth,
// end of list
EvqLast
};
//
// Subcategories of the TStorageQualifier, simply to give a direct mapping
// between built-in variable names and a numerical value (the enum).
//
// For backward compatibility, there is some redundancy between the
// TStorageQualifier and these. Existing members should both be maintained accurately.
// However, any new built-in variable (and any existing non-redundant one)
// must follow the pattern that the specific built-in is here, and only its
// general qualifier is in TStorageQualifier.
//
// Something like gl_Position, which is sometimes 'in' and sometimes 'out'
// shows up as two different built-in variables in a single stage, but
// only has a single enum in TBuiltInVariable, so both the
// TStorageQualifier and the TBuiltInVariable are needed to distinguish
// between them.
//
enum TBuiltInVariable {
EbvNone,
EbvNumWorkGroups,
EbvWorkGroupSize,
EbvWorkGroupId,
EbvLocalInvocationId,
EbvGlobalInvocationId,
EbvLocalInvocationIndex,
EbvNumSubgroups,
EbvSubgroupID,
EbvSubGroupSize,
EbvSubGroupInvocation,
EbvSubGroupEqMask,
EbvSubGroupGeMask,
EbvSubGroupGtMask,
EbvSubGroupLeMask,
EbvSubGroupLtMask,
EbvSubgroupSize2,
EbvSubgroupInvocation2,
EbvSubgroupEqMask2,
EbvSubgroupGeMask2,
EbvSubgroupGtMask2,
EbvSubgroupLeMask2,
EbvSubgroupLtMask2,
EbvVertexId,
EbvInstanceId,
EbvVertexIndex,
EbvInstanceIndex,
EbvBaseVertex,
EbvBaseInstance,
EbvDrawId,
EbvPosition,
EbvPointSize,
EbvClipVertex,
EbvClipDistance,
EbvCullDistance,
EbvNormal,
EbvVertex,
EbvMultiTexCoord0,
EbvMultiTexCoord1,
EbvMultiTexCoord2,
EbvMultiTexCoord3,
EbvMultiTexCoord4,
EbvMultiTexCoord5,
EbvMultiTexCoord6,
EbvMultiTexCoord7,
EbvFrontColor,
EbvBackColor,
EbvFrontSecondaryColor,
EbvBackSecondaryColor,
EbvTexCoord,
EbvFogFragCoord,
EbvInvocationId,
EbvPrimitiveId,
EbvLayer,
EbvViewportIndex,
EbvPatchVertices,
EbvTessLevelOuter,
EbvTessLevelInner,
EbvBoundingBox,
EbvTessCoord,
EbvColor,
EbvSecondaryColor,
EbvFace,
EbvFragCoord,
EbvPointCoord,
EbvFragColor,
EbvFragData,
EbvFragDepth,
EbvFragStencilRef,
EbvSampleId,
EbvSamplePosition,
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
EbvBaryCoordSmooth,
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
#ifdef NV_EXTENSIONS
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
EbvPositionPerViewNV,
EbvViewportMaskPerViewNV,
EbvFragFullyCoveredNV,
#endif
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
EbvFragDepthGreater,
EbvFragDepthLesser,
EbvGsOutputStream,
EbvOutputPatch,
EbvInputPatch,
// structbuffer types
EbvAppendConsume, // no need to differentiate append and consume
EbvRWStructuredBuffer,
EbvStructuredBuffer,
EbvByteAddressBuffer,
EbvRWByteAddressBuffer,
EbvLast
};
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
switch (q) {
case EvqTemporary: return "temp"; break;
case EvqGlobal: return "global"; break;
case EvqConst: return "const"; break;
case EvqConstReadOnly: return "const (read only)"; break;
case EvqVaryingIn: return "in"; break;
case EvqVaryingOut: return "out"; break;
case EvqUniform: return "uniform"; break;
case EvqBuffer: return "buffer"; break;
case EvqShared: return "shared"; break;
case EvqIn: return "in"; break;
case EvqOut: return "out"; break;
case EvqInOut: return "inout"; break;
case EvqVertexId: return "gl_VertexId"; break;
case EvqInstanceId: return "gl_InstanceId"; break;
case EvqPosition: return "gl_Position"; break;
case EvqPointSize: return "gl_PointSize"; break;
case EvqClipVertex: return "gl_ClipVertex"; break;
case EvqFace: return "gl_FrontFacing"; break;
case EvqFragCoord: return "gl_FragCoord"; break;
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
default: return "unknown qualifier";
}
}
__inline const char* GetBuiltInVariableString(TBuiltInVariable v)
{
switch (v) {
case EbvNone: return "";
case EbvNumWorkGroups: return "NumWorkGroups";
case EbvWorkGroupSize: return "WorkGroupSize";
case EbvWorkGroupId: return "WorkGroupID";
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
case EbvSubGroupGeMask: return "SubGroupGeMask";
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
case EbvInstanceIndex: return "InstanceIndex";
case EbvBaseVertex: return "BaseVertex";
case EbvBaseInstance: return "BaseInstance";
case EbvDrawId: return "DrawId";
case EbvPosition: return "Position";
case EbvPointSize: return "PointSize";
case EbvClipVertex: return "ClipVertex";
case EbvClipDistance: return "ClipDistance";
case EbvCullDistance: return "CullDistance";
case EbvNormal: return "Normal";
case EbvVertex: return "Vertex";
case EbvMultiTexCoord0: return "MultiTexCoord0";
case EbvMultiTexCoord1: return "MultiTexCoord1";
case EbvMultiTexCoord2: return "MultiTexCoord2";
case EbvMultiTexCoord3: return "MultiTexCoord3";
case EbvMultiTexCoord4: return "MultiTexCoord4";
case EbvMultiTexCoord5: return "MultiTexCoord5";
case EbvMultiTexCoord6: return "MultiTexCoord6";
case EbvMultiTexCoord7: return "MultiTexCoord7";
case EbvFrontColor: return "FrontColor";
case EbvBackColor: return "BackColor";
case EbvFrontSecondaryColor: return "FrontSecondaryColor";
case EbvBackSecondaryColor: return "BackSecondaryColor";
case EbvTexCoord: return "TexCoord";
case EbvFogFragCoord: return "FogFragCoord";
case EbvInvocationId: return "InvocationID";
case EbvPrimitiveId: return "PrimitiveID";
case EbvLayer: return "Layer";
case EbvViewportIndex: return "ViewportIndex";
case EbvPatchVertices: return "PatchVertices";
case EbvTessLevelOuter: return "TessLevelOuter";
case EbvTessLevelInner: return "TessLevelInner";
case EbvBoundingBox: return "BoundingBox";
case EbvTessCoord: return "TessCoord";
case EbvColor: return "Color";
case EbvSecondaryColor: return "SecondaryColor";
case EbvFace: return "Face";
case EbvFragCoord: return "FragCoord";
case EbvPointCoord: return "PointCoord";
case EbvFragColor: return "FragColor";
case EbvFragData: return "FragData";
case EbvFragDepth: return "FragDepth";
case EbvFragStencilRef: return "FragStencilRef";
case EbvSampleId: return "SampleId";
case EbvSamplePosition: return "SamplePosition";
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
case EbvBaryCoordSmooth: return "BaryCoordSmooth";
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
#ifdef NV_EXTENSIONS
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case EbvPositionPerViewNV: return "PositionPerViewNV";
case EbvViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case EbvFragFullyCoveredNV: return "FragFullyCoveredNV";
#endif
default: return "unknown built-in variable";
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
case EpqNone: return ""; break;
case EpqLow: return "lowp"; break;
case EpqMedium: return "mediump"; break;
case EpqHigh: return "highp"; break;
default: return "unknown precision qualifier";
}
}
__inline bool isTypeSignedInt(TBasicType type)
{
switch (type) {
case EbtInt8:
case EbtInt16:
case EbtInt:
case EbtInt64:
return true;
default:
return false;
}
}
__inline bool isTypeUnsignedInt(TBasicType type)
{
switch (type) {
case EbtUint8:
case EbtUint16:
case EbtUint:
case EbtUint64:
return true;
default:
return false;
}
}
__inline bool isTypeInt(TBasicType type)
{
return isTypeSignedInt(type) || isTypeUnsignedInt(type);
}
__inline bool isTypeFloat(TBasicType type)
{
switch (type) {
case EbtFloat:
case EbtDouble:
case EbtFloat16:
return true;
default:
return false;
}
}
__inline int getTypeRank(TBasicType type) {
int res = -1;
switch(type) {
case EbtInt8:
case EbtUint8:
res = 0;
break;
case EbtInt16:
case EbtUint16:
res = 1;
break;
case EbtInt:
case EbtUint:
res = 2;
break;
case EbtInt64:
case EbtUint64:
res = 3;
break;
default:
assert(false);
break;
}
return res;
}
} // end namespace glslang
#endif // _BASICTYPES_INCLUDED_
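Because TPrecisionQualifier is ordered and getTypeRank() maps the 8/16/32/64-bit integer types to 0..3, callers can compare and promote basic types directly. A small sketch of that use; the include path is an assumption that depends on the build setup (not code from this commit):

#include <cassert>
#include "glslang/Include/BaseTypes.h"

// Pick the wider of two integer basic types using the rank helper above.
static glslang::TBasicType PromoteInt(glslang::TBasicType a, glslang::TBasicType b)
{
    assert(glslang::isTypeInt(a) && glslang::isTypeInt(b));
    return glslang::getTypeRank(a) >= glslang::getTypeRank(b) ? a : b;
}

// Ordering guarantees stated in the comment above.
static_assert(glslang::EpqHigh > glslang::EpqMedium && glslang::EpqNone == 0,
              "TPrecisionQualifier is ordered");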

View File

@@ -0,0 +1,278 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMMON_INCLUDED_
#define _COMMON_INCLUDED_
#if defined(__ANDROID__) || _MSC_VER < 1700
#include <sstream>
namespace std {
template<typename T>
std::string to_string(const T& val) {
std::ostringstream os;
os << val;
return os.str();
}
}
#endif
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/) || defined MINGW_HAS_SECURE_API
#include <basetsd.h>
#ifndef snprintf
#define snprintf sprintf_s
#endif
#define safe_vsprintf(buf,max,format,args) vsnprintf_s((buf), (max), (max), (format), (args))
#elif defined (solaris)
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <sys/int_types.h>
#define UINT_PTR uintptr_t
#else
#define safe_vsprintf(buf,max,format,args) vsnprintf((buf), (max), (format), (args))
#include <stdint.h>
#define UINT_PTR uintptr_t
#endif
#if defined(_MSC_VER) && _MSC_VER < 1800
#include <stdlib.h>
inline long long int strtoll (const char* str, char** endptr, int base)
{
return _strtoi64(str, endptr, base);
}
inline unsigned long long int strtoull (const char* str, char** endptr, int base)
{
return _strtoui64(str, endptr, base);
}
inline long long int atoll (const char* str)
{
return strtoll(str, NULL, 10);
}
#endif
#if defined(_MSC_VER)
#define strdup _strdup
#endif
/* windows only pragma */
#ifdef _MSC_VER
#pragma warning(disable : 4786) // Don't warn about too long identifiers
#pragma warning(disable : 4514) // unused inline method
#pragma warning(disable : 4201) // nameless union
#endif
#include <set>
#include <unordered_set>
#include <vector>
#include <map>
#include <unordered_map>
#include <list>
#include <algorithm>
#include <string>
#include <cstdio>
#include <cassert>
#include "PoolAlloc.h"
//
// Put POOL_ALLOCATOR_NEW_DELETE in base classes to make them use this scheme.
//
#define POOL_ALLOCATOR_NEW_DELETE(A) \
void* operator new(size_t s) { return (A).allocate(s); } \
void* operator new(size_t, void *_Where) { return (_Where); } \
void operator delete(void*) { } \
void operator delete(void *, void *) { } \
void* operator new[](size_t s) { return (A).allocate(s); } \
void* operator new[](size_t, void *_Where) { return (_Where); } \
void operator delete[](void*) { } \
void operator delete[](void *, void *) { }
namespace glslang {
//
// Pool version of string.
//
typedef pool_allocator<char> TStringAllocator;
typedef std::basic_string <char, std::char_traits<char>, TStringAllocator> TString;
} // end namespace glslang
// Repackage the std::hash for use by unordered map/set with a TString key.
namespace std {
template<> struct hash<glslang::TString> {
std::size_t operator()(const glslang::TString& s) const
{
const unsigned _FNV_offset_basis = 2166136261U;
const unsigned _FNV_prime = 16777619U;
unsigned _Val = _FNV_offset_basis;
size_t _Count = s.size();
const char* _First = s.c_str();
for (size_t _Next = 0; _Next < _Count; ++_Next)
{
_Val ^= (unsigned)_First[_Next];
_Val *= _FNV_prime;
}
return _Val;
}
};
}
namespace glslang {
inline TString* NewPoolTString(const char* s)
{
void* memory = GetThreadPoolAllocator().allocate(sizeof(TString));
return new(memory) TString(s);
}
template<class T> inline T* NewPoolObject(T*)
{
return new(GetThreadPoolAllocator().allocate(sizeof(T))) T;
}
template<class T> inline T* NewPoolObject(T, int instances)
{
return new(GetThreadPoolAllocator().allocate(instances * sizeof(T))) T[instances];
}
//
// Pool allocator versions of vectors, lists, and maps
//
template <class T> class TVector : public std::vector<T, pool_allocator<T> > {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
typedef typename std::vector<T, pool_allocator<T> >::size_type size_type;
TVector() : std::vector<T, pool_allocator<T> >() {}
TVector(const pool_allocator<T>& a) : std::vector<T, pool_allocator<T> >(a) {}
TVector(size_type i) : std::vector<T, pool_allocator<T> >(i) {}
TVector(size_type i, const T& val) : std::vector<T, pool_allocator<T> >(i, val) {}
};
template <class T> class TList : public std::list<T, pool_allocator<T> > {
};
template <class K, class D, class CMP = std::less<K> >
class TMap : public std::map<K, D, CMP, pool_allocator<std::pair<K const, D> > > {
};
template <class K, class D, class HASH = std::hash<K>, class PRED = std::equal_to<K> >
class TUnorderedMap : public std::unordered_map<K, D, HASH, PRED, pool_allocator<std::pair<K const, D> > > {
};
//
// Persistent string memory. Should only be used for strings that survive
// across compiles/links.
//
typedef std::basic_string<char> TPersistString;
//
// templatized min and max functions.
//
template <class T> T Min(const T a, const T b) { return a < b ? a : b; }
template <class T> T Max(const T a, const T b) { return a > b ? a : b; }
//
// Create a TString object from an integer.
//
#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
inline const TString String(const int i, const int base = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
_itoa_s(i, text, sizeof(text), base);
return text;
}
#else
inline const TString String(const int i, const int /*base*/ = 10)
{
char text[16]; // 32 bit ints are at most 10 digits in base 10
// we assume base 10 for all cases
snprintf(text, sizeof(text), "%d", i);
return text;
}
#endif
struct TSourceLoc {
void init() { name = nullptr; string = 0; line = 0; column = 0; }
void init(int stringNum) { init(); string = stringNum; }
// Returns the name if it exists. Otherwise, returns the string number.
std::string getStringNameOrNum(bool quoteStringName = true) const
{
if (name != nullptr)
return quoteStringName ? ("\"" + std::string(name) + "\"") : name;
return std::to_string((long long)string);
}
const char* name; // descriptive name for this string
int string;
int line;
int column;
};
class TPragmaTable : public TMap<TString, TString> {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
};
const int MaxTokenLength = 1024;
template <class T> bool IsPow2(T powerOf2)
{
if (powerOf2 <= 0)
return false;
return (powerOf2 & (powerOf2 - 1)) == 0;
}
// Round number up to a multiple of the given powerOf2, which is not
// an exponent, just a value that must itself be a power of 2.
template <class T> void RoundToPow2(T& number, int powerOf2)
{
assert(IsPow2(powerOf2));
number = (number + powerOf2 - 1) & ~(powerOf2 - 1);
}
template <class T> bool IsMultipleOfPow2(T number, int powerOf2)
{
assert(IsPow2(powerOf2));
return ! (number & (powerOf2 - 1));
}
} // end namespace glslang
#endif // _COMMON_INCLUDED_
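The pool-backed types above (TString, TVector, and anything using POOL_ALLOCATOR_NEW_DELETE) all allocate from the thread's pool allocator and stay valid only while that pool is live. A hedged sketch of their use, assuming a pool allocator has already been pushed for the thread, as the front end does around a compile, and assuming the public include path (not code from this commit):

#include "glslang/Include/Common.h"

static void BuildPoolBackedData()
{
    // Both allocations come from GetThreadPoolAllocator(); nothing is freed
    // individually -- the whole pool is released when it is popped.
    glslang::TString* name = glslang::NewPoolTString("gl_Position");
    glslang::TVector<int> counts(4, 0);   // four zero-initialized ints
    counts[0] = static_cast<int>(name->size());
}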

View File

@@ -0,0 +1,938 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _CONSTANT_UNION_INCLUDED_
#define _CONSTANT_UNION_INCLUDED_
#include "../Include/Common.h"
#include "../Include/BaseTypes.h"
namespace glslang {
class TConstUnion {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnion() : iConst(0), type(EbtInt) { }
void setI8Const(signed char i)
{
i8Const = i;
type = EbtInt8;
}
void setU8Const(unsigned char u)
{
u8Const = u;
type = EbtUint8;
}
void setI16Const(signed short i)
{
i16Const = i;
type = EbtInt16;
}
void setU16Const(unsigned short u)
{
u16Const = u;
type = EbtUint16;
}
void setIConst(int i)
{
iConst = i;
type = EbtInt;
}
void setUConst(unsigned int u)
{
uConst = u;
type = EbtUint;
}
void setI64Const(long long i64)
{
i64Const = i64;
type = EbtInt64;
}
void setU64Const(unsigned long long u64)
{
u64Const = u64;
type = EbtUint64;
}
void setDConst(double d)
{
dConst = d;
type = EbtDouble;
}
void setBConst(bool b)
{
bConst = b;
type = EbtBool;
}
void setSConst(const TString* s)
{
sConst = s;
type = EbtString;
}
signed char getI8Const() const { return i8Const; }
unsigned char getU8Const() const { return u8Const; }
signed short getI16Const() const { return i16Const; }
unsigned short getU16Const() const { return u16Const; }
int getIConst() const { return iConst; }
unsigned int getUConst() const { return uConst; }
long long getI64Const() const { return i64Const; }
unsigned long long getU64Const() const { return u64Const; }
double getDConst() const { return dConst; }
bool getBConst() const { return bConst; }
const TString* getSConst() const { return sConst; }
bool operator==(const signed char i) const
{
if (i == i8Const)
return true;
return false;
}
bool operator==(const unsigned char u) const
{
if (u == u8Const)
return true;
return false;
}
bool operator==(const signed short i) const
{
if (i == i16Const)
return true;
return false;
}
bool operator==(const unsigned short u) const
{
if (u == u16Const)
return true;
return false;
}
bool operator==(const int i) const
{
if (i == iConst)
return true;
return false;
}
bool operator==(const unsigned int u) const
{
if (u == uConst)
return true;
return false;
}
bool operator==(const long long i64) const
{
if (i64 == i64Const)
return true;
return false;
}
bool operator==(const unsigned long long u64) const
{
if (u64 == u64Const)
return true;
return false;
}
bool operator==(const double d) const
{
if (d == dConst)
return true;
return false;
}
bool operator==(const bool b) const
{
if (b == bConst)
return true;
return false;
}
bool operator==(const TConstUnion& constant) const
{
if (constant.type != type)
return false;
switch (type) {
case EbtInt16:
if (constant.i16Const == i16Const)
return true;
break;
case EbtUint16:
if (constant.u16Const == u16Const)
return true;
break;
case EbtInt8:
if (constant.i8Const == i8Const)
return true;
break;
case EbtUint8:
if (constant.u8Const == u8Const)
return true;
break;
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtInt64:
if (constant.i64Const == i64Const)
return true;
break;
case EbtUint64:
if (constant.u64Const == u64Const)
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
default:
assert(false && "Default missing");
}
return false;
}
bool operator!=(const signed char i) const
{
return !operator==(i);
}
bool operator!=(const unsigned char u) const
{
return !operator==(u);
}
bool operator!=(const signed short i) const
{
return !operator==(i);
}
bool operator!=(const unsigned short u) const
{
return !operator==(u);
}
bool operator!=(const int i) const
{
return !operator==(i);
}
bool operator!=(const unsigned int u) const
{
return !operator==(u);
}
bool operator!=(const long long i) const
{
return !operator==(i);
}
bool operator!=(const unsigned long long u) const
{
return !operator==(u);
}
bool operator!=(const float f) const
{
return !operator==(f);
}
bool operator!=(const bool b) const
{
return !operator==(b);
}
bool operator!=(const TConstUnion& constant) const
{
return !operator==(constant);
}
bool operator>(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
case EbtInt8:
if (i8Const > constant.i8Const)
return true;
return false;
case EbtUint8:
if (u8Const > constant.u8Const)
return true;
return false;
case EbtInt16:
if (i16Const > constant.i16Const)
return true;
return false;
case EbtUint16:
if (u16Const > constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const > constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const > constant.u64Const)
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
default:
assert(false && "Default missing");
return false;
}
}
bool operator<(const TConstUnion& constant) const
{
assert(type == constant.type);
switch (type) {
case EbtInt8:
if (i8Const < constant.i8Const)
return true;
return false;
case EbtUint8:
if (u8Const < constant.u8Const)
return true;
return false;
case EbtInt16:
if (i16Const < constant.i16Const)
return true;
return false;
case EbtUint16:
if (u16Const < constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const < constant.i64Const)
return true;
return false;
case EbtUint64:
if (u64Const < constant.u64Const)
return true;
return false;
case EbtDouble:
if (dConst < constant.dConst)
return true;
return false;
default:
assert(false && "Default missing");
return false;
}
}
TConstUnion operator+(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator-(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator*(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator%(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const % constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator>>(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI8Const(i8Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI8Const(i8Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI8Const(i8Const >> constant.u16Const); break;
case EbtInt: returnValue.setI8Const(i8Const >> constant.iConst); break;
case EbtUint: returnValue.setI8Const(i8Const >> constant.uConst); break;
case EbtInt64: returnValue.setI8Const(i8Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI8Const(i8Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint8:
switch (constant.type) {
case EbtInt8: returnValue.setU8Const(u8Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU8Const(u8Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU8Const(u8Const >> constant.u16Const); break;
case EbtInt: returnValue.setU8Const(u8Const >> constant.iConst); break;
case EbtUint: returnValue.setU8Const(u8Const >> constant.uConst); break;
case EbtInt64: returnValue.setU8Const(u8Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU8Const(u8Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt16:
switch (constant.type) {
case EbtInt8: returnValue.setI16Const(i16Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI16Const(i16Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI16Const(i16Const >> constant.u16Const); break;
case EbtInt: returnValue.setI16Const(i16Const >> constant.iConst); break;
case EbtUint: returnValue.setI16Const(i16Const >> constant.uConst); break;
case EbtInt64: returnValue.setI16Const(i16Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI16Const(i16Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint16:
switch (constant.type) {
case EbtInt8: returnValue.setU16Const(u16Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU16Const(u16Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU16Const(u16Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const >> constant.u16Const); break;
case EbtInt: returnValue.setU16Const(u16Const >> constant.iConst); break;
case EbtUint: returnValue.setU16Const(u16Const >> constant.uConst); break;
case EbtInt64: returnValue.setU16Const(u16Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU16Const(u16Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt:
switch (constant.type) {
case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
case EbtUint8: returnValue.setI64Const(i64Const >> constant.u8Const); break;
case EbtInt16: returnValue.setI64Const(i64Const >> constant.i16Const); break;
case EbtUint16: returnValue.setI64Const(i64Const >> constant.u16Const); break;
case EbtInt: returnValue.setI64Const(i64Const >> constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const >> constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt8: returnValue.setU64Const(u64Const >> constant.i8Const); break;
case EbtUint8: returnValue.setU64Const(u64Const >> constant.u8Const); break;
case EbtInt16: returnValue.setU64Const(u64Const >> constant.i16Const); break;
case EbtUint16: returnValue.setU64Const(u64Const >> constant.u16Const); break;
case EbtInt: returnValue.setU64Const(u64Const >> constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const >> constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const >> constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const >> constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator<<(const TConstUnion& constant) const
{
TConstUnion returnValue;
switch (type) {
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
case EbtUint8: returnValue.setI8Const(i8Const << constant.u8Const); break;
case EbtInt16: returnValue.setI8Const(i8Const << constant.i16Const); break;
case EbtUint16: returnValue.setI8Const(i8Const << constant.u16Const); break;
case EbtInt: returnValue.setI8Const(i8Const << constant.iConst); break;
case EbtUint: returnValue.setI8Const(i8Const << constant.uConst); break;
case EbtInt64: returnValue.setI8Const(i8Const << constant.i64Const); break;
case EbtUint64: returnValue.setI8Const(i8Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint8:
switch (constant.type) {
case EbtInt8: returnValue.setU8Const(u8Const << constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const << constant.u8Const); break;
case EbtInt16: returnValue.setU8Const(u8Const << constant.i16Const); break;
case EbtUint16: returnValue.setU8Const(u8Const << constant.u16Const); break;
case EbtInt: returnValue.setU8Const(u8Const << constant.iConst); break;
case EbtUint: returnValue.setU8Const(u8Const << constant.uConst); break;
case EbtInt64: returnValue.setU8Const(u8Const << constant.i64Const); break;
case EbtUint64: returnValue.setU8Const(u8Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt16:
switch (constant.type) {
case EbtInt8: returnValue.setI16Const(i16Const << constant.i8Const); break;
case EbtUint8: returnValue.setI16Const(i16Const << constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const << constant.i16Const); break;
case EbtUint16: returnValue.setI16Const(i16Const << constant.u16Const); break;
case EbtInt: returnValue.setI16Const(i16Const << constant.iConst); break;
case EbtUint: returnValue.setI16Const(i16Const << constant.uConst); break;
case EbtInt64: returnValue.setI16Const(i16Const << constant.i64Const); break;
case EbtUint64: returnValue.setI16Const(i16Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint16:
switch (constant.type) {
case EbtInt8: returnValue.setU16Const(u16Const << constant.i8Const); break;
case EbtUint8: returnValue.setU16Const(u16Const << constant.u8Const); break;
case EbtInt16: returnValue.setU16Const(u16Const << constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const << constant.u16Const); break;
case EbtInt: returnValue.setU16Const(u16Const << constant.iConst); break;
case EbtUint: returnValue.setU16Const(u16Const << constant.uConst); break;
case EbtInt64: returnValue.setU16Const(u16Const << constant.i64Const); break;
case EbtUint64: returnValue.setU16Const(u16Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt:
switch (constant.type) {
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
case EbtUint8: returnValue.setI64Const(i64Const << constant.u8Const); break;
case EbtInt16: returnValue.setI64Const(i64Const << constant.i16Const); break;
case EbtUint16: returnValue.setI64Const(i64Const << constant.u16Const); break;
case EbtInt: returnValue.setI64Const(i64Const << constant.iConst); break;
case EbtUint: returnValue.setI64Const(i64Const << constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const << constant.i64Const); break;
case EbtUint64: returnValue.setI64Const(i64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint64:
switch (constant.type) {
case EbtInt8: returnValue.setU64Const(u64Const << constant.i8Const); break;
case EbtUint8: returnValue.setU64Const(u64Const << constant.u8Const); break;
case EbtInt16: returnValue.setU64Const(u64Const << constant.i16Const); break;
case EbtUint16: returnValue.setU64Const(u64Const << constant.u16Const); break;
case EbtInt: returnValue.setU64Const(u64Const << constant.iConst); break;
case EbtUint: returnValue.setU64Const(u64Const << constant.uConst); break;
case EbtInt64: returnValue.setU64Const(u64Const << constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator|(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator^(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator~() const
{
TConstUnion returnValue;
switch (type) {
case EbtInt8: returnValue.setI8Const(~i8Const); break;
case EbtUint8: returnValue.setU8Const(~u8Const); break;
case EbtInt16: returnValue.setI16Const(~i16Const); break;
case EbtUint16: returnValue.setU16Const(~u16Const); break;
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
case EbtInt64: returnValue.setI64Const(~i64Const); break;
case EbtUint64: returnValue.setU64Const(~u64Const); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator&&(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst && constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TConstUnion operator||(const TConstUnion& constant) const
{
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtBool: returnValue.setBConst(bConst || constant.bConst); break;
default: assert(false && "Default missing");
}
return returnValue;
}
TBasicType getType() const { return type; }
private:
union {
signed char i8Const; // used for i8vec, scalar int8s
unsigned char u8Const; // used for u8vec, scalar uint8s
signed short i16Const; // used for i16vec, scalar int16s
unsigned short u16Const; // used for u16vec, scalar uint16s
int iConst; // used for ivec, scalar ints
unsigned int uConst; // used for uvec, scalar uints
long long i64Const; // used for i64vec, scalar int64s
unsigned long long u64Const; // used for u64vec, scalar uint64s
bool bConst; // used for bvec, scalar bools
double dConst; // used for vec, dvec, mat, dmat, scalar floats and doubles
const TString* sConst; // string constant
};
TBasicType type;
};
// Encapsulate having a pointer to an array of TConstUnion,
// which only needs to be allocated if its size is going to be
// bigger than 0.
//
// One convenience is being able to use [] to index into the array, instead
// of C++ treating it as an array of pointers to vectors.
//
// General usage is that the size is known up front, and it is
// created once with the proper size.
//
class TConstUnionArray {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TConstUnionArray() : unionArray(nullptr) { }
virtual ~TConstUnionArray() { }
explicit TConstUnionArray(int size)
{
if (size == 0)
unionArray = nullptr;
else
unionArray = new TConstUnionVector(size);
}
TConstUnionArray(const TConstUnionArray& a) : unionArray(a.unionArray) { }
TConstUnionArray(const TConstUnionArray& a, int start, int size)
{
unionArray = new TConstUnionVector(size);
for (int i = 0; i < size; ++i)
(*unionArray)[i] = a[start + i];
}
// Use this constructor for a smear operation
TConstUnionArray(int size, const TConstUnion& val)
{
unionArray = new TConstUnionVector(size, val);
}
int size() const { return unionArray ? (int)unionArray->size() : 0; }
TConstUnion& operator[](size_t index) { return (*unionArray)[index]; }
const TConstUnion& operator[](size_t index) const { return (*unionArray)[index]; }
bool operator==(const TConstUnionArray& rhs) const
{
// this includes the case that both are unallocated
if (unionArray == rhs.unionArray)
return true;
if (! unionArray || ! rhs.unionArray)
return false;
return *unionArray == *rhs.unionArray;
}
bool operator!=(const TConstUnionArray& rhs) const { return ! operator==(rhs); }
double dot(const TConstUnionArray& rhs)
{
assert(rhs.unionArray->size() == unionArray->size());
double sum = 0.0;
for (size_t comp = 0; comp < unionArray->size(); ++comp)
sum += (*this)[comp].getDConst() * rhs[comp].getDConst();
return sum;
}
bool empty() const { return unionArray == nullptr; }
protected:
typedef TVector<TConstUnion> TConstUnionVector;
TConstUnionVector* unionArray;
};
} // end namespace glslang
#endif // _CONSTANT_UNION_INCLUDED_
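For orientation, a minimal usage sketch follows; it is not part of the glslang sources. It assumes the per-thread pool allocator is already active (TConstUnionArray allocates its backing vector from the pool), and the include path and function name are illustrative only.

#include <cassert>
#include "../Include/ConstantUnion.h"  // path as seen from inside the glslang tree

static void constantFoldingSketch()
{
    glslang::TConstUnion six, two;
    six.setIConst(6);
    two.setIConst(2);

    glslang::TConstUnion shifted = six << two;  // folds 6 << 2, result stays EbtInt
    glslang::TConstUnion masked  = six & two;   // operator& asserts matching basic types

    // Smear constructor: four components, all initialized to 'shifted'.
    glslang::TConstUnionArray vec(4, shifted);
    assert(vec.size() == 4);
    assert(vec[0].getType() == glslang::EbtInt);
    (void)masked;
}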

View File

@ -0,0 +1,144 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _INFOSINK_INCLUDED_
#define _INFOSINK_INCLUDED_
#include "../Include/Common.h"
#include <cmath>
namespace glslang {
//
// TPrefixType is used to centralize how info log messages start.
// See below.
//
enum TPrefixType {
EPrefixNone,
EPrefixWarning,
EPrefixError,
EPrefixInternalError,
EPrefixUnimplemented,
EPrefixNote
};
enum TOutputStream {
ENull = 0,
EDebugger = 0x01,
EStdOut = 0x02,
EString = 0x04,
};
//
// Encapsulate info logs for all objects that have them.
//
// The methods are a general set of tools for getting a variety of
// messages and types inserted into the log.
//
class TInfoSinkBase {
public:
TInfoSinkBase() : outputStream(4) {}
void erase() { sink.erase(); }
TInfoSinkBase& operator<<(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(char c) { append(1, c); return *this; }
TInfoSinkBase& operator<<(const char* s) { append(s); return *this; }
TInfoSinkBase& operator<<(int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(unsigned int n) { append(String(n)); return *this; }
TInfoSinkBase& operator<<(float n) { const int size = 40; char buf[size];
snprintf(buf, size, (fabs(n) > 1e-8 && fabs(n) < 1e8) || n == 0.0f ? "%f" : "%g", n);
append(buf);
return *this; }
TInfoSinkBase& operator+(const TPersistString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator<<(const TString& t) { append(t); return *this; }
TInfoSinkBase& operator+(const char* s) { append(s); return *this; }
const char* c_str() const { return sink.c_str(); }
void prefix(TPrefixType message) {
switch(message) {
case EPrefixNone: break;
case EPrefixWarning: append("WARNING: "); break;
case EPrefixError: append("ERROR: "); break;
case EPrefixInternalError: append("INTERNAL ERROR: "); break;
case EPrefixUnimplemented: append("UNIMPLEMENTED: "); break;
case EPrefixNote: append("NOTE: "); break;
default: append("UNKNOWN ERROR: "); break;
}
}
void location(const TSourceLoc& loc) {
const int maxSize = 24;
char locText[maxSize];
snprintf(locText, maxSize, ":%d", loc.line);
append(loc.getStringNameOrNum(false).c_str());
append(locText);
append(": ");
}
void message(TPrefixType message, const char* s) {
prefix(message);
append(s);
append("\n");
}
void message(TPrefixType message, const char* s, const TSourceLoc& loc) {
prefix(message);
location(loc);
append(s);
append("\n");
}
void setOutputStream(int output = 4)
{
outputStream = output;
}
protected:
void append(const char* s);
void append(int count, char c);
void append(const TPersistString& t);
void append(const TString& t);
void checkMem(size_t growth) { if (sink.capacity() < sink.size() + growth + 2)
sink.reserve(sink.capacity() + sink.capacity() / 2); }
void appendToStream(const char* s);
TPersistString sink;
int outputStream;
};
} // end namespace glslang
class TInfoSink {
public:
glslang::TInfoSinkBase info;
glslang::TInfoSinkBase debug;
};
#endif // _INFOSINK_INCLUDED_
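A short usage sketch (illustrative only, not part of the header); the function name and include path are assumptions:

#include <cstdio>
#include "../Include/InfoSink.h"

static void infoSinkSketch()
{
    TInfoSink sink;  // note: TInfoSink itself sits outside namespace glslang

    // Keep the in-memory string and also echo everything to stdout.
    sink.info.setOutputStream(glslang::EString | glslang::EStdOut);

    sink.info.message(glslang::EPrefixWarning, "shader uses a deprecated built-in");
    sink.info << "compiled " << 2 << " stages\n";

    printf("%s", sink.info.c_str());  // everything accumulated in the string sink
}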

View File

@ -0,0 +1,44 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef __INITIALIZE_GLOBALS_INCLUDED_
#define __INITIALIZE_GLOBALS_INCLUDED_
namespace glslang {
bool InitializePoolIndex();
} // end namespace glslang
#endif // __INITIALIZE_GLOBALS_INCLUDED_

View File

@ -0,0 +1,317 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _POOLALLOC_INCLUDED_
#define _POOLALLOC_INCLUDED_
#ifdef _DEBUG
# define GUARD_BLOCKS // define to enable guard block sanity checking
#endif
//
// This header defines an allocator that can be used to efficiently
// allocate a large number of small requests for heap memory, with the
// intention that they are not individually deallocated, but rather
// collectively deallocated at one time.
//
// This simultaneously
//
// * Makes each individual allocation much more efficient; the
// typical allocation is trivial.
// * Completely avoids the cost of doing individual deallocation.
// * Saves the trouble of tracking down and plugging a large class of leaks.
//
// Individual classes can use this allocator by supplying their own
// new and delete methods.
//
// STL containers can use this allocator by using the pool_allocator
// class as the allocator (second) template argument.
//
#include <cstddef>
#include <cstring>
#include <vector>
namespace glslang {
// If we are using guard blocks, we must track each individual
// allocation. If we aren't using guard blocks, these
// never get instantiated, so won't have any impact.
//
class TAllocation {
public:
TAllocation(size_t size, unsigned char* mem, TAllocation* prev = 0) :
size(size), mem(mem), prevAlloc(prev) {
// Allocations are bracketed:
// [allocationHeader][initialGuardBlock][userData][finalGuardBlock]
// This would be cleaner with if (guardBlockSize)..., but that
// makes the compiler print warnings about 0 length memsets,
// even with the if() protecting them.
# ifdef GUARD_BLOCKS
memset(preGuard(), guardBlockBeginVal, guardBlockSize);
memset(data(), userDataFill, size);
memset(postGuard(), guardBlockEndVal, guardBlockSize);
# endif
}
void check() const {
checkGuardBlock(preGuard(), guardBlockBeginVal, "before");
checkGuardBlock(postGuard(), guardBlockEndVal, "after");
}
void checkAllocList() const;
// Return total size needed to accommodate user buffer of 'size',
// plus our tracking data.
inline static size_t allocationSize(size_t size) {
return size + 2 * guardBlockSize + headerSize();
}
// Offset from surrounding buffer to get to user data buffer.
inline static unsigned char* offsetAllocation(unsigned char* m) {
return m + guardBlockSize + headerSize();
}
private:
void checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const;
// Find offsets to pre and post guard blocks, and user data buffer
unsigned char* preGuard() const { return mem + headerSize(); }
unsigned char* data() const { return preGuard() + guardBlockSize; }
unsigned char* postGuard() const { return data() + size; }
size_t size; // size of the user data area
unsigned char* mem; // beginning of our allocation (pts to header)
TAllocation* prevAlloc; // prior allocation in the chain
const static unsigned char guardBlockBeginVal;
const static unsigned char guardBlockEndVal;
const static unsigned char userDataFill;
const static size_t guardBlockSize;
# ifdef GUARD_BLOCKS
inline static size_t headerSize() { return sizeof(TAllocation); }
# else
inline static size_t headerSize() { return 0; }
# endif
};
//
// There are several stacks. One is to track the pushing and popping
// of the user, and is not yet implemented. The others are simply
// repositories of free pages or used pages.
//
// Page stacks are linked together with a simple header at the beginning
// of each allocation obtained from the underlying OS. Multi-page allocations
// are returned to the OS. Individual page allocations are kept for future
// re-use.
//
// The "page size" used is not, nor must it match, the underlying OS
// page size. But, having it be about that size or equal to a set of
// pages is likely most optimal.
//
class TPoolAllocator {
public:
TPoolAllocator(int growthIncrement = 8*1024, int allocationAlignment = 16);
//
// Don't call the destructor just to free up the memory, call pop()
//
~TPoolAllocator();
//
// Call push() to establish a new place to pop memory to. Does not
// have to be called to get things started.
//
void push();
//
// Call pop() to free all memory allocated since the last call to push(),
// or if no last call to push, frees all memory since first allocation.
//
void pop();
//
// Call popAll() to free all memory allocated.
//
void popAll();
//
// Call allocate() to actually acquire memory. Returns 0 if no memory
// available, otherwise a properly aligned pointer to 'numBytes' of memory.
//
void* allocate(size_t numBytes);
//
// There is no deallocate. The point of this class is that
// deallocation can be skipped by the user of it, as the model
// of use is to simultaneously deallocate everything at once
// by calling pop(), and to not have to solve memory leak problems.
//
protected:
friend struct tHeader;
struct tHeader {
tHeader(tHeader* nextPage, size_t pageCount) :
#ifdef GUARD_BLOCKS
lastAllocation(0),
#endif
nextPage(nextPage), pageCount(pageCount) { }
~tHeader() {
#ifdef GUARD_BLOCKS
if (lastAllocation)
lastAllocation->checkAllocList();
#endif
}
#ifdef GUARD_BLOCKS
TAllocation* lastAllocation;
#endif
tHeader* nextPage;
size_t pageCount;
};
struct tAllocState {
size_t offset;
tHeader* page;
};
typedef std::vector<tAllocState> tAllocStack;
// Track allocations if and only if we're using guard blocks
#ifndef GUARD_BLOCKS
void* initializeAllocation(tHeader*, unsigned char* memory, size_t) {
#else
void* initializeAllocation(tHeader* block, unsigned char* memory, size_t numBytes) {
new(memory) TAllocation(numBytes, memory, block->lastAllocation);
block->lastAllocation = reinterpret_cast<TAllocation*>(memory);
#endif
// This is optimized entirely away if GUARD_BLOCKS is not defined.
return TAllocation::offsetAllocation(memory);
}
size_t pageSize; // granularity of allocation from the OS
size_t alignment; // all returned allocations will be aligned at
// this granularity, which will be a power of 2
size_t alignmentMask;
size_t headerSkip; // amount of memory to skip to make room for the
// header (basically, size of header, rounded
// up to make it aligned)
size_t currentPageOffset; // next offset in top of inUseList to allocate from
tHeader* freeList; // list of popped memory
tHeader* inUseList; // list of all memory currently being used
tAllocStack stack; // stack of where to allocate from, to partition pool
int numCalls; // just an interesting statistic
size_t totalBytes; // just an interesting statistic
private:
TPoolAllocator& operator=(const TPoolAllocator&); // don't allow assignment operator
TPoolAllocator(const TPoolAllocator&); // don't allow default copy constructor
};
//
// There could potentially be many pools with pops happening at
// different times. But a simple use is to have a global pop
// with everyone using the same global allocator.
//
extern TPoolAllocator& GetThreadPoolAllocator();
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator);
//
// This STL compatible allocator is intended to be used as the allocator
// parameter to templatized STL containers, like vector and map.
//
// It will use the pools for allocation, and not
// do any deallocation, but will still do destruction.
//
template<class T>
class pool_allocator {
public:
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T& reference;
typedef const T& const_reference;
typedef T value_type;
template<class Other>
struct rebind {
typedef pool_allocator<Other> other;
};
pointer address(reference x) const { return &x; }
const_pointer address(const_reference x) const { return &x; }
pool_allocator() : allocator(GetThreadPoolAllocator()) { }
pool_allocator(TPoolAllocator& a) : allocator(a) { }
pool_allocator(const pool_allocator<T>& p) : allocator(p.allocator) { }
template<class Other>
pool_allocator(const pool_allocator<Other>& p) : allocator(p.getAllocator()) { }
pointer allocate(size_type n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
pointer allocate(size_type n, const void*) {
return reinterpret_cast<pointer>(getAllocator().allocate(n * sizeof(T))); }
void deallocate(void*, size_type) { }
void deallocate(pointer, size_type) { }
pointer _Charalloc(size_t n) {
return reinterpret_cast<pointer>(getAllocator().allocate(n)); }
void construct(pointer p, const T& val) { new ((void *)p) T(val); }
void destroy(pointer p) { p->T::~T(); }
bool operator==(const pool_allocator& rhs) const { return &getAllocator() == &rhs.getAllocator(); }
bool operator!=(const pool_allocator& rhs) const { return &getAllocator() != &rhs.getAllocator(); }
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
void setAllocator(TPoolAllocator* a) { allocator = *a; }
TPoolAllocator& getAllocator() const { return allocator; }
protected:
pool_allocator& operator=(const pool_allocator&) { return *this; }
TPoolAllocator& allocator;
};
} // end namespace glslang
#endif // _POOLALLOC_INCLUDED_
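The push/allocate/pop pattern and the STL hookup described above look roughly like this (illustrative sketch, not part of the header; the container is scoped so it does not outlive the pop, and the function name is an assumption):

#include <vector>
#include "../Include/PoolAlloc.h"

static void poolSketch()
{
    glslang::TPoolAllocator pool;
    glslang::SetThreadPoolAllocator(&pool);  // make it the calling thread's allocator

    pool.push();                             // mark a point to pop back to
    {
        void* scratch = pool.allocate(256);  // raw, aligned block; never freed individually
        (void)scratch;

        // STL container drawing its storage from the same pool.
        std::vector<int, glslang::pool_allocator<int>> temps;
        temps.push_back(42);
    }                                        // destructors run, but no per-object frees
    pool.pop();                              // releases everything allocated since push()
}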

View File

@ -0,0 +1,140 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _RESOURCE_LIMITS_INCLUDED_
#define _RESOURCE_LIMITS_INCLUDED_
struct TLimits {
bool nonInductiveForLoops;
bool whileLoops;
bool doWhileLoops;
bool generalUniformIndexing;
bool generalAttributeMatrixVectorIndexing;
bool generalVaryingIndexing;
bool generalSamplerIndexing;
bool generalVariableIndexing;
bool generalConstantMatrixVectorIndexing;
};
struct TBuiltInResource {
int maxLights;
int maxClipPlanes;
int maxTextureUnits;
int maxTextureCoords;
int maxVertexAttribs;
int maxVertexUniformComponents;
int maxVaryingFloats;
int maxVertexTextureImageUnits;
int maxCombinedTextureImageUnits;
int maxTextureImageUnits;
int maxFragmentUniformComponents;
int maxDrawBuffers;
int maxVertexUniformVectors;
int maxVaryingVectors;
int maxFragmentUniformVectors;
int maxVertexOutputVectors;
int maxFragmentInputVectors;
int minProgramTexelOffset;
int maxProgramTexelOffset;
int maxClipDistances;
int maxComputeWorkGroupCountX;
int maxComputeWorkGroupCountY;
int maxComputeWorkGroupCountZ;
int maxComputeWorkGroupSizeX;
int maxComputeWorkGroupSizeY;
int maxComputeWorkGroupSizeZ;
int maxComputeUniformComponents;
int maxComputeTextureImageUnits;
int maxComputeImageUniforms;
int maxComputeAtomicCounters;
int maxComputeAtomicCounterBuffers;
int maxVaryingComponents;
int maxVertexOutputComponents;
int maxGeometryInputComponents;
int maxGeometryOutputComponents;
int maxFragmentInputComponents;
int maxImageUnits;
int maxCombinedImageUnitsAndFragmentOutputs;
int maxCombinedShaderOutputResources;
int maxImageSamples;
int maxVertexImageUniforms;
int maxTessControlImageUniforms;
int maxTessEvaluationImageUniforms;
int maxGeometryImageUniforms;
int maxFragmentImageUniforms;
int maxCombinedImageUniforms;
int maxGeometryTextureImageUnits;
int maxGeometryOutputVertices;
int maxGeometryTotalOutputComponents;
int maxGeometryUniformComponents;
int maxGeometryVaryingComponents;
int maxTessControlInputComponents;
int maxTessControlOutputComponents;
int maxTessControlTextureImageUnits;
int maxTessControlUniformComponents;
int maxTessControlTotalOutputComponents;
int maxTessEvaluationInputComponents;
int maxTessEvaluationOutputComponents;
int maxTessEvaluationTextureImageUnits;
int maxTessEvaluationUniformComponents;
int maxTessPatchComponents;
int maxPatchVertices;
int maxTessGenLevel;
int maxViewports;
int maxVertexAtomicCounters;
int maxTessControlAtomicCounters;
int maxTessEvaluationAtomicCounters;
int maxGeometryAtomicCounters;
int maxFragmentAtomicCounters;
int maxCombinedAtomicCounters;
int maxAtomicCounterBindings;
int maxVertexAtomicCounterBuffers;
int maxTessControlAtomicCounterBuffers;
int maxTessEvaluationAtomicCounterBuffers;
int maxGeometryAtomicCounterBuffers;
int maxFragmentAtomicCounterBuffers;
int maxCombinedAtomicCounterBuffers;
int maxAtomicCounterBufferSize;
int maxTransformFeedbackBuffers;
int maxTransformFeedbackInterleavedComponents;
int maxCullDistances;
int maxCombinedClipAndCullDistances;
int maxSamples;
TLimits limits;
};
#endif // _RESOURCE_LIMITS_INCLUDED_

View File

@ -0,0 +1,176 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _SHHANDLE_INCLUDED_
#define _SHHANDLE_INCLUDED_
//
// Machine independent part of the compiler private objects
// sent as ShHandle to the driver.
//
// This should not be included by driver code.
//
#define SH_EXPORTING
#include "../Public/ShaderLang.h"
#include "../MachineIndependent/Versions.h"
#include "InfoSink.h"
class TCompiler;
class TLinker;
class TUniformMap;
//
// The base class used to back handles returned to the driver.
//
class TShHandleBase {
public:
TShHandleBase() { pool = new glslang::TPoolAllocator; }
virtual ~TShHandleBase() { delete pool; }
virtual TCompiler* getAsCompiler() { return 0; }
virtual TLinker* getAsLinker() { return 0; }
virtual TUniformMap* getAsUniformMap() { return 0; }
virtual glslang::TPoolAllocator* getPool() const { return pool; }
private:
glslang::TPoolAllocator* pool;
};
//
// The base class for the machine dependent linker to derive from
// for managing where uniforms live.
//
class TUniformMap : public TShHandleBase {
public:
TUniformMap() { }
virtual ~TUniformMap() { }
virtual TUniformMap* getAsUniformMap() { return this; }
virtual int getLocation(const char* name) = 0;
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink infoSink;
};
class TIntermNode;
//
// The base class for the machine dependent compiler to derive from
// for managing object code from the compile.
//
class TCompiler : public TShHandleBase {
public:
TCompiler(EShLanguage l, TInfoSink& sink) : infoSink(sink) , language(l), haveValidObjectCode(false) { }
virtual ~TCompiler() { }
EShLanguage getLanguage() { return language; }
virtual TInfoSink& getInfoSink() { return infoSink; }
virtual bool compile(TIntermNode* root, int version = 0, EProfile profile = ENoProfile) = 0;
virtual TCompiler* getAsCompiler() { return this; }
virtual bool linkable() { return haveValidObjectCode; }
TInfoSink& infoSink;
protected:
TCompiler& operator=(TCompiler&);
EShLanguage language;
bool haveValidObjectCode;
};
//
// Link operations are based on a list of compile results...
//
typedef glslang::TVector<TCompiler*> TCompilerList;
typedef glslang::TVector<TShHandleBase*> THandleList;
//
// The base class for the machine dependent linker to derive from
// to manage the resulting executable.
//
class TLinker : public TShHandleBase {
public:
TLinker(EShExecutable e, TInfoSink& iSink) :
infoSink(iSink),
executable(e),
haveReturnableObjectCode(false),
appAttributeBindings(0),
fixedAttributeBindings(0),
excludedAttributes(0),
excludedCount(0),
uniformBindings(0) { }
virtual TLinker* getAsLinker() { return this; }
virtual ~TLinker() { }
virtual bool link(TCompilerList&, TUniformMap*) = 0;
virtual bool link(THandleList&) { return false; }
virtual void setAppAttributeBindings(const ShBindingTable* t) { appAttributeBindings = t; }
virtual void setFixedAttributeBindings(const ShBindingTable* t) { fixedAttributeBindings = t; }
virtual void getAttributeBindings(ShBindingTable const **t) const = 0;
virtual void setExcludedAttributes(const int* attributes, int count) { excludedAttributes = attributes; excludedCount = count; }
virtual ShBindingTable* getUniformBindings() const { return uniformBindings; }
virtual const void* getObjectCode() const { return 0; } // a real compiler would be returning object code here
virtual TInfoSink& getInfoSink() { return infoSink; }
TInfoSink& infoSink;
protected:
TLinker& operator=(TLinker&);
EShExecutable executable;
bool haveReturnableObjectCode; // true when objectCode is acceptable to send to driver
const ShBindingTable* appAttributeBindings;
const ShBindingTable* fixedAttributeBindings;
const int* excludedAttributes;
int excludedCount;
ShBindingTable* uniformBindings; // created by the linker
};
//
// This is the interface between the machine independent code
// and the machine dependent code.
//
// The machine dependent code should derive from the classes
// above. Then Construct*() and Delete*() will create and
// destroy the machine dependent objects, which contain the
// above machine independent information.
//
TCompiler* ConstructCompiler(EShLanguage, int);
TShHandleBase* ConstructLinker(EShExecutable, int);
TShHandleBase* ConstructBindings();
void DeleteLinker(TShHandleBase*);
void DeleteBindingList(TShHandleBase* bindingList);
TUniformMap* ConstructUniformMap();
void DeleteCompiler(TCompiler*);
void DeleteUniformMap(TUniformMap*);
#endif // _SHHANDLE_INCLUDED_
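As a sketch of the pattern described above (illustrative only; TNullCompiler is a hypothetical name and real back ends do considerably more), a machine dependent compiler derives from TCompiler and reports success through haveValidObjectCode:

#include "../Include/ShHandle.h"

// Hypothetical do-nothing back end in the shape ShHandle.h expects.
class TNullCompiler : public TCompiler {
public:
    TNullCompiler(EShLanguage l, TInfoSink& sink) : TCompiler(l, sink) { }

    bool compile(TIntermNode* /*root*/, int /*version*/, EProfile /*profile*/) override
    {
        infoSink.info.message(glslang::EPrefixNote, "null back end: nothing generated");
        haveValidObjectCode = true;  // linkable() now reports success
        return true;
    }
};
// ConstructCompiler()/DeleteCompiler() declared above would create and destroy
// instances of such a class on behalf of the driver.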

File diff suppressed because it is too large

View File

@ -0,0 +1,339 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Implement types for tracking GLSL arrays, arrays of arrays, etc.
//
#ifndef _ARRAYS_INCLUDED
#define _ARRAYS_INCLUDED
#include <algorithm>
namespace glslang {
// This is used to mean there is no size yet (unsized); it is waiting to get a size from somewhere else.
const int UnsizedArraySize = 0;
class TIntermTyped;
extern bool SameSpecializationConstants(TIntermTyped*, TIntermTyped*);
// Specialization constants need both a nominal size and a node that defines
// the specialization constant being used. Array types are the same when their
// size and specialization constant nodes are the same.
struct TArraySize {
unsigned int size;
TIntermTyped* node; // nullptr means no specialization constant node
bool operator==(const TArraySize& rhs) const
{
if (size != rhs.size)
return false;
if (node == nullptr || rhs.node == nullptr)
return node == rhs.node;
return SameSpecializationConstants(node, rhs.node);
}
};
//
// TSmallArrayVector is used as the container for the set of sizes in TArraySizes.
// It has generic-container semantics, while TArraySizes has array-of-array semantics.
// That is, TSmallArrayVector should be more focused on mechanism and TArraySizes on policy.
//
struct TSmallArrayVector {
//
// TODO: memory: TSmallArrayVector is intended to be smaller.
// Almost all arrays could be handled by two sizes each fitting
// in 16 bits, needing a real vector only in the cases where there
// are more than 3 sizes or a size needing more than 16 bits.
//
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSmallArrayVector() : sizes(nullptr) { }
virtual ~TSmallArrayVector() { dealloc(); }
// For breaking into two non-shared copies, independently modifiable.
TSmallArrayVector& operator=(const TSmallArrayVector& from)
{
if (from.sizes == nullptr)
sizes = nullptr;
else {
alloc();
*sizes = *from.sizes;
}
return *this;
}
int size() const
{
if (sizes == nullptr)
return 0;
return (int)sizes->size();
}
unsigned int frontSize() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().size;
}
TIntermTyped* frontNode() const
{
assert(sizes != nullptr && sizes->size() > 0);
return sizes->front().node;
}
void changeFront(unsigned int s)
{
assert(sizes != nullptr);
// this should only happen for implicitly sized arrays, not specialization constants
assert(sizes->front().node == nullptr);
sizes->front().size = s;
}
void push_back(unsigned int e, TIntermTyped* n)
{
alloc();
TArraySize pair = { e, n };
sizes->push_back(pair);
}
void push_back(const TSmallArrayVector& newDims)
{
alloc();
sizes->insert(sizes->end(), newDims.sizes->begin(), newDims.sizes->end());
}
void pop_front()
{
assert(sizes != nullptr && sizes->size() > 0);
if (sizes->size() == 1)
dealloc();
else
sizes->erase(sizes->begin());
}
// 'this' should currently not be holding anything, and copyNonFront
// will make it hold a copy of all but the first element of rhs.
// (This would be useful for making a type that is dereferenced by
// one dimension.)
void copyNonFront(const TSmallArrayVector& rhs)
{
assert(sizes == nullptr);
if (rhs.size() > 1) {
alloc();
sizes->insert(sizes->begin(), rhs.sizes->begin() + 1, rhs.sizes->end());
}
}
unsigned int getDimSize(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].size;
}
void setDimSize(int i, unsigned int size) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
assert((*sizes)[i].node == nullptr);
(*sizes)[i].size = size;
}
TIntermTyped* getDimNode(int i) const
{
assert(sizes != nullptr && (int)sizes->size() > i);
return (*sizes)[i].node;
}
bool operator==(const TSmallArrayVector& rhs) const
{
if (sizes == nullptr && rhs.sizes == nullptr)
return true;
if (sizes == nullptr || rhs.sizes == nullptr)
return false;
return *sizes == *rhs.sizes;
}
bool operator!=(const TSmallArrayVector& rhs) const { return ! operator==(rhs); }
protected:
TSmallArrayVector(const TSmallArrayVector&);
void alloc()
{
if (sizes == nullptr)
sizes = new TVector<TArraySize>;
}
void dealloc()
{
delete sizes;
sizes = nullptr;
}
TVector<TArraySize>* sizes; // will either hold such a pointer, or in the future, hold the two array sizes
};
//
// Represent an array, or array of arrays, to arbitrary depth. This is not
// done through a hierarchy of types in a type tree; rather, all contiguous arrayness
// in the type hierarchy is localized into this single cumulative object.
//
// The arrayness in TType is a pointer, so that it can be non-allocated and zero
// for the vast majority of types that are non-array types.
//
// Order Policy: these are all identical:
// - left to right order within a contiguous set of ...[..][..][..]... in the source language
// - index order 0, 1, 2, ... within the 'sizes' member below
// - outer-most to inner-most
//
struct TArraySizes {
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TArraySizes() : implicitArraySize(1), variablyIndexed(false) { }
// For breaking into two non-shared copies, independently modifiable.
TArraySizes& operator=(const TArraySizes& from)
{
implicitArraySize = from.implicitArraySize;
variablyIndexed = from.variablyIndexed;
sizes = from.sizes;
return *this;
}
// translate from array-of-array semantics to container semantics
int getNumDims() const { return sizes.size(); }
int getDimSize(int dim) const { return sizes.getDimSize(dim); }
TIntermTyped* getDimNode(int dim) const { return sizes.getDimNode(dim); }
void setDimSize(int dim, int size) { sizes.setDimSize(dim, size); }
int getOuterSize() const { return sizes.frontSize(); }
TIntermTyped* getOuterNode() const { return sizes.frontNode(); }
int getCumulativeSize() const
{
int size = 1;
for (int d = 0; d < sizes.size(); ++d) {
// this only makes sense in paths that have a known array size
assert(sizes.getDimSize(d) != UnsizedArraySize);
size *= sizes.getDimSize(d);
}
return size;
}
void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
void addInnerSize(TArraySize pair) { sizes.push_back(pair.size, pair.node); }
void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
int getImplicitSize() const { return implicitArraySize; }
void updateImplicitSize(int s) { implicitArraySize = std::max(implicitArraySize, s); }
bool isInnerUnsized() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
return true;
}
return false;
}
bool clearInnerUnsized()
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) == (unsigned)UnsizedArraySize)
setDimSize(d, 1);
}
return false;
}
bool isInnerSpecialization() const
{
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimNode(d) != nullptr)
return true;
}
return false;
}
bool isOuterSpecialization()
{
return sizes.getDimNode(0) != nullptr;
}
bool hasUnsized() const { return getOuterSize() == UnsizedArraySize || isInnerUnsized(); }
bool isSized() const { return getOuterSize() != UnsizedArraySize; }
void dereference() { sizes.pop_front(); }
void copyDereferenced(const TArraySizes& rhs)
{
assert(sizes.size() == 0);
if (rhs.sizes.size() > 1)
sizes.copyNonFront(rhs.sizes);
}
bool sameInnerArrayness(const TArraySizes& rhs) const
{
if (sizes.size() != rhs.sizes.size())
return false;
for (int d = 1; d < sizes.size(); ++d) {
if (sizes.getDimSize(d) != rhs.sizes.getDimSize(d) ||
sizes.getDimNode(d) != rhs.sizes.getDimNode(d))
return false;
}
return true;
}
void setVariablyIndexed() { variablyIndexed = true; }
bool isVariablyIndexed() const { return variablyIndexed; }
bool operator==(const TArraySizes& rhs) { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) { return sizes != rhs.sizes; }
protected:
TSmallArrayVector sizes;
TArraySizes(const TArraySizes&);
// For tracking maximum referenced compile-time constant index.
// Applies only to the outer-most dimension. Potentially becomes
// the implicit size of the array, if not variably indexed and
// otherwise legal.
int implicitArraySize;
bool variablyIndexed; // true if array is indexed with a non compile-time constant
};
} // end namespace glslang
#endif // _ARRAYS_INCLUDED_
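A short sketch of the container-to-array-of-array mapping described above (illustrative only, not part of the header; it assumes the per-thread pool allocator is active, since the sizes vector is pool-allocated):

#include <cassert>
#include "../Include/arrays.h"

static void arraySizesSketch()
{
    // Describe "float a[3][4]": outer-most dimension first (index 0).
    glslang::TArraySizes sizes;
    sizes.addInnerSize(3);
    sizes.addInnerSize(4);

    assert(sizes.getNumDims() == 2);
    assert(sizes.getOuterSize() == 3);
    assert(sizes.getCumulativeSize() == 3 * 4);
    assert(sizes.isSized() && !sizes.hasUnsized());

    // a[i] has type float[4]: dereferencing drops the outer-most dimension.
    sizes.dereference();
    assert(sizes.getNumDims() == 1 && sizes.getOuterSize() == 4);
}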

File diff suppressed because it is too large

View File

@ -0,0 +1,3 @@
// This header is generated by the make-revision script.
#define GLSLANG_PATCH_LEVEL 2801

View File

@ -0,0 +1,13 @@
// The file revision.h should be updated to the latest version, somehow, on
// check-in, if glslang has changed.
//
// revision.template is the source for revision.h when using SubWCRev as the
// method of updating revision.h. You don't have to do it this way; the
// requirement is only that revision.h gets updated.
//
// revision.h is under source control so that not all consumers of glslang
// source have to figure out how to create revision.h just to get a build
// going. However, if it is not updated, it can be a version behind.
#define GLSLANG_REVISION "$WCREV$"
#define GLSLANG_DATE "$WCDATE$"
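For illustration only (hypothetical values, not generated output): after SubWCRev or an equivalent script substitutes the placeholders, revision.h contains concrete strings along the lines of:

#define GLSLANG_REVISION "3170"
#define GLSLANG_DATE "20-Feb-2019"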

File diff suppressed because it is too large

View File

@ -0,0 +1,113 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/InfoSink.h"
#include <cstring>
namespace glslang {
void TInfoSinkBase::append(const char* s)
{
if (outputStream & EString) {
if (s == nullptr)
sink.append("(null)");
else {
checkMem(strlen(s));
sink.append(s);
}
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(s);
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", s);
}
void TInfoSinkBase::append(int count, char c)
{
if (outputStream & EString) {
checkMem(count);
sink.append(count, c);
}
//#ifdef _WIN32
// if (outputStream & EDebugger) {
// char str[2];
// str[0] = c;
// str[1] = '\0';
// OutputDebugString(str);
// }
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%c", c);
}
void TInfoSinkBase::append(const TPersistString& t)
{
if (outputStream & EString) {
checkMem(t.size());
sink.append(t);
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(t.c_str());
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", t.c_str());
}
void TInfoSinkBase::append(const TString& t)
{
if (outputStream & EString) {
checkMem(t.size());
sink.append(t.c_str());
}
//#ifdef _WIN32
// if (outputStream & EDebugger)
// OutputDebugString(t.c_str());
//#endif
if (outputStream & EStdOut)
fprintf(stdout, "%s", t.c_str());
}
} // end namespace glslang

File diff suppressed because it is too large

View File

@ -0,0 +1,110 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _INITIALIZE_INCLUDED_
#define _INITIALIZE_INCLUDED_
#include "../Include/ResourceLimits.h"
#include "../Include/Common.h"
#include "../Include/ShHandle.h"
#include "SymbolTable.h"
#include "Versions.h"
namespace glslang {
//
// This is made to hold parseable strings for almost all the built-in
// functions and variables for one specific combination of version
// and profile. (Some still need to be added programmatically.)
// This is a base class for language-specific derivations, which
// can be used for language independent builtins.
//
// The strings are organized by
// commonBuiltins: intersection of all stages' built-ins, processed just once
// stageBuiltins[]: anything a stage needs that's not in commonBuiltins
//
class TBuiltInParseables {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TBuiltInParseables();
virtual ~TBuiltInParseables();
virtual void initialize(int version, EProfile, const SpvVersion& spvVersion) = 0;
virtual void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage) = 0;
virtual const TString& getCommonString() const { return commonBuiltins; }
virtual const TString& getStageString(EShLanguage language) const { return stageBuiltins[language]; }
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable) = 0;
virtual void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources) = 0;
protected:
TString commonBuiltins;
TString stageBuiltins[EShLangCount];
};
//
// This is a GLSL specific derivation of TBuiltInParseables. To present a stable
// interface and match other similar code, it is called TBuiltIns, rather
// than TBuiltInParseablesGlsl.
//
class TBuiltIns : public TBuiltInParseables {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TBuiltIns();
virtual ~TBuiltIns();
void initialize(int version, EProfile, const SpvVersion& spvVersion);
void initialize(const TBuiltInResource& resources, int version, EProfile, const SpvVersion& spvVersion, EShLanguage);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable);
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addImageFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addSamplingFunctions(TSampler, const TString& typeName, int version, EProfile profile);
void addGatherFunctions(TSampler, const TString& typeName, int version, EProfile profile);
// Helpers for making textual representations of the permutations
// of texturing/imaging functions.
const char* postfixes[5];
const char* prefixes[EbtNumTypes];
int dimMap[EsdNumDims];
};
} // end namespace glslang
#endif // _INITIALIZE_INCLUDED_
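A minimal sketch of how these strings get built (illustrative; the library normally drives this internally, and the per-thread pool allocator must already be active because the TString members are pool-backed):

#include "Initialize.h"

static void builtInsSketch()
{
    glslang::TBuiltIns builtIns;
    glslang::SpvVersion spvVersion;  // assumed defaults: no SPIR-V / Vulkan target

    // Build the built-in declaration strings for desktop GLSL 450, core profile.
    builtIns.initialize(450, ECoreProfile, spvVersion);

    const glslang::TString& common = builtIns.getCommonString();
    const glslang::TString& vertex = builtIns.getStageString(EShLangVertex);
    (void)common; (void)vertex;
}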

View File

@ -0,0 +1,302 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// Copyright (c) 2002-2010 The ANGLE Project Authors.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/intermediate.h"
namespace glslang {
//
// Traverse the intermediate representation tree, and
// call a node type specific function for each node.
// Done recursively through the member function Traverse().
// Node types can be skipped if their function to call is 0,
// but their subtree will still be traversed.
// Nodes with children can have their whole subtree skipped
// if preVisit is turned on and the type specific function
// returns false.
//
// preVisit, postVisit, and rightToLeft control what order
// nodes are visited in.
//
//
// Traversal functions for terminals are straightforward....
//
void TIntermMethod::traverse(TIntermTraverser*)
{
// Tree should always resolve all methods as a non-method.
}
void TIntermSymbol::traverse(TIntermTraverser *it)
{
it->visitSymbol(this);
}
void TIntermConstantUnion::traverse(TIntermTraverser *it)
{
it->visitConstantUnion(this);
}
//
// Traverse a binary node.
//
void TIntermBinary::traverse(TIntermTraverser *it)
{
bool visit = true;
//
// visit the node before children if pre-visiting.
//
if (it->preVisit)
visit = it->visitBinary(EvPreVisit, this);
//
// Visit the children, in the right order.
//
if (visit) {
it->incrementDepth(this);
if (it->rightToLeft) {
if (right)
right->traverse(it);
if (it->inVisit)
visit = it->visitBinary(EvInVisit, this);
if (visit && left)
left->traverse(it);
} else {
if (left)
left->traverse(it);
if (it->inVisit)
visit = it->visitBinary(EvInVisit, this);
if (visit && right)
right->traverse(it);
}
it->decrementDepth();
}
//
// Visit the node after the children, if requested and the traversal
// hasn't been canceled yet.
//
if (visit && it->postVisit)
it->visitBinary(EvPostVisit, this);
}
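The preVisit/postVisit/rightToLeft scheme above is not specific to glslang's node types. A self-contained sketch of the same ordering rules (illustrative only; Node and the callbacks are invented), including the subtree skip when the pre-visit callback returns false:

#include <functional>
#include <iostream>

// Illustrative only: a tiny binary tree with the same visit-ordering rules.
struct Node {
    int value;
    Node* left;
    Node* right;
};

void traverse(Node* n, bool rightToLeft,
              const std::function<bool(Node*)>& preVisit,
              const std::function<void(Node*)>& postVisit)
{
    if (n == nullptr)
        return;
    if (!preVisit(n))
        return;                                    // whole subtree skipped
    Node* first  = rightToLeft ? n->right : n->left;
    Node* second = rightToLeft ? n->left  : n->right;
    traverse(first,  rightToLeft, preVisit, postVisit);
    traverse(second, rightToLeft, preVisit, postVisit);
    postVisit(n);                                  // visited after the children
}

int main() {
    Node left{1, nullptr, nullptr}, right{2, nullptr, nullptr};
    Node root{0, &left, &right};
    traverse(&root, /*rightToLeft=*/false,
             [](Node* n) { std::cout << "pre "  << n->value << "\n"; return true; },
             [](Node* n) { std::cout << "post " << n->value << "\n"; });
}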
//
// Traverse a unary node. The same comments as for the binary node apply here.
//
void TIntermUnary::traverse(TIntermTraverser *it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitUnary(EvPreVisit, this);
if (visit) {
it->incrementDepth(this);
operand->traverse(it);
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitUnary(EvPostVisit, this);
}
//
// Traverse an aggregate node. The same comments as for the binary node apply here.
//
void TIntermAggregate::traverse(TIntermTraverser *it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitAggregate(EvPreVisit, this);
if (visit) {
it->incrementDepth(this);
if (it->rightToLeft) {
for (TIntermSequence::reverse_iterator sit = sequence.rbegin(); sit != sequence.rend(); sit++) {
(*sit)->traverse(it);
if (visit && it->inVisit) {
if (*sit != sequence.front())
visit = it->visitAggregate(EvInVisit, this);
}
}
} else {
for (TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++) {
(*sit)->traverse(it);
if (visit && it->inVisit) {
if (*sit != sequence.back())
visit = it->visitAggregate(EvInVisit, this);
}
}
}
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitAggregate(EvPostVisit, this);
}
//
// Traverse a selection node. The same comments as for the binary node apply here.
//
void TIntermSelection::traverse(TIntermTraverser *it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitSelection(EvPreVisit, this);
if (visit) {
it->incrementDepth(this);
if (it->rightToLeft) {
if (falseBlock)
falseBlock->traverse(it);
if (trueBlock)
trueBlock->traverse(it);
condition->traverse(it);
} else {
condition->traverse(it);
if (trueBlock)
trueBlock->traverse(it);
if (falseBlock)
falseBlock->traverse(it);
}
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitSelection(EvPostVisit, this);
}
//
// Traverse a loop node. The same comments as for the binary node apply here.
//
void TIntermLoop::traverse(TIntermTraverser *it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitLoop(EvPreVisit, this);
if (visit) {
it->incrementDepth(this);
if (it->rightToLeft) {
if (terminal)
terminal->traverse(it);
if (body)
body->traverse(it);
if (test)
test->traverse(it);
} else {
if (test)
test->traverse(it);
if (body)
body->traverse(it);
if (terminal)
terminal->traverse(it);
}
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitLoop(EvPostVisit, this);
}
//
// Traverse a branch node. The same comments as for the binary node apply here.
//
void TIntermBranch::traverse(TIntermTraverser *it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitBranch(EvPreVisit, this);
if (visit && expression) {
it->incrementDepth(this);
expression->traverse(it);
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitBranch(EvPostVisit, this);
}
//
// Traverse a switch node.
//
void TIntermSwitch::traverse(TIntermTraverser* it)
{
bool visit = true;
if (it->preVisit)
visit = it->visitSwitch(EvPreVisit, this);
if (visit) {
it->incrementDepth(this);
if (it->rightToLeft) {
body->traverse(it);
condition->traverse(it);
} else {
condition->traverse(it);
body->traverse(it);
}
it->decrementDepth();
}
if (visit && it->postVisit)
it->visitSwitch(EvPostVisit, this);
}
} // end namespace glslang

File diff suppressed because it is too large

View File

@@ -0,0 +1,138 @@
//
// Copyright (C) 2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
#include "../Include/Common.h"
#include "reflection.h"
#include "localintermediate.h"
#include "gl_types.h"
#include <list>
#include <unordered_set>
namespace glslang {
//
// The traverser: mostly pass through, except
// - processing function-call nodes to push live functions onto the stack of functions to process
// - processing selection nodes to trim semantically dead code
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
// This can be derived from to implement reflection database traversers or
// binding mappers: anything that wants to traverse the live subset of the tree.
//
class TLiveTraverser : public TIntermTraverser {
public:
TLiveTraverser(const TIntermediate& i, bool traverseAll = false,
bool preVisit = true, bool inVisit = false, bool postVisit = false) :
TIntermTraverser(preVisit, inVisit, postVisit),
intermediate(i), traverseAll(traverseAll)
{ }
//
// Given a function name, find its subroot in the tree, and push it onto the stack of
// functions left to process.
//
void pushFunction(const TString& name)
{
TIntermSequence& globals = intermediate.getTreeRoot()->getAsAggregate()->getSequence();
for (unsigned int f = 0; f < globals.size(); ++f) {
TIntermAggregate* candidate = globals[f]->getAsAggregate();
if (candidate && candidate->getOp() == EOpFunction && candidate->getName() == name) {
functions.push_back(candidate);
break;
}
}
}
typedef std::list<TIntermAggregate*> TFunctionStack;
TFunctionStack functions;
protected:
// To catch which function calls are not dead, and hence which functions must be visited.
virtual bool visitAggregate(TVisit, TIntermAggregate* node)
{
if (!traverseAll)
if (node->getOp() == EOpFunctionCall)
addFunctionCall(node);
return true; // traverse this subtree
}
// To prune semantically dead paths.
virtual bool visitSelection(TVisit /* visit */, TIntermSelection* node)
{
if (traverseAll)
return true; // traverse all code
TIntermConstantUnion* constant = node->getCondition()->getAsConstantUnion();
if (constant) {
// cull the path that is dead
if (constant->getConstArray()[0].getBConst() == true && node->getTrueBlock())
node->getTrueBlock()->traverse(this);
if (constant->getConstArray()[0].getBConst() == false && node->getFalseBlock())
node->getFalseBlock()->traverse(this);
return false; // don't traverse any more, we did it all above
} else
return true; // traverse the whole subtree
}
// Track live functions as well as uniforms, so that we don't visit dead functions
// and only visit each function once.
void addFunctionCall(TIntermAggregate* call)
{
// just use the map to ensure we process each function at most once
if (liveFunctions.find(call->getName()) == liveFunctions.end()) {
liveFunctions.insert(call->getName());
pushFunction(call->getName());
}
}
const TIntermediate& intermediate;
typedef std::unordered_set<TString> TLiveFunctions;
TLiveFunctions liveFunctions;
bool traverseAll;
private:
// prevent copy & copy construct
TLiveTraverser(TLiveTraverser&);
TLiveTraverser& operator=(TLiveTraverser&);
};
} // namespace glslang
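The liveFunctions set plus the functions worklist above amount to reachability over the call graph, starting from the entry point. A standalone sketch under that reading (not glslang code; the call graph and function names are invented):

#include <iostream>
#include <list>
#include <map>
#include <set>
#include <string>
#include <vector>

int main() {
    // Hypothetical call graph: function name -> functions it calls.
    std::map<std::string, std::vector<std::string>> calls = {
        {"main", {"lighting"}}, {"lighting", {"fresnel"}},
        {"fresnel", {}},        {"deadHelper", {"fresnel"}},
    };

    std::set<std::string> live;        // plays the role of liveFunctions
    std::list<std::string> pending;    // plays the role of the functions worklist
    live.insert("main");
    pending.push_back("main");

    while (!pending.empty()) {         // each live function is processed exactly once
        std::string f = pending.front();
        pending.pop_front();
        for (const std::string& callee : calls[f])
            if (live.insert(callee).second)
                pending.push_back(callee);
    }

    for (const std::string& f : live)
        std::cout << f << " is live\n";   // deadHelper never appears
}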

View File

@@ -0,0 +1,613 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Implement the TParseContextBase class.
#include <cstdarg>
#include "ParseHelper.h"
extern int yyparse(glslang::TParseContext*);
namespace glslang {
//
// Used to output syntax, parsing, and semantic errors.
//
void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReason,
const char* szToken,
const char* szExtraInfoFormat,
TPrefixType prefix, va_list args)
{
const int maxSize = MaxTokenLength + 200;
char szExtraInfo[maxSize];
safe_vsprintf(szExtraInfo, maxSize, szExtraInfoFormat, args);
infoSink.info.prefix(prefix);
infoSink.info.location(loc);
infoSink.info << "'" << szToken << "' : " << szReason << " " << szExtraInfo << "\n";
if (prefix == EPrefixError) {
++numErrors;
}
}
void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
if (messages & EShMsgOnlyPreprocessor)
return;
va_list args;
va_start(args, szExtraInfoFormat);
outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
va_end(args);
if ((messages & EShMsgCascadingErrors) == 0)
currentScanner->setEndOfInput();
}
void C_DECL TParseContextBase::warn(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
if (suppressWarnings())
return;
va_list args;
va_start(args, szExtraInfoFormat);
outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
va_end(args);
}
void C_DECL TParseContextBase::ppError(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
va_list args;
va_start(args, szExtraInfoFormat);
outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixError, args);
va_end(args);
if ((messages & EShMsgCascadingErrors) == 0)
currentScanner->setEndOfInput();
}
void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
va_list args;
va_start(args, szExtraInfoFormat);
outputMessage(loc, szReason, szToken, szExtraInfoFormat, EPrefixWarning, args);
va_end(args);
}
//
// Both test, and if necessary spit out an error, to see if the node is really
// an l-value that can be operated on this way.
//
// Returns true if there was an error.
//
bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect: // fall through
case EOpIndexDirectStruct: // fall through
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
return lValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
error(loc, " l-value required", op, "", "");
return true;
}
const char* symbol = nullptr;
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode != nullptr)
symbol = symNode->getName().c_str();
const char* message = nullptr;
switch (node->getQualifier().storage) {
case EvqConst: message = "can't modify a const"; break;
case EvqConstReadOnly: message = "can't modify a const"; break;
case EvqUniform: message = "can't modify a uniform"; break;
case EvqBuffer:
if (node->getQualifier().readonly)
message = "can't modify a readonly buffer";
break;
default:
//
// Type that can't be written to?
//
switch (node->getBasicType()) {
case EbtSampler:
message = "can't modify a sampler";
break;
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtVoid:
message = "can't modify void";
break;
default:
break;
}
}
if (message == nullptr && binaryNode == nullptr && symNode == nullptr) {
error(loc, " l-value required", op, "", "");
return true;
}
//
// Everything else is okay, no error.
//
if (message == nullptr)
return false;
//
// If we get here, we have an error and a message.
//
if (symNode)
error(loc, " l-value required", op, "\"%s\" (%s)", symbol, message);
else
error(loc, " l-value required", op, "(%s)", message);
return true;
}
// Test for and give an error if the node can't be read from.
void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op, TIntermTyped* node)
{
if (! node)
return;
TIntermBinary* binaryNode = node->getAsBinaryNode();
if (binaryNode) {
switch(binaryNode->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
case EOpVectorSwizzle:
case EOpMatrixSwizzle:
rValueErrorCheck(loc, op, binaryNode->getLeft());
default:
break;
}
return;
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().writeonly)
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
// Add 'symbol' to the list of deferred linkage symbols, which
// are later processed in finish(), at which point the symbol
// must still be valid.
// It is okay if the symbol's type will be subsequently edited;
// the modifications will be tracked.
// Order is preserved, to avoid creating novel forward references.
void TParseContextBase::trackLinkage(TSymbol& symbol)
{
if (!parsingBuiltins)
linkageSymbols.push_back(&symbol);
}
// Ensure index is in bounds, correct if necessary.
// Give an error if not.
void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
{
if (index < 0) {
error(loc, "", "[", "index out of range '%d'", index);
index = 0;
} else if (type.isArray()) {
if (type.isSizedArray() && index >= type.getOuterArraySize()) {
error(loc, "", "[", "array index out of range '%d'", index);
index = type.getOuterArraySize() - 1;
}
} else if (type.isVector()) {
if (index >= type.getVectorSize()) {
error(loc, "", "[", "vector index out of range '%d'", index);
index = type.getVectorSize() - 1;
}
} else if (type.isMatrix()) {
if (index >= type.getMatrixCols()) {
error(loc, "", "[", "matrix index out of range '%d'", index);
index = type.getMatrixCols() - 1;
}
}
}
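checkIndex() above reports the error but also repairs the index so parsing can continue with something safe. A minimal sketch of that clamp-and-report pattern (illustrative only, not the glslang function):

#include <algorithm>
#include <cstdio>

// Clamp an index into [0, size-1] and report whether it had to be corrected.
int clampIndex(int index, int size, bool& hadError)
{
    hadError = index < 0 || index >= size;
    return std::min(std::max(index, 0), size - 1);
}

int main() {
    bool hadError = false;
    std::printf("%d (error=%d)\n", clampIndex(7, 4, hadError), hadError ? 1 : 0);  // 3 (error=1)
}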
// Make a shared symbol have a non-shared version that can be edited by the current
// compile, such that editing its type will not change the shared version and will
// affect all nodes already sharing it (non-shallow type),
// or adopting its full type after being edited (shallow type).
void TParseContextBase::makeEditable(TSymbol*& symbol)
{
// copyUp() does a deep copy of the type.
symbol = symbolTable.copyUp(symbol);
// Save it (deferred, so it can be edited first) in the AST for linker use.
if (symbol)
trackLinkage(*symbol);
}
// Return a writable version of the variable 'name'.
//
// Return nullptr if 'name' is not found. This should mean
// something is seriously wrong (e.g., compiler asking self for
// built-in that doesn't exist).
TVariable* TParseContextBase::getEditableVariable(const char* name)
{
bool builtIn;
TSymbol* symbol = symbolTable.find(name, &builtIn);
assert(symbol != nullptr);
if (symbol == nullptr)
return nullptr;
if (builtIn)
makeEditable(symbol);
return symbol->getAsVariable();
}
// Select the best matching function for 'call' from 'candidateList'.
//
// Assumptions
//
// There is no exact match, so a selection algorithm needs to run. That is, the
// language-specific handler should check for exact match first, to
// decide what to do, before calling this selector.
//
// Input
//
// * list of candidate signatures to select from
// * the call
// * a predicate function convertible(from, to) that says whether or not type
// 'from' can implicitly convert to type 'to' (it includes the case of what
// the calling language would consider a matching type with no conversion
// needed)
// * a predicate function better(from1, from2, to1, to2) that says whether or
// not a conversion from <-> to2 is considered better than a conversion
// from <-> to1 (both in and out directions need testing, as declared by the
// formal parameter)
//
// Output
//
// * best matching candidate (or none, if no viable candidates found)
// * whether there was a tie for the best match (ambiguous overload selection,
// caller's choice for how to report)
//
const TFunction* TParseContextBase::selectFunction(
const TVector<const TFunction*> candidateList,
const TFunction& call,
std::function<bool(const TType& from, const TType& to, TOperator op, int arg)> convertible,
std::function<bool(const TType& from, const TType& to1, const TType& to2)> better,
/* output */ bool& tie)
{
//
// Operation
//
// 1. Prune the input list of candidates down to a list of viable candidates,
// where each viable candidate has
//
// * at least as many parameters as there are calling arguments, with any
// remaining parameters being optional or having default values
// * each parameter is true under convertible(A, B), where A is the calling
// type for in and B is the formal type, and in addition, for out B is the
// calling type and A is the formal type
//
// 2. If there are no viable candidates, return with no match.
//
// 3. If there is only one viable candidate, it is the best match.
//
// 4. If there are multiple viable candidates, select the first viable candidate
// as the incumbent. Compare the incumbent to the next viable candidate, and if
// that candidate is better (bullets below), make it the incumbent. Repeat, with
// a linear walk through the viable candidate list. The final incumbent will be
// returned as the best match. A viable candidate is better than the incumbent if
//
// * it has a function argument with a better(...) conversion than the incumbent,
// for all directions needed by in and out
// * the incumbent has no argument with a better(...) conversion than the
// candidate, for either in or out (as needed)
//
// 5. Check for ambiguity by comparing the best match against all other viable
// candidates. If any other viable candidate has a function argument with a
// better(...) conversion than the best candidate (for either in or out
// directions), return that there was a tie for best.
//
tie = false;
// 1. prune to viable...
TVector<const TFunction*> viableCandidates;
for (auto it = candidateList.begin(); it != candidateList.end(); ++it) {
const TFunction& candidate = *(*it);
// to even be a potential match, number of arguments must be >= the number of
// fixed (non-default) parameters, and <= the total (including parameters with defaults).
if (call.getParamCount() < candidate.getFixedParamCount() ||
call.getParamCount() > candidate.getParamCount())
continue;
// see if arguments are convertible
bool viable = true;
// The call can have fewer parameters than the candidate, if some have defaults.
const int paramCount = std::min(call.getParamCount(), candidate.getParamCount());
for (int param = 0; param < paramCount; ++param) {
if (candidate[param].type->getQualifier().isParamInput()) {
if (! convertible(*call[param].type, *candidate[param].type, candidate.getBuiltInOp(), param)) {
viable = false;
break;
}
}
if (candidate[param].type->getQualifier().isParamOutput()) {
if (! convertible(*candidate[param].type, *call[param].type, candidate.getBuiltInOp(), param)) {
viable = false;
break;
}
}
}
if (viable)
viableCandidates.push_back(&candidate);
}
// 2. none viable...
if (viableCandidates.size() == 0)
return nullptr;
// 3. only one viable...
if (viableCandidates.size() == 1)
return viableCandidates.front();
// 4. find best...
const auto betterParam = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
// is call -> can2 better than call -> can1 for any parameter
bool hasBetterParam = false;
for (int param = 0; param < call.getParamCount(); ++param) {
if (better(*call[param].type, *can1[param].type, *can2[param].type)) {
hasBetterParam = true;
break;
}
}
return hasBetterParam;
};
const auto equivalentParams = [&call, &better](const TFunction& can1, const TFunction& can2) -> bool {
// is call -> can2 equivalent to call -> can1 for all the call parameters?
for (int param = 0; param < call.getParamCount(); ++param) {
if (better(*call[param].type, *can1[param].type, *can2[param].type) ||
better(*call[param].type, *can2[param].type, *can1[param].type))
return false;
}
return true;
};
const TFunction* incumbent = viableCandidates.front();
for (auto it = viableCandidates.begin() + 1; it != viableCandidates.end(); ++it) {
const TFunction& candidate = *(*it);
if (betterParam(*incumbent, candidate) && ! betterParam(candidate, *incumbent))
incumbent = &candidate;
}
// 5. ambiguity...
for (auto it = viableCandidates.begin(); it != viableCandidates.end(); ++it) {
if (incumbent == *it)
continue;
const TFunction& candidate = *(*it);
// In the case of default parameters, it may have an identical initial set, which is
// also ambiguous
if (betterParam(*incumbent, candidate) || equivalentParams(*incumbent, candidate))
tie = true;
}
return incumbent;
}
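A compact way to see the numbered steps above is to shrink the problem to one parameter modeled as a numeric conversion rank. The sketch below (standalone, not glslang code; the rank model and candidate names are invented) keeps the same structure: prune to viable candidates, walk them keeping an incumbent, then check for a tie:

#include <iostream>
#include <string>
#include <vector>

// Illustrative only: one parameter per candidate, modeled as a conversion rank.
struct Candidate { std::string name; int paramRank; };

int main() {
    std::vector<Candidate> candidates = { {"f(float)", 2}, {"f(double)", 3}, {"f(int)", 1} };
    int callRank = 1;   // calling with an int-like argument

    // Stand-ins for the convertible()/better() predicates.
    auto convertible = [](int from, int to) { return from <= to; };                       // widening only
    auto better = [](int from, int to1, int to2) { return (to2 - from) < (to1 - from); };

    // 1. prune to viable candidates
    std::vector<const Candidate*> viable;
    for (const Candidate& c : candidates)
        if (convertible(callRank, c.paramRank))
            viable.push_back(&c);
    if (viable.empty()) { std::cout << "no match\n"; return 0; }   // 2. none viable

    // 3./4. linear walk, keeping the best candidate seen so far (the incumbent)
    const Candidate* incumbent = viable.front();
    for (const Candidate* c : viable)
        if (better(callRank, incumbent->paramRank, c->paramRank))
            incumbent = c;

    // 5. tie check: another viable candidate that is not strictly worse than the incumbent
    bool tie = false;
    for (const Candidate* c : viable)
        if (c != incumbent && !better(callRank, c->paramRank, incumbent->paramRank))
            tie = true;

    std::cout << "best: " << incumbent->name << (tie ? " (ambiguous)" : "") << "\n";
}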
//
// Look at a '.' field selector string and change it into numerical selectors
// for a vector or scalar.
//
// Always return some form of swizzle, so the result is always usable.
//
void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TString& compString, int vecSize,
TSwizzleSelectors<TVectorSelector>& selector)
{
// Too long?
if (compString.size() > MaxSwizzleSelectors)
error(loc, "vector swizzle too long", compString.c_str(), "");
// Use this to test that all swizzle characters are from the same swizzle-namespace-set
enum {
exyzw,
ergba,
estpq,
} fieldSet[MaxSwizzleSelectors];
// Decode the swizzle string.
int size = std::min(MaxSwizzleSelectors, (int)compString.size());
for (int i = 0; i < size; ++i) {
switch (compString[i]) {
case 'x':
selector.push_back(0);
fieldSet[i] = exyzw;
break;
case 'r':
selector.push_back(0);
fieldSet[i] = ergba;
break;
case 's':
selector.push_back(0);
fieldSet[i] = estpq;
break;
case 'y':
selector.push_back(1);
fieldSet[i] = exyzw;
break;
case 'g':
selector.push_back(1);
fieldSet[i] = ergba;
break;
case 't':
selector.push_back(1);
fieldSet[i] = estpq;
break;
case 'z':
selector.push_back(2);
fieldSet[i] = exyzw;
break;
case 'b':
selector.push_back(2);
fieldSet[i] = ergba;
break;
case 'p':
selector.push_back(2);
fieldSet[i] = estpq;
break;
case 'w':
selector.push_back(3);
fieldSet[i] = exyzw;
break;
case 'a':
selector.push_back(3);
fieldSet[i] = ergba;
break;
case 'q':
selector.push_back(3);
fieldSet[i] = estpq;
break;
default:
error(loc, "unknown swizzle selection", compString.c_str(), "");
break;
}
}
// Additional error checking.
for (int i = 0; i < selector.size(); ++i) {
if (selector[i] >= vecSize) {
error(loc, "vector swizzle selection out of range", compString.c_str(), "");
selector.resize(i);
break;
}
if (i > 0 && fieldSet[i] != fieldSet[i-1]) {
error(loc, "vector swizzle selectors not from the same set", compString.c_str(), "");
selector.resize(i);
break;
}
}
// Ensure it is valid.
if (selector.size() == 0)
selector.push_back(0);
}
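The three selector sets above ('xyzw', 'rgba', 'stpq') all map to component indices 0..3 and must not be mixed within one swizzle. A standalone sketch of that decoding (illustrative only; the error handling is reduced to a message):

#include <cstdio>
#include <string>
#include <vector>

int main() {
    const std::string sets[3] = { "xyzw", "rgba", "stpq" };
    const std::string swizzle = "rgb";   // as written in source: someColor.rgb
    const int vecSize = 4;

    std::vector<int> selector;
    int firstSet = -1;
    for (char c : swizzle) {
        for (int s = 0; s < 3; ++s) {
            auto pos = sets[s].find(c);
            if (pos == std::string::npos)
                continue;                                   // not in this set
            if (firstSet < 0)
                firstSet = s;                               // remember the first set used
            if (s != firstSet || static_cast<int>(pos) >= vecSize)
                std::puts("swizzle error");                 // mixed sets or out of range
            selector.push_back(static_cast<int>(pos));
        }
    }
    for (int i : selector)
        std::printf("%d ", i);                              // prints: 0 1 2
    std::printf("\n");
}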
//
// Make the passed-in variable information become a member of the
// global uniform block. If this doesn't exist yet, make it.
//
void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& memberType, const TString& memberName, TTypeList* typeList)
{
// Make the global block, if not yet made.
if (globalUniformBlock == nullptr) {
TQualifier blockQualifier;
blockQualifier.clear();
blockQualifier.storage = EvqUniform;
TType blockType(new TTypeList, *NewPoolTString(getGlobalUniformBlockName()), blockQualifier);
setUniformBlockDefaults(blockType);
globalUniformBlock = new TVariable(NewPoolTString(""), blockType, true);
firstNewMember = 0;
}
// Update with binding and set
globalUniformBlock->getWritableType().getQualifier().layoutBinding = globalUniformBinding;
globalUniformBlock->getWritableType().getQualifier().layoutSet = globalUniformSet;
// Add the requested member as a member to the global block.
TType* type = new TType;
type->shallowCopy(memberType);
type->setFieldName(memberName);
if (typeList)
type->setStruct(typeList);
TTypeLoc typeLoc = {type, loc};
globalUniformBlock->getType().getWritableStruct()->push_back(typeLoc);
// Insert into the symbol table.
if (firstNewMember == 0) {
// This is the first request; we need a normal symbol table insert
if (symbolTable.insert(*globalUniformBlock))
trackLinkage(*globalUniformBlock);
else
error(loc, "failed to insert the global constant buffer", "uniform", "");
} else {
// This is a follow-on request; we need to amend the first insert
symbolTable.amend(*globalUniformBlock, firstNewMember);
}
++firstNewMember;
}
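growGlobalUniformBlock() above appends loose uniforms to one implicit block, creating and inserting the block only on the first request and amending it afterwards. A rough standalone sketch of that accumulate-then-commit shape (not glslang code; types and member names are invented):

#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Illustrative only: members are gathered first, then the not-yet-declared
// tail is committed, mirroring the insert/amend split above.
struct Member { std::string type, name; };

struct ImplicitUniformBlock {
    std::vector<Member> members;
    std::size_t firstNewMember = 0;   // members below this index are already declared

    void grow(const Member& m) { members.push_back(m); }

    void commit() {
        for (std::size_t i = firstNewMember; i < members.size(); ++i)
            std::cout << "declare " << members[i].type << " " << members[i].name << "\n";
        firstNewMember = members.size();
    }
};

int main() {
    ImplicitUniformBlock block;
    block.grow({"mat4", "ProjectionMatrix"});   // hypothetical member
    block.grow({"vec4", "LightColor"});         // hypothetical member
    block.commit();
}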
void TParseContextBase::finish()
{
if (parsingBuiltins)
return;
// Transfer the linkage symbols to AST nodes, preserving order.
TIntermAggregate* linkage = new TIntermAggregate;
for (auto i = linkageSymbols.begin(); i != linkageSymbols.end(); ++i)
intermediate.addSymbolLinkageNode(linkage, **i);
intermediate.addSymbolLinkageNodes(linkage, getLanguage(), symbolTable);
}
} // end namespace glslang

File diff suppressed because it is too large

View File

@@ -0,0 +1,504 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// This header defines a two-level parse-helper hierarchy, derived from
// TParseVersions:
// - TParseContextBase: sharable across multiple parsers
// - TParseContext: GLSL specific helper
//
#ifndef _PARSER_HELPER_INCLUDED_
#define _PARSER_HELPER_INCLUDED_
#include <cstdarg>
#include <functional>
#include "parseVersions.h"
#include "../Include/ShHandle.h"
#include "SymbolTable.h"
#include "localintermediate.h"
#include "Scan.h"
#include "attribute.h"
namespace glslang {
struct TPragma {
TPragma(bool o, bool d) : optimize(o), debug(d) { }
bool optimize;
bool debug;
TPragmaTable pragmaTable;
};
class TScanContext;
class TPpContext;
typedef std::set<int> TIdSetType;
//
// Sharable code (as well as what's in TParseVersions) across
// parse helpers.
//
class TParseContextBase : public TParseVersions {
public:
TParseContextBase(TSymbolTable& symbolTable, TIntermediate& interm, bool parsingBuiltins, int version,
EProfile profile, const SpvVersion& spvVersion, EShLanguage language,
TInfoSink& infoSink, bool forwardCompatible, EShMessages messages,
const TString* entryPoint = nullptr)
: TParseVersions(interm, version, profile, spvVersion, language, infoSink, forwardCompatible, messages),
scopeMangler("::"),
symbolTable(symbolTable),
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
postEntryPointReturn(false),
contextPragma(true, false),
parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
limits(resources.limits),
globalUniformBlock(nullptr),
globalUniformBinding(TQualifier::layoutBindingEnd),
globalUniformSet(TQualifier::layoutSetEnd)
{
if (entryPoint != nullptr)
sourceEntryPointName = *entryPoint;
}
virtual ~TParseContextBase() { }
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void setLimits(const TBuiltInResource&) = 0;
void checkIndex(const TSourceLoc&, const TType&, int& index);
EShLanguage getLanguage() const { return language; }
void setScanContext(TScanContext* c) { scanContext = c; }
TScanContext* getScanContext() const { return scanContext; }
void setPpContext(TPpContext* c) { ppContext = c; }
TPpContext* getPpContext() const { return ppContext; }
virtual void setLineCallback(const std::function<void(int, int, bool, int, const char*)>& func) { lineCallback = func; }
virtual void setExtensionCallback(const std::function<void(int, const char*, const char*)>& func) { extensionCallback = func; }
virtual void setVersionCallback(const std::function<void(int, int, const char*)>& func) { versionCallback = func; }
virtual void setPragmaCallback(const std::function<void(int, const TVector<TString>&)>& func) { pragmaCallback = func; }
virtual void setErrorCallback(const std::function<void(int, const char*)>& func) { errorCallback = func; }
virtual void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) = 0;
virtual bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) = 0;
virtual bool lineDirectiveShouldSetNextLine() const = 0;
virtual void handlePragma(const TSourceLoc&, const TVector<TString>&) = 0;
virtual bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) = 0;
virtual void notifyVersion(int line, int version, const char* type_string)
{
if (versionCallback)
versionCallback(line, version, type_string);
}
virtual void notifyErrorDirective(int line, const char* error_message)
{
if (errorCallback)
errorCallback(line, error_message);
}
virtual void notifyLineDirective(int curLineNo, int newLineNo, bool hasSource, int sourceNum, const char* sourceName)
{
if (lineCallback)
lineCallback(curLineNo, newLineNo, hasSource, sourceNum, sourceName);
}
virtual void notifyExtensionDirective(int line, const char* extension, const char* behavior)
{
if (extensionCallback)
extensionCallback(line, extension, behavior);
}
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
{
// Replace the entry point name given in the shader with the real entry point name,
// if there is a substitution.
if (name != nullptr && *name == sourceEntryPointName && intermediate.getEntryPointName().size() > 0)
name = NewPoolTString(intermediate.getEntryPointName().c_str());
}
virtual bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
virtual void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*);
const char* const scopeMangler;
// Basic parsing state, easily accessible to the grammar
TSymbolTable& symbolTable; // symbol table that goes with the current language, version, and profile
int statementNestingLevel; // 0 if outside all flow control or compound statements
int loopNestingLevel; // 0 if outside all loops
int structNestingLevel; // 0 if outside blocks and structures
int controlFlowNestingLevel; // 0 if outside all flow control
const TType* currentFunctionType; // the return type of the function that's currently being parsed
bool functionReturnsValue; // true if a non-void function has a return
// if inside a function, true if the function is the entry point and this is after a return statement
bool postEntryPointReturn;
// case, node, case, case, node, ...; ensure only one node between cases; stack of them for nesting
TList<TIntermSequence*> switchSequenceStack;
// the statementNestingLevel the current switch statement is at, which must match the level of its case statements
TList<int> switchLevel;
struct TPragma contextPragma;
protected:
TParseContextBase(TParseContextBase&);
TParseContextBase& operator=(TParseContextBase&);
const bool parsingBuiltins; // true if parsing built-in symbols/functions
TVector<TSymbol*> linkageSymbols; // will be transferred to 'linkage', after all editing is done, order preserving
TScanContext* scanContext;
TPpContext* ppContext;
TBuiltInResource resources;
TLimits& limits;
TString sourceEntryPointName;
// These, if set, will be called when a line, pragma ... is preprocessed.
// They will be called with any parameters to the original directive.
std::function<void(int, int, bool, int, const char*)> lineCallback;
std::function<void(int, const TVector<TString>&)> pragmaCallback;
std::function<void(int, int, const char*)> versionCallback;
std::function<void(int, const char*, const char*)> extensionCallback;
std::function<void(int, const char*)> errorCallback;
// see implementation for detail
const TFunction* selectFunction(const TVector<const TFunction*>, const TFunction&,
std::function<bool(const TType&, const TType&, TOperator, int arg)>,
std::function<bool(const TType&, const TType&, const TType&)>,
/* output */ bool& tie);
virtual void parseSwizzleSelector(const TSourceLoc&, const TString&, int size,
TSwizzleSelectors<TVectorSelector>&);
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
TVariable* globalUniformBlock; // the actual block, inserted into the symbol table
unsigned int globalUniformBinding; // the block's binding number
unsigned int globalUniformSet; // the block's set number
int firstNewMember; // the index of the first member not yet inserted into the symbol table
// override this to set the language-specific name
virtual const char* getGlobalUniformBlockName() const { return ""; }
virtual void setUniformBlockDefaults(TType&) const { }
virtual void finalizeGlobalUniformBlockLayout(TVariable&) { }
virtual void outputMessage(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, TPrefixType prefix,
va_list args);
virtual void trackLinkage(TSymbol& symbol);
virtual void makeEditable(TSymbol*&);
virtual TVariable* getEditableVariable(const char* name);
virtual void finish();
};
//
// Manage the state for when to respect precision qualifiers and when to warn about
// the defaults being different than might be expected.
//
class TPrecisionManager {
public:
TPrecisionManager() : obey(false), warn(false), explicitIntDefault(false), explicitFloatDefault(false){ }
virtual ~TPrecisionManager() {}
void respectPrecisionQualifiers() { obey = true; }
bool respectingPrecisionQualifiers() const { return obey; }
bool shouldWarnAboutDefaults() const { return warn; }
void defaultWarningGiven() { warn = false; }
void warnAboutDefaults() { warn = true; }
void explicitIntDefaultSeen()
{
explicitIntDefault = true;
if (explicitFloatDefault)
warn = false;
}
void explicitFloatDefaultSeen()
{
explicitFloatDefault = true;
if (explicitIntDefault)
warn = false;
}
protected:
bool obey; // respect precision qualifiers
bool warn; // need to give a warning about the defaults
bool explicitIntDefault; // user set the default for int/uint
bool explicitFloatDefault; // user set the default for float
};
//
// GLSL-specific parse helper. Should have GLSL in the name, but that's
// too big of a change for comparing branches at the moment, and perhaps
// impacts downstream consumers as well.
//
class TParseContext : public TParseContextBase {
public:
TParseContext(TSymbolTable&, TIntermediate&, bool parsingBuiltins, int version, EProfile, const SpvVersion& spvVersion, EShLanguage, TInfoSink&,
bool forwardCompatible = false, EShMessages messages = EShMsgDefault,
const TString* entryPoint = nullptr);
virtual ~TParseContext();
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); };
void setPrecisionDefaults();
void setLimits(const TBuiltInResource&) override;
bool parseShaderStrings(TPpContext&, TInputScanner& input, bool versionWillBeError = false) override;
void parserError(const char* s); // for bison's yyerror
void reservedErrorCheck(const TSourceLoc&, const TString&);
void reservedPpErrorCheck(const TSourceLoc&, const char* name, const char* op) override;
bool lineContinuationCheck(const TSourceLoc&, bool endOfComment) override;
bool lineDirectiveShouldSetNextLine() const override;
bool builtInName(const TString&);
void handlePragma(const TSourceLoc&, const TVector<TString>&) override;
TIntermTyped* handleVariable(const TSourceLoc&, TSymbol* symbol, const TString* string);
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void makeEditable(TSymbol*&) override;
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize() const;
void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
TIntermTyped* handleUnaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* childNode);
TIntermTyped* handleDotDereference(const TSourceLoc&, TIntermTyped* base, const TString& field);
void blockMemberExtensionCheck(const TSourceLoc&, const TIntermTyped* base, const TString& field);
TFunction* handleFunctionDeclarator(const TSourceLoc&, TFunction& function, bool prototype);
TIntermAggregate* handleFunctionDefinition(const TSourceLoc&, TFunction&);
TIntermTyped* handleFunctionCall(const TSourceLoc&, TFunction*, TIntermNode*);
TIntermTyped* handleBuiltInFunctionCall(TSourceLoc, TIntermNode* arguments, const TFunction& function);
void computeBuiltinPrecisions(TIntermTyped&, const TFunction&);
TIntermNode* handleReturnValue(const TSourceLoc&, TIntermTyped*);
void checkLocation(const TSourceLoc&, TOperator);
TIntermTyped* handleLengthMethod(const TSourceLoc&, TFunction*, TIntermNode*);
void addInputArgumentConversions(const TFunction&, TIntermNode*&) const;
TIntermTyped* addOutputArgumentConversions(const TFunction&, TIntermAggregate&) const;
void builtInOpCheck(const TSourceLoc&, const TFunction&, TIntermOperator&);
void nonOpBuiltInCheck(const TSourceLoc&, const TFunction&, TIntermAggregate&);
void userFunctionCallCheck(const TSourceLoc&, TIntermAggregate&);
void samplerConstructorLocationCheck(const TSourceLoc&, const char* token, TIntermNode*);
TFunction* handleConstructorCall(const TSourceLoc&, const TPublicType&);
void handlePrecisionQualifier(const TSourceLoc&, TQualifier&, TPrecisionQualifier);
void checkPrecisionQualifier(const TSourceLoc&, TPrecisionQualifier);
void assignError(const TSourceLoc&, const char* op, TString left, TString right);
void unaryOpError(const TSourceLoc&, const char* op, TString operand);
void binaryOpError(const TSourceLoc&, const char* op, TString left, TString right);
void variableCheck(TIntermTyped*& nodePtr);
bool lValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
void rValueErrorCheck(const TSourceLoc&, const char* op, TIntermTyped*) override;
void constantValueCheck(TIntermTyped* node, const char* token);
void integerCheck(const TIntermTyped* node, const char* token);
void globalCheck(const TSourceLoc&, const char* token);
bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&);
bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
bool arrayError(const TSourceLoc&, const TType&);
void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
void structArrayCheck(const TSourceLoc&, const TType& structure);
void arraySizesCheck(const TSourceLoc&, const TQualifier&, TArraySizes*, const TIntermTyped* initializer, bool lastMember);
void arrayOfArrayVersionCheck(const TSourceLoc&, const TArraySizes*);
bool voidErrorCheck(const TSourceLoc&, const TString&, TBasicType);
void boolCheck(const TSourceLoc&, const TIntermTyped*);
void boolCheck(const TSourceLoc&, const TPublicType&);
void samplerCheck(const TSourceLoc&, const TType&, const TString& identifier, TIntermTyped* initializer);
void atomicUintCheck(const TSourceLoc&, const TType&, const TString& identifier);
void transparentOpaqueCheck(const TSourceLoc&, const TType&, const TString& identifier);
void memberQualifierCheck(glslang::TPublicType&);
void globalQualifierFixCheck(const TSourceLoc&, TQualifier&);
void globalQualifierTypeCheck(const TSourceLoc&, const TQualifier&, const TPublicType&);
bool structQualifierErrorCheck(const TSourceLoc&, const TPublicType& pType);
void mergeQualifiers(const TSourceLoc&, TQualifier& dst, const TQualifier& src, bool force);
void setDefaultPrecision(const TSourceLoc&, TPublicType&, TPrecisionQualifier);
int computeSamplerTypeIndex(TSampler&);
TPrecisionQualifier getDefaultPrecision(TPublicType&);
void precisionQualifierCheck(const TSourceLoc&, TBasicType, TQualifier&);
void parameterTypeCheck(const TSourceLoc&, TStorageQualifier qualifier, const TType& type);
bool containsFieldWithBasicType(const TType& type, TBasicType basicType);
TSymbol* redeclareBuiltinVariable(const TSourceLoc&, const TString&, const TQualifier&, const TShaderQualifiers&);
void redeclareBuiltinBlock(const TSourceLoc&, TTypeList& typeList, const TString& blockName, const TString* instanceName, TArraySizes* arraySizes);
void paramCheckFixStorage(const TSourceLoc&, const TStorageQualifier&, TType& type);
void paramCheckFix(const TSourceLoc&, const TQualifier&, TType& type);
void nestedBlockCheck(const TSourceLoc&);
void nestedStructCheck(const TSourceLoc&);
void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
void specializationCheck(const TSourceLoc&, const TType&, const char* op);
void structTypeCheck(const TSourceLoc&, TPublicType&);
void inductiveLoopCheck(const TSourceLoc&, TIntermNode* init, TIntermLoop* loop);
void arrayLimitCheck(const TSourceLoc&, const TString&, int size);
void limitCheck(const TSourceLoc&, int value, const char* limit, const char* feature);
void inductiveLoopBodyCheck(TIntermNode*, int loopIndexId, TSymbolTable&);
void constantIndexExpressionCheck(TIntermNode*);
void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&);
void setLayoutQualifier(const TSourceLoc&, TPublicType&, TString&, const TIntermTyped*);
void mergeObjectLayoutQualifiers(TQualifier& dest, const TQualifier& src, bool inheritOnly);
void layoutObjectCheck(const TSourceLoc&, const TSymbol&);
void layoutMemberLocationArrayCheck(const TSourceLoc&, bool memberWithLocation, TArraySizes* arraySizes);
void layoutTypeCheck(const TSourceLoc&, const TType&);
void layoutQualifierCheck(const TSourceLoc&, const TQualifier&);
void checkNoShaderLayouts(const TSourceLoc&, const TShaderQualifiers&);
void fixOffset(const TSourceLoc&, TSymbol&);
const TFunction* findFunction(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunctionExact(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunction120(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunction400(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
const TFunction* findFunctionExplicitTypes(const TSourceLoc& loc, const TFunction& call, bool& builtIn);
void declareTypeDefaults(const TSourceLoc&, const TPublicType&);
TIntermNode* declareVariable(const TSourceLoc&, TString& identifier, const TPublicType&, TArraySizes* typeArray = 0, TIntermTyped* initializer = 0);
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
void fixBlockLocations(const TSourceLoc&, TQualifier&, TTypeList&, bool memberWithLocation, bool memberWithoutLocation);
void fixBlockXfbOffsets(TQualifier&, TTypeList&);
void fixBlockUniformOffsets(TQualifier&, TTypeList&);
void addQualifierToExisting(const TSourceLoc&, TQualifier, const TString& identifier);
void addQualifierToExisting(const TSourceLoc&, TQualifier, TIdentifierList&);
void invariantCheck(const TSourceLoc&, const TQualifier&);
void updateStandaloneQualifierDefaults(const TSourceLoc&, const TPublicType&);
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
TAttributeType attributeFromName(const TString& name) const;
TAttributes* makeAttributes(const TString& identifier) const;
TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
TAttributes* mergeAttributes(TAttributes*, TAttributes*) const;
// Determine selection control from attributes
void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
protected:
void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
void inheritGlobalDefaults(TQualifier& dst) const;
TVariable* makeInternalVariable(const char* name, const TType&) const;
TVariable* declareNonArray(const TSourceLoc&, const TString& identifier, const TType&);
void declareArray(const TSourceLoc&, const TString& identifier, const TType&, TSymbol*&);
void checkRuntimeSizable(const TSourceLoc&, const TIntermTyped&);
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
void finish() override;
public:
//
// Generally, bison productions, the scanner, and the PP need read/write access to these; just give them direct access
//
// Current state of parsing
bool inMain; // if inside a function, true if the function is main
const TString* blockName;
TQualifier currentBlockQualifier;
TPrecisionQualifier defaultPrecision[EbtNumTypes];
TBuiltInResource resources;
TLimits& limits;
protected:
TParseContext(TParseContext&);
TParseContext& operator=(TParseContext&);
static const int maxSamplerIndex = EsdNumDims * (EbtNumTypes * (2 * 2 * 2 * 2 * 2)); // see computeSamplerTypeIndex()
TPrecisionQualifier defaultSamplerPrecision[maxSamplerIndex];
TPrecisionManager precisionManager;
TQualifier globalBufferDefaults;
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
TString currentCaller; // name of last function body entered (not valid when at global scope)
TIdSetType inductiveLoopIds;
bool anyIndexLimits;
TVector<TIntermTyped*> needsIndexLimitationChecking;
//
// Geometry shader input arrays:
// - array sizing is based on input primitive and/or explicit size
//
// Tessellation control output arrays:
// - array sizing is based on output layout(vertices=...) and/or explicit size
//
// Both:
// - array sizing is retroactive
// - built-in block redeclarations interact with this
//
// Design:
// - use a per-context "resize-list", a list of symbols whose array sizes
// can be fixed
//
// - the resize-list starts empty at beginning of user-shader compilation, it does
// not have built-ins in it
//
// - on built-in array use: copyUp() symbol and add it to the resize-list
//
// - on user array declaration: add it to the resize-list
//
// - on block redeclaration: copyUp() symbol and add it to the resize-list
// * note, that appropriately gives an error if redeclaring a block that
// was already used and hence already copied-up
//
// - on seeing a layout declaration that sizes the array, fix everything in the
// resize-list, giving errors for mismatch
//
// - on seeing an array size declaration, give errors on mismatch between it and previous
// array-sizing declarations
//
TVector<TSymbol*> ioArraySymbolResizeList;
};
} // end namespace glslang
#endif // _PARSER_HELPER_INCLUDED_
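The resize-list design in the comment block above can be sketched on its own: collect every io array whose size may still be decided later, then fix them all retroactively once the sizing declaration arrives, reporting mismatches. A standalone illustration (not glslang code; names and sizes are invented):

#include <iostream>
#include <string>
#include <vector>

struct IoArray { std::string name; int size; };   // size 0 = not yet sized

int main() {
    std::vector<IoArray> resizeList;
    resizeList.push_back({"redeclaredBuiltInBlock", 0});   // hypothetical copied-up built-in
    resizeList.push_back({"userInput", 3});                // user gave an explicit size

    int implicitSize = 3;                                  // e.g. derived from the input primitive
    for (IoArray& a : resizeList) {
        if (a.size == 0)
            a.size = implicitSize;                         // retroactive sizing
        else if (a.size != implicitSize)
            std::cout << a.name << ": size mismatch\n";    // error on conflicting declarations
        std::cout << a.name << "[" << a.size << "]\n";
    }
}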

View File

@@ -0,0 +1,315 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/Common.h"
#include "../Include/PoolAlloc.h"
#include "../Include/InitializeGlobals.h"
#include "../OSDependent/osinclude.h"
namespace glslang {
// Process-wide TLS index
OS_TLSIndex PoolIndex;
// Return the thread-specific current pool.
TPoolAllocator& GetThreadPoolAllocator()
{
return *static_cast<TPoolAllocator*>(OS_GetTLSValue(PoolIndex));
}
// Set the thread-specific current pool.
void SetThreadPoolAllocator(TPoolAllocator* poolAllocator)
{
OS_SetTLSValue(PoolIndex, poolAllocator);
}
// Process-wide set up of the TLS pool storage.
bool InitializePoolIndex()
{
// Allocate a TLS index.
if ((PoolIndex = OS_AllocTLSIndex()) == OS_INVALID_TLS_INDEX)
return false;
return true;
}
//
// Implement the functionality of the TPoolAllocator class, which
// is documented in PoolAlloc.h.
//
TPoolAllocator::TPoolAllocator(int growthIncrement, int allocationAlignment) :
pageSize(growthIncrement),
alignment(allocationAlignment),
freeList(nullptr),
inUseList(nullptr),
numCalls(0)
{
//
// Don't allow page sizes we know are smaller than all common
// OS page sizes.
//
if (pageSize < 4*1024)
pageSize = 4*1024;
//
// A large currentPageOffset indicates a new page needs to
// be obtained to allocate memory.
//
currentPageOffset = pageSize;
//
// Adjust alignment to be at least pointer aligned and
// power of 2.
//
size_t minAlign = sizeof(void*);
alignment &= ~(minAlign - 1);
if (alignment < minAlign)
alignment = minAlign;
size_t a = 1;
while (a < alignment)
a <<= 1;
alignment = a;
alignmentMask = a - 1;
//
// Align header skip
//
headerSkip = minAlign;
if (headerSkip < sizeof(tHeader)) {
headerSkip = (sizeof(tHeader) + alignmentMask) & ~alignmentMask;
}
push();
}
TPoolAllocator::~TPoolAllocator()
{
while (inUseList) {
tHeader* next = inUseList->nextPage;
inUseList->~tHeader();
delete [] reinterpret_cast<char*>(inUseList);
inUseList = next;
}
//
// Always delete the free list memory - it can't be being
// (correctly) referenced, whether the pool allocator was
// global or not. We should not check the guard blocks
// here, because we did it already when the block was
// placed into the free list.
//
while (freeList) {
tHeader* next = freeList->nextPage;
delete [] reinterpret_cast<char*>(freeList);
freeList = next;
}
}
const unsigned char TAllocation::guardBlockBeginVal = 0xfb;
const unsigned char TAllocation::guardBlockEndVal = 0xfe;
const unsigned char TAllocation::userDataFill = 0xcd;
# ifdef GUARD_BLOCKS
const size_t TAllocation::guardBlockSize = 16;
# else
const size_t TAllocation::guardBlockSize = 0;
# endif
//
// Check a single guard block for damage
//
#ifdef GUARD_BLOCKS
void TAllocation::checkGuardBlock(unsigned char* blockMem, unsigned char val, const char* locText) const
#else
void TAllocation::checkGuardBlock(unsigned char*, unsigned char, const char*) const
#endif
{
#ifdef GUARD_BLOCKS
for (size_t x = 0; x < guardBlockSize; x++) {
if (blockMem[x] != val) {
const int maxSize = 80;
char assertMsg[maxSize];
// We don't print the assert message. It's here just to be helpful.
snprintf(assertMsg, maxSize, "PoolAlloc: Damage %s %zu byte allocation at 0x%p\n",
locText, size, data());
assert(0 && "PoolAlloc: Damage in guard block");
}
}
#else
assert(guardBlockSize == 0);
#endif
}
void TPoolAllocator::push()
{
tAllocState state = { currentPageOffset, inUseList };
stack.push_back(state);
//
// Indicate there is no current page to allocate from.
//
currentPageOffset = pageSize;
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred since the last push(), or since the
// last pop(), or since the object's creation.
//
// The deallocated pages are saved for future allocations.
//
void TPoolAllocator::pop()
{
if (stack.size() < 1)
return;
tHeader* page = stack.back().page;
currentPageOffset = stack.back().offset;
while (inUseList != page) {
tHeader* nextInUse = inUseList->nextPage;
size_t pageCount = inUseList->pageCount;
// This technically ends the lifetime of the header as C++ object,
// but we will still control the memory and reuse it.
inUseList->~tHeader(); // currently, just a debug allocation checker
if (pageCount > 1) {
delete [] reinterpret_cast<char*>(inUseList);
} else {
inUseList->nextPage = freeList;
freeList = inUseList;
}
inUseList = nextInUse;
}
stack.pop_back();
}
//
// Do a mass-deallocation of all the individual allocations
// that have occurred.
//
void TPoolAllocator::popAll()
{
while (stack.size() > 0)
pop();
}
void* TPoolAllocator::allocate(size_t numBytes)
{
// If we are using guard blocks, all allocations are bracketed by
// them: [guardblock][allocation][guardblock]. numBytes is how
// much memory the caller asked for. allocationSize is the total
// size including guard blocks. In release build,
// guardBlockSize=0 and this all gets optimized away.
size_t allocationSize = TAllocation::allocationSize(numBytes);
//
// Just keep some interesting statistics.
//
++numCalls;
totalBytes += numBytes;
//
// Do the allocation, most likely case first, for efficiency.
// This step could be moved to be inline sometime.
//
if (currentPageOffset + allocationSize <= pageSize) {
//
// Safe to allocate from currentPageOffset.
//
unsigned char* memory = reinterpret_cast<unsigned char*>(inUseList) + currentPageOffset;
currentPageOffset += allocationSize;
currentPageOffset = (currentPageOffset + alignmentMask) & ~alignmentMask;
return initializeAllocation(inUseList, memory, numBytes);
}
if (allocationSize + headerSkip > pageSize) {
//
// Do a multi-page allocation. Don't mix these with the others.
// The OS is efficient at allocating and freeing multiple pages.
//
size_t numBytesToAlloc = allocationSize + headerSkip;
tHeader* memory = reinterpret_cast<tHeader*>(::new char[numBytesToAlloc]);
if (memory == 0)
return 0;
// Use placement-new to initialize header
new(memory) tHeader(inUseList, (numBytesToAlloc + pageSize - 1) / pageSize);
inUseList = memory;
currentPageOffset = pageSize; // make next allocation come from a new page
// No guard blocks for multi-page allocations (yet)
return reinterpret_cast<void*>(reinterpret_cast<UINT_PTR>(memory) + headerSkip);
}
//
// Need a simple page to allocate from.
//
tHeader* memory;
if (freeList) {
memory = freeList;
freeList = freeList->nextPage;
} else {
memory = reinterpret_cast<tHeader*>(::new char[pageSize]);
if (memory == 0)
return 0;
}
// Use placement-new to initialize header
new(memory) tHeader(inUseList, 1);
inUseList = memory;
unsigned char* ret = reinterpret_cast<unsigned char*>(inUseList) + headerSkip;
currentPageOffset = (headerSkip + allocationSize + alignmentMask) & ~alignmentMask;
return initializeAllocation(inUseList, ret, numBytes);
}
//
// Check all allocations in a list for damage by calling check on each.
//
void TAllocation::checkAllocList() const
{
for (const TAllocation* alloc = this; alloc != 0; alloc = alloc->prevAlloc)
alloc->check();
}
} // end namespace glslang
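
The allocator above is driven through the thread-local hooks defined at the top of this file. A minimal usage sketch, not part of the diff, assuming the per-process and per-thread pool setup elsewhere in glslang (InitializePoolIndex and SetThreadPoolAllocator) has already run:

// Illustrative only: drives the TPoolAllocator implemented above through the
// thread-local accessor; assumes #include "../Include/PoolAlloc.h" and that a
// live pool has been installed for this thread.
glslang::TPoolAllocator& pool = glslang::GetThreadPoolAllocator();

pool.push();                              // open an allocation scope
void* scratch = pool.allocate(256);       // bump-allocated from the current page
(void)scratch;                            // ... build pool-owned objects here ...
pool.pop();                               // mass-free everything since the push()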

View File

@ -0,0 +1,118 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/intermediate.h"
#include "RemoveTree.h"
namespace glslang {
//
// Code to recursively delete the intermediate tree.
//
struct TRemoveTraverser : TIntermTraverser {
TRemoveTraverser() : TIntermTraverser(false, false, true, false) {}
virtual void visitSymbol(TIntermSymbol* node)
{
delete node;
}
virtual bool visitBinary(TVisit /* visit*/ , TIntermBinary* node)
{
delete node;
return true;
}
virtual bool visitUnary(TVisit /* visit */, TIntermUnary* node)
{
delete node;
return true;
}
virtual bool visitAggregate(TVisit /* visit*/ , TIntermAggregate* node)
{
delete node;
return true;
}
virtual bool visitSelection(TVisit /* visit*/ , TIntermSelection* node)
{
delete node;
return true;
}
virtual bool visitSwitch(TVisit /* visit*/ , TIntermSwitch* node)
{
delete node;
return true;
}
virtual void visitConstantUnion(TIntermConstantUnion* node)
{
delete node;
}
virtual bool visitLoop(TVisit /* visit*/ , TIntermLoop* node)
{
delete node;
return true;
}
virtual bool visitBranch(TVisit /* visit*/ , TIntermBranch* node)
{
delete node;
return true;
}
};
//
// Entry point.
//
void RemoveAllTreeNodes(TIntermNode* root)
{
TRemoveTraverser it;
root->traverse(&it);
}
} // end namespace glslang
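
RemoveAllTreeNodes() is simply the post-order traverser pattern above applied with delete at every visit. As a hypothetical illustration of the same pattern (not part of this commit), a read-only pass can reuse the same constructor flags so that children are visited before their parents; the flag order (preVisit, inVisit, postVisit, rightToLeft) is taken from the upstream TIntermTraverser constructor and is an assumption here.

// Hypothetical node counter using the same post-visit traverser shape as
// TRemoveTraverser above; assumes #include "../Include/intermediate.h".
struct TCountTraverser : glslang::TIntermTraverser {
    TCountTraverser() : TIntermTraverser(false, false, true, false), count(0) {}
    void visitSymbol(glslang::TIntermSymbol*) override { ++count; }
    bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override { ++count; return true; }
    int count;
};

// Used exactly like the remover above:  TCountTraverser counter;  root->traverse(&counter);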

View File

@ -0,0 +1,41 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#pragma once
namespace glslang {
void RemoveAllTreeNodes(TIntermNode*);
} // end namespace glslang

File diff suppressed because it is too large

View File

@ -0,0 +1,274 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _GLSLANG_SCAN_INCLUDED_
#define _GLSLANG_SCAN_INCLUDED_
#include "Versions.h"
namespace glslang {
// Use a global end-of-input character, so no translation is needed across
// layers of encapsulation. Characters are all 8 bit, and positive, so there is
// no aliasing of character 255 onto -1, for example.
const int EndOfInput = -1;
//
// A character scanner that seamlessly, on read-only strings, reads across an
// array of strings without assuming null termination.
//
class TInputScanner {
public:
TInputScanner(int n, const char* const s[], size_t L[], const char* const* names = nullptr,
int b = 0, int f = 0, bool single = false) :
numSources(n),
// up to this point, common usage is "char*", but now we need positive 8-bit characters
sources(reinterpret_cast<const unsigned char* const *>(s)),
lengths(L), currentSource(0), currentChar(0), stringBias(b), finale(f), singleLogical(single),
endOfFileReached(false)
{
loc = new TSourceLoc[numSources];
for (int i = 0; i < numSources; ++i) {
loc[i].init(i - stringBias);
}
if (names != nullptr) {
for (int i = 0; i < numSources; ++i)
loc[i].name = names[i];
}
loc[currentSource].line = 1;
logicalSourceLoc.init(1);
logicalSourceLoc.name = loc[0].name;
}
virtual ~TInputScanner()
{
delete [] loc;
}
// retrieve the next character and advance one character
int get()
{
int ret = peek();
if (ret == EndOfInput)
return ret;
++loc[currentSource].column;
++logicalSourceLoc.column;
if (ret == '\n') {
++loc[currentSource].line;
++logicalSourceLoc.line;
logicalSourceLoc.column = 0;
loc[currentSource].column = 0;
}
advance();
return ret;
}
// retrieve the next character, no advance
int peek()
{
if (currentSource >= numSources) {
endOfFileReached = true;
return EndOfInput;
}
// Make sure we do not read off the end of a string.
// N.B. Sources can have a length of 0.
int sourceToRead = currentSource;
size_t charToRead = currentChar;
while(charToRead >= lengths[sourceToRead]) {
charToRead = 0;
sourceToRead += 1;
if (sourceToRead >= numSources) {
return EndOfInput;
}
}
// Here, we care about making negative valued characters positive
return sources[sourceToRead][charToRead];
}
// go back one character
void unget()
{
// Do not roll back once we've reached the end of the file.
if (endOfFileReached)
return;
if (currentChar > 0) {
--currentChar;
--loc[currentSource].column;
--logicalSourceLoc.column;
if (loc[currentSource].column < 0) {
// We've moved back past a new line. Find the
// previous newline (or start of the file) to compute
// the column count on the now current line.
size_t chIndex = currentChar;
while (chIndex > 0) {
if (sources[currentSource][chIndex] == '\n') {
break;
}
--chIndex;
}
logicalSourceLoc.column = (int)(currentChar - chIndex);
loc[currentSource].column = (int)(currentChar - chIndex);
}
} else {
do {
--currentSource;
} while (currentSource > 0 && lengths[currentSource] == 0);
if (lengths[currentSource] == 0) {
// set to 0 if we've backed up to the start of an empty string
currentChar = 0;
} else
currentChar = lengths[currentSource] - 1;
}
if (peek() == '\n') {
--loc[currentSource].line;
--logicalSourceLoc.line;
}
}
// for #line override
void setLine(int newLine)
{
logicalSourceLoc.line = newLine;
loc[getLastValidSourceIndex()].line = newLine;
}
// for #line override in filename based parsing
void setFile(const char* filename)
{
logicalSourceLoc.name = filename;
loc[getLastValidSourceIndex()].name = filename;
}
void setFile(const char* filename, int i)
{
if (i == getLastValidSourceIndex()) {
logicalSourceLoc.name = filename;
}
loc[i].name = filename;
}
void setString(int newString)
{
logicalSourceLoc.string = newString;
loc[getLastValidSourceIndex()].string = newString;
logicalSourceLoc.name = nullptr;
loc[getLastValidSourceIndex()].name = nullptr;
}
// for #include content indentation
void setColumn(int col)
{
logicalSourceLoc.column = col;
loc[getLastValidSourceIndex()].column = col;
}
void setEndOfInput()
{
endOfFileReached = true;
currentSource = numSources;
}
bool atEndOfInput() const { return endOfFileReached; }
const TSourceLoc& getSourceLoc() const
{
if (singleLogical) {
return logicalSourceLoc;
} else {
return loc[std::max(0, std::min(currentSource, numSources - finale - 1))];
}
}
// Returns the index (starting from 0) of the most recent valid source string we are reading from.
int getLastValidSourceIndex() const { return std::min(currentSource, numSources - 1); }
void consumeWhiteSpace(bool& foundNonSpaceTab);
bool consumeComment();
void consumeWhitespaceComment(bool& foundNonSpaceTab);
bool scanVersion(int& version, EProfile& profile, bool& notFirstToken);
protected:
// advance one character
void advance()
{
++currentChar;
if (currentChar >= lengths[currentSource]) {
++currentSource;
if (currentSource < numSources) {
loc[currentSource].string = loc[currentSource - 1].string + 1;
loc[currentSource].line = 1;
loc[currentSource].column = 0;
}
while (currentSource < numSources && lengths[currentSource] == 0) {
++currentSource;
if (currentSource < numSources) {
loc[currentSource].string = loc[currentSource - 1].string + 1;
loc[currentSource].line = 1;
loc[currentSource].column = 0;
}
}
currentChar = 0;
}
}
int numSources; // number of strings in source
const unsigned char* const *sources; // array of strings; must be converted to positive values on use, to avoid aliasing with -1 as EndOfInput
const size_t *lengths; // length of each string
int currentSource;
size_t currentChar;
// This is for reporting what string/line an error occurred on, and can be overridden by #line.
// It remembers the last state of each source string as it is left for the next one, so unget()
// can restore that state.
TSourceLoc* loc; // an array
int stringBias; // the first string that is the user's string number 0
int finale; // number of internal strings after user's last string
TSourceLoc logicalSourceLoc;
bool singleLogical; // treats the strings as a single logical string.
// locations will be reported from the first string.
// Set to true once peek() returns EndOfFile, so that we won't roll back
// once we've reached EndOfFile.
bool endOfFileReached;
};
} // end namespace glslang
#endif // _GLSLANG_SCAN_INCLUDED_
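
A minimal sketch of how the scanner above is fed, not part of the diff: two read-only fragments with explicit lengths (no null terminators required), drained with get() until the EndOfInput sentinel defined at the top of this header.

// Illustrative only: uses just the TInputScanner constructor and get() declared above;
// assumes #include "Scan.h".
const char* pieces[] = { "#version 450\n", "void main() {}" };
size_t lengths[]     = { 13, 14 };          // explicit lengths; the strings need not be null-terminated
glslang::TInputScanner scanner(2, pieces, lengths);

int c;
while ((c = scanner.get()) != glslang::EndOfInput) {
    // each character arrives as a positive 8-bit value; line/column tracking is
    // updated internally for later getSourceLoc() queries
}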

View File

@ -0,0 +1,92 @@
//
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// This holds context specific to the GLSL scanner, which
// sits between the preprocessor scanner and parser.
//
#pragma once
#include "ParseHelper.h"
namespace glslang {
class TPpContext;
class TPpToken;
class TParserToken;
class TScanContext {
public:
explicit TScanContext(TParseContextBase& pc) :
parseContext(pc),
afterType(false), afterStruct(false),
field(false) { }
virtual ~TScanContext() { }
static void fillInKeywordMap();
static void deleteKeywordMap();
int tokenize(TPpContext*, TParserToken&);
protected:
TScanContext(TScanContext&);
TScanContext& operator=(TScanContext&);
int tokenizeIdentifier();
int identifierOrType();
int reservedWord();
int identifierOrReserved(bool reserved);
int es30ReservedFromGLSL(int version);
int nonreservedKeyword(int esVersion, int nonEsVersion);
int precisionKeyword();
int matNxM();
int dMat();
int firstGenerationImage(bool inEs310);
int secondGenerationImage();
TParseContextBase& parseContext;
bool afterType; // true if we've recognized a type, so can only be looking for an identifier
bool afterStruct; // true if we've recognized the STRUCT keyword, so can only be looking for an identifier
bool field; // true if we're on a field, right after a '.'
TSourceLoc loc;
TParserToken* parserToken;
TPpToken* ppToken;
const char* tokenText;
int keyword;
};
} // end namespace glslang
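
The two static methods above imply a process-wide lifecycle for the keyword table. A hedged sketch, not part of the diff; the real call sites live elsewhere in glslang and are not shown in this excerpt.

// Hypothetical bracketing of the keyword map, based only on the declarations above.
glslang::TScanContext::fillInKeywordMap();   // build the shared keyword lookup once per process

// per compile: construct a TScanContext over the parse context and let the
// parser call tokenize(ppContext, parserToken) for each token it needs

glslang::TScanContext::deleteKeywordMap();   // release the shared table at shutdown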

File diff suppressed because it is too large

View File

@ -0,0 +1,385 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Symbol table for parsing. Most functionality and main ideas
// are documented in the header file.
//
#include "SymbolTable.h"
namespace glslang {
//
// TType helper function needs a place to live.
//
//
// Recursively generate mangled names.
//
void TType::buildMangledName(TString& mangledName) const
{
if (isMatrix())
mangledName += 'm';
else if (isVector())
mangledName += 'v';
switch (basicType) {
case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break;
case EbtInt8: mangledName += "i8"; break;
case EbtUint8: mangledName += "u8"; break;
case EbtInt16: mangledName += "i16"; break;
case EbtUint16: mangledName += "u16"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
case EbtBool: mangledName += 'b'; break;
case EbtAtomicUint: mangledName += "au"; break;
case EbtSampler:
switch (sampler.type) {
#ifdef AMD_EXTENSIONS
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += "i"; break;
case EbtUint: mangledName += "u"; break;
default: break; // some compilers want this
}
if (sampler.image)
mangledName += "I"; // a normal image
else if (sampler.sampler)
mangledName += "p"; // a "pure" sampler
else if (!sampler.combined)
mangledName += "t"; // a "pure" texture
else
mangledName += "s"; // traditional combined sampler
if (sampler.arrayed)
mangledName += "A";
if (sampler.shadow)
mangledName += "S";
if (sampler.external)
mangledName += "E";
switch (sampler.dim) {
case Esd1D: mangledName += "1"; break;
case Esd2D: mangledName += "2"; break;
case Esd3D: mangledName += "3"; break;
case EsdCube: mangledName += "C"; break;
case EsdRect: mangledName += "R2"; break;
case EsdBuffer: mangledName += "B"; break;
case EsdSubpass: mangledName += "P"; break;
default: break; // some compilers want this
}
if (sampler.hasReturnStruct()) {
// Name mangle for sampler return struct uses struct table index.
mangledName += "-tx-struct";
char text[16]; // plenty enough space for the small integers.
snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
mangledName += text;
} else {
switch (sampler.getVectorSize()) {
case 1: mangledName += "1"; break;
case 2: mangledName += "2"; break;
case 3: mangledName += "3"; break;
case 4: break; // default to prior name mangle behavior
}
}
if (sampler.ms)
mangledName += "M";
break;
case EbtStruct:
case EbtBlock:
if (basicType == EbtStruct)
mangledName += "struct-";
else
mangledName += "block-";
if (typeName)
mangledName += *typeName;
for (unsigned int i = 0; i < structure->size(); ++i) {
mangledName += '-';
(*structure)[i].type->buildMangledName(mangledName);
}
default:
break;
}
if (getVectorSize() > 0)
mangledName += static_cast<char>('0' + getVectorSize());
else {
mangledName += static_cast<char>('0' + getMatrixCols());
mangledName += static_cast<char>('0' + getMatrixRows());
}
if (arraySizes) {
const int maxSize = 11;
char buf[maxSize];
for (int i = 0; i < arraySizes->getNumDims(); ++i) {
if (arraySizes->getDimNode(i)) {
if (arraySizes->getDimNode(i)->getAsSymbolNode())
snprintf(buf, maxSize, "s%d", arraySizes->getDimNode(i)->getAsSymbolNode()->getId());
else
snprintf(buf, maxSize, "s%p", arraySizes->getDimNode(i));
} else
snprintf(buf, maxSize, "%d", arraySizes->getDimSize(i));
mangledName += '[';
mangledName += buf;
mangledName += ']';
}
}
}
//
// Dump functions.
//
void TVariable::dump(TInfoSink& infoSink) const
{
infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " " << type.getBasicTypeString();
if (type.isArray()) {
infoSink.debug << "[0]";
}
infoSink.debug << "\n";
}
void TFunction::dump(TInfoSink& infoSink) const
{
infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " << getMangledName().c_str() << "\n";
}
void TAnonMember::dump(TInfoSink& TInfoSink) const
{
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() << "\n";
}
void TSymbolTableLevel::dump(TInfoSink &infoSink) const
{
tLevel::const_iterator it;
for (it = level.begin(); it != level.end(); ++it)
(*it).second->dump(infoSink);
}
void TSymbolTable::dump(TInfoSink &infoSink) const
{
for (int level = currentLevel(); level >= 0; --level) {
infoSink.debug << "LEVEL " << level << "\n";
table[level]->dump(infoSink);
}
}
//
// Functions have buried pointers to delete.
//
TFunction::~TFunction()
{
for (TParamList::iterator i = parameters.begin(); i != parameters.end(); ++i)
delete (*i).type;
}
//
// Symbol table levels are a map of pointers to symbols that have to be deleted.
//
TSymbolTableLevel::~TSymbolTableLevel()
{
for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
delete (*it).second;
delete [] defaultPrecision;
}
//
// Change all function entries in the table with the non-mangled name
// to be related to the provided built-in operation.
//
void TSymbolTableLevel::relateToOperator(const char* name, TOperator op)
{
tLevel::const_iterator candidate = level.lower_bound(name);
while (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
TFunction* function = (*candidate).second->getAsFunction();
function->relateToOperator(op);
} else
break;
++candidate;
}
}
// Make all function overloads of the given name require an extension(s).
// Should only be used for a version/profile that actually needs the extension(s).
void TSymbolTableLevel::setFunctionExtensions(const char* name, int num, const char* const extensions[])
{
tLevel::const_iterator candidate = level.lower_bound(name);
while (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0) {
TSymbol* symbol = candidate->second;
symbol->setExtensions(num, extensions);
} else
break;
++candidate;
}
}
//
// Make all symbols in this table level read only.
//
void TSymbolTableLevel::readOnly()
{
for (tLevel::iterator it = level.begin(); it != level.end(); ++it)
(*it).second->makeReadOnly();
}
//
// Copy a symbol, but the copy is writable; call readOnly() afterward if that's not desired.
//
TSymbol::TSymbol(const TSymbol& copyOf)
{
name = NewPoolTString(copyOf.name->c_str());
uniqueId = copyOf.uniqueId;
writable = true;
}
TVariable::TVariable(const TVariable& copyOf) : TSymbol(copyOf)
{
type.deepCopy(copyOf.type);
userType = copyOf.userType;
numExtensions = 0;
extensions = 0;
if (copyOf.numExtensions != 0)
setExtensions(copyOf.numExtensions, copyOf.extensions);
if (! copyOf.constArray.empty()) {
assert(! copyOf.type.isStruct());
TConstUnionArray newArray(copyOf.constArray, 0, copyOf.constArray.size());
constArray = newArray;
}
// don't support specialization-constant subtrees in cloned tables
constSubtree = nullptr;
}
TVariable* TVariable::clone() const
{
TVariable *variable = new TVariable(*this);
return variable;
}
TFunction::TFunction(const TFunction& copyOf) : TSymbol(copyOf)
{
for (unsigned int i = 0; i < copyOf.parameters.size(); ++i) {
TParameter param;
parameters.push_back(param);
parameters.back().copyParam(copyOf.parameters[i]);
}
numExtensions = 0;
extensions = 0;
if (copyOf.extensions != 0)
setExtensions(copyOf.numExtensions, copyOf.extensions);
returnType.deepCopy(copyOf.returnType);
mangledName = copyOf.mangledName;
op = copyOf.op;
defined = copyOf.defined;
prototyped = copyOf.prototyped;
implicitThis = copyOf.implicitThis;
illegalImplicitThis = copyOf.illegalImplicitThis;
defaultParamCount = copyOf.defaultParamCount;
}
TFunction* TFunction::clone() const
{
TFunction *function = new TFunction(*this);
return function;
}
TAnonMember* TAnonMember::clone() const
{
// Anonymous members of a given block should be cloned at a higher level,
// where they can all be assured to still end up pointing to a single
// copy of the original container.
assert(0);
return 0;
}
TSymbolTableLevel* TSymbolTableLevel::clone() const
{
TSymbolTableLevel *symTableLevel = new TSymbolTableLevel();
symTableLevel->anonId = anonId;
symTableLevel->thisLevel = thisLevel;
std::vector<bool> containerCopied(anonId, false);
tLevel::const_iterator iter;
for (iter = level.begin(); iter != level.end(); ++iter) {
const TAnonMember* anon = iter->second->getAsAnonMember();
if (anon) {
// Insert all the anonymous members of this same container at once,
// avoid inserting the other members in the future, once this has been done,
// allowing them to all be part of the same new container.
if (! containerCopied[anon->getAnonId()]) {
TVariable* container = anon->getAnonContainer().clone();
container->changeName(NewPoolTString(""));
// insert the whole container
symTableLevel->insert(*container, false);
containerCopied[anon->getAnonId()] = true;
}
} else
symTableLevel->insert(*iter->second->clone(), false);
}
return symTableLevel;
}
void TSymbolTable::copyTable(const TSymbolTable& copyOf)
{
assert(adoptedLevels == copyOf.adoptedLevels);
uniqueId = copyOf.uniqueId;
noBuiltInRedeclarations = copyOf.noBuiltInRedeclarations;
separateNameSpaces = copyOf.separateNameSpaces;
for (unsigned int i = copyOf.adoptedLevels; i < copyOf.table.size(); ++i)
table.push_back(copyOf.table[i]->clone());
}
} // end namespace glslang
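
relateToOperator() and setFunctionExtensions() above both exploit the same property: every mangled overload of a function begins with the bare name followed by '(', so lower_bound on the bare name lands on the first overload and a prefix compare walks the contiguous run. A standalone sketch of that pattern with plain std:: types; the key strings are made up to mimic mangled names and are not real glslang manglings.

#include <iostream>
#include <map>
#include <string>

// Standalone illustration of the lower_bound + prefix-compare walk used by
// TSymbolTableLevel::relateToOperator above.
int main()
{
    std::map<std::string, int> level = {
        { "texture(", 1 }, { "texture(hypothetical-args;", 2 }, { "textureLod(", 3 }
    };
    const std::string name = "texture";

    for (auto it = level.lower_bound(name); it != level.end(); ++it) {
        const std::string& candidate = it->first;
        const auto parenAt = candidate.find_first_of('(');
        if (parenAt == std::string::npos || candidate.compare(0, parenAt, name) != 0)
            break;                                   // left the run of 'texture' overloads
        std::cout << "overload: " << candidate << '\n';
    }
}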

View File

@ -0,0 +1,825 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _SYMBOL_TABLE_INCLUDED_
#define _SYMBOL_TABLE_INCLUDED_
//
// Symbol table for parsing. Has these design characteristics:
//
// * Same symbol table can be used to compile many shaders, to preserve
// effort of creating and loading with the large numbers of built-in
// symbols.
//
// --> This requires a copy mechanism, so initial pools used to create
// the shared information can be popped. Done through "clone"
// methods.
//
// * Name mangling will be used to give each function a unique name
// so that symbol table lookups are never ambiguous. This allows
// a simpler symbol table structure.
//
// * Pushing and popping of scope, so symbol table will really be a stack
// of symbol tables. Searched from the top, with new inserts going into
// the top.
//
// * Constants: Compile time constant symbols will keep their values
// in the symbol table. The parser can substitute constants at parse
// time, including doing constant folding and constant propagation.
//
// * No temporaries: Temporaries made from operations (+, --, .xy, etc.)
// are tracked in the intermediate representation, not the symbol table.
//
#include "../Include/Common.h"
#include "../Include/intermediate.h"
#include "../Include/InfoSink.h"
namespace glslang {
//
// Symbol base class. (Can build functions or variables out of these...)
//
class TVariable;
class TFunction;
class TAnonMember;
class TSymbol {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
explicit TSymbol(const TString *n) : name(n), numExtensions(0), extensions(0), writable(true) { }
virtual TSymbol* clone() const = 0;
virtual ~TSymbol() { } // rely on all symbol owned memory coming from the pool
virtual const TString& getName() const { return *name; }
virtual void changeName(const TString* newName) { name = newName; }
virtual void addPrefix(const char* prefix)
{
TString newName(prefix);
newName.append(*name);
changeName(NewPoolTString(newName.c_str()));
}
virtual const TString& getMangledName() const { return getName(); }
virtual TFunction* getAsFunction() { return 0; }
virtual const TFunction* getAsFunction() const { return 0; }
virtual TVariable* getAsVariable() { return 0; }
virtual const TVariable* getAsVariable() const { return 0; }
virtual const TAnonMember* getAsAnonMember() const { return 0; }
virtual const TType& getType() const = 0;
virtual TType& getWritableType() = 0;
virtual void setUniqueId(int id) { uniqueId = id; }
virtual int getUniqueId() const { return uniqueId; }
virtual void setExtensions(int num, const char* const exts[])
{
assert(extensions == 0);
assert(num > 0);
numExtensions = num;
extensions = NewPoolObject(exts[0], num);
for (int e = 0; e < num; ++e)
extensions[e] = exts[e];
}
virtual int getNumExtensions() const { return numExtensions; }
virtual const char** getExtensions() const { return extensions; }
virtual void dump(TInfoSink &infoSink) const = 0;
virtual bool isReadOnly() const { return ! writable; }
virtual void makeReadOnly() { writable = false; }
protected:
explicit TSymbol(const TSymbol&);
TSymbol& operator=(const TSymbol&);
const TString *name;
unsigned int uniqueId; // For cross-scope comparing during code generation
// For tracking what extensions must be present
// (don't use if correct version/profile is present).
int numExtensions;
const char** extensions; // an array of pointers to existing constant char strings
//
// N.B.: Non-const functions that will be generally used should assert on this,
// to avoid overwriting shared symbol-table information.
//
bool writable;
};
//
// Variable class, meaning a symbol that's not a function.
//
// There could be a separate class hierarchy for Constant variables;
// Only one of int, bool, or float, (or none) is correct for
// any particular use, but it's easy to do this way, and doesn't
// seem worth having separate classes, and "getConst" can't simply return
// different values for different types polymorphically, so this is
// just simple and pragmatic.
//
class TVariable : public TSymbol {
public:
TVariable(const TString *name, const TType& t, bool uT = false )
: TSymbol(name),
userType(uT),
constSubtree(nullptr),
anonId(-1) { type.shallowCopy(t); }
virtual TVariable* clone() const;
virtual ~TVariable() { }
virtual TVariable* getAsVariable() { return this; }
virtual const TVariable* getAsVariable() const { return this; }
virtual const TType& getType() const { return type; }
virtual TType& getWritableType() { assert(writable); return type; }
virtual bool isUserType() const { return userType; }
virtual const TConstUnionArray& getConstArray() const { return constArray; }
virtual TConstUnionArray& getWritableConstArray() { assert(writable); return constArray; }
virtual void setConstArray(const TConstUnionArray& array) { constArray = array; }
virtual void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
virtual TIntermTyped* getConstSubtree() const { return constSubtree; }
virtual void setAnonId(int i) { anonId = i; }
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const;
protected:
explicit TVariable(const TVariable&);
TVariable& operator=(const TVariable&);
TType type;
bool userType;
// we are assuming that Pool Allocator will free the memory allocated to unionArray
// when this object is destroyed
// TODO: these two should be a union
// A variable could be a compile-time constant, or a specialization
// constant, or neither, but never both.
TConstUnionArray constArray; // for compile-time constant value
TIntermTyped* constSubtree; // for specialization constant computation
int anonId; // the ID used for anonymous blocks: TODO: see if uniqueId could serve a dual purpose
};
//
// The function sub-class of symbols and the parser will need to
// share this definition of a function parameter.
//
struct TParameter {
TString *name;
TType* type;
TIntermTyped* defaultValue;
void copyParam(const TParameter& param)
{
if (param.name)
name = NewPoolTString(param.name->c_str());
else
name = 0;
type = param.type->clone();
defaultValue = param.defaultValue;
}
TBuiltInVariable getDeclaredBuiltIn() const { return type->getQualifier().declaredBuiltIn; }
};
//
// The function sub-class of a symbol.
//
class TFunction : public TSymbol {
public:
explicit TFunction(TOperator o) :
TSymbol(0),
op(o),
defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0) { }
TFunction(const TString *name, const TType& retType, TOperator tOp = EOpNull) :
TSymbol(name),
mangledName(*name + '('),
op(tOp),
defined(false), prototyped(false), implicitThis(false), illegalImplicitThis(false), defaultParamCount(0)
{
returnType.shallowCopy(retType);
declaredBuiltIn = retType.getQualifier().builtIn;
}
virtual TFunction* clone() const override;
virtual ~TFunction();
virtual TFunction* getAsFunction() override { return this; }
virtual const TFunction* getAsFunction() const override { return this; }
// Install 'p' as the (non-'this') last parameter.
// Non-'this' parameters are reflected in both the list of parameters and the
// mangled name.
virtual void addParameter(TParameter& p)
{
assert(writable);
parameters.push_back(p);
p.type->appendMangledName(mangledName);
if (p.defaultValue != nullptr)
defaultParamCount++;
}
// Install 'this' as the first parameter.
// 'this' is reflected in the list of parameters, but not the mangled name.
virtual void addThisParameter(TType& type, const char* name)
{
TParameter p = { NewPoolTString(name), new TType, nullptr };
p.type->shallowCopy(type);
parameters.insert(parameters.begin(), p);
}
virtual void addPrefix(const char* prefix) override
{
TSymbol::addPrefix(prefix);
mangledName.insert(0, prefix);
}
virtual void removePrefix(const TString& prefix)
{
assert(mangledName.compare(0, prefix.size(), prefix) == 0);
mangledName.erase(0, prefix.size());
}
virtual const TString& getMangledName() const override { return mangledName; }
virtual const TType& getType() const override { return returnType; }
virtual TBuiltInVariable getDeclaredBuiltInType() const { return declaredBuiltIn; }
virtual TType& getWritableType() override { return returnType; }
virtual void relateToOperator(TOperator o) { assert(writable); op = o; }
virtual TOperator getBuiltInOp() const { return op; }
virtual void setDefined() { assert(writable); defined = true; }
virtual bool isDefined() const { return defined; }
virtual void setPrototyped() { assert(writable); prototyped = true; }
virtual bool isPrototyped() const { return prototyped; }
virtual void setImplicitThis() { assert(writable); implicitThis = true; }
virtual bool hasImplicitThis() const { return implicitThis; }
virtual void setIllegalImplicitThis() { assert(writable); illegalImplicitThis = true; }
virtual bool hasIllegalImplicitThis() const { return illegalImplicitThis; }
// Return total number of parameters
virtual int getParamCount() const { return static_cast<int>(parameters.size()); }
// Return number of parameters with default values.
virtual int getDefaultParamCount() const { return defaultParamCount; }
// Return number of fixed parameters (without default values)
virtual int getFixedParamCount() const { return getParamCount() - getDefaultParamCount(); }
virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
virtual const TParameter& operator[](int i) const { return parameters[i]; }
virtual void dump(TInfoSink &infoSink) const override;
protected:
explicit TFunction(const TFunction&);
TFunction& operator=(const TFunction&);
typedef TVector<TParameter> TParamList;
TParamList parameters;
TType returnType;
TBuiltInVariable declaredBuiltIn;
TString mangledName;
TOperator op;
bool defined;
bool prototyped;
bool implicitThis; // True if this function is allowed to see all members of 'this'
bool illegalImplicitThis; // True if this function is not supposed to have access to dynamic members of 'this',
// even if it finds member variables in the symbol table.
// This is important for a static member function that has member variables in scope,
// but is not allowed to use them, or see hidden symbols instead.
int defaultParamCount;
};
//
// Members of anonymous blocks are a kind of TSymbol. They are not hidden in
// the symbol table behind a container; rather they are visible and point to
// their anonymous container. (The anonymous container is found through the
// member, not the other way around.)
//
class TAnonMember : public TSymbol {
public:
TAnonMember(const TString* n, unsigned int m, const TVariable& a, int an) : TSymbol(n), anonContainer(a), memberNumber(m), anonId(an) { }
virtual TAnonMember* clone() const;
virtual ~TAnonMember() { }
virtual const TAnonMember* getAsAnonMember() const { return this; }
virtual const TVariable& getAnonContainer() const { return anonContainer; }
virtual unsigned int getMemberNumber() const { return memberNumber; }
virtual const TType& getType() const
{
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual TType& getWritableType()
{
assert(writable);
const TTypeList& types = *anonContainer.getType().getStruct();
return *types[memberNumber].type;
}
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const;
protected:
explicit TAnonMember(const TAnonMember&);
TAnonMember& operator=(const TAnonMember&);
const TVariable& anonContainer;
unsigned int memberNumber;
int anonId;
};
class TSymbolTableLevel {
public:
POOL_ALLOCATOR_NEW_DELETE(GetThreadPoolAllocator())
TSymbolTableLevel() : defaultPrecision(0), anonId(0), thisLevel(false) { }
~TSymbolTableLevel();
bool insert(TSymbol& symbol, bool separateNameSpaces)
{
//
// returning true means symbol was added to the table with no semantic errors
//
const TString& name = symbol.getName();
if (name == "") {
symbol.getAsVariable()->setAnonId(anonId++);
// An empty name means an anonymous container, exposing its members to the external scope.
// Give it a name and insert its members in the symbol table, pointing to the container.
char buf[20];
snprintf(buf, 20, "%s%d", AnonymousPrefix, symbol.getAsVariable()->getAnonId());
symbol.changeName(NewPoolTString(buf));
return insertAnonymousMembers(symbol, 0);
} else {
// Check for redefinition errors:
// - STL itself will tell us if there is a direct name collision, with name mangling, at this level
// - additionally, check for function-redefining-variable name collisions
const TString& insertName = symbol.getMangledName();
if (symbol.getAsFunction()) {
// make sure there isn't a variable of this name
if (! separateNameSpaces && level.find(name) != level.end())
return false;
// insert, and whatever happens is okay
level.insert(tLevelPair(insertName, &symbol));
return true;
} else
return level.insert(tLevelPair(insertName, &symbol)).second;
}
}
// Add more members to an already inserted aggregate object
bool amend(TSymbol& symbol, int firstNewMember)
{
// See insert() for comments on basic explanation of insert.
// This operates similarly, but more simply.
// Only supporting amend of anonymous blocks so far.
if (IsAnonymous(symbol.getName()))
return insertAnonymousMembers(symbol, firstNewMember);
else
return false;
}
bool insertAnonymousMembers(TSymbol& symbol, int firstMember)
{
const TTypeList& types = *symbol.getAsVariable()->getType().getStruct();
for (unsigned int m = firstMember; m < types.size(); ++m) {
TAnonMember* member = new TAnonMember(&types[m].type->getFieldName(), m, *symbol.getAsVariable(), symbol.getAsVariable()->getAnonId());
if (! level.insert(tLevelPair(member->getMangledName(), member)).second)
return false;
}
return true;
}
TSymbol* find(const TString& name) const
{
tLevel::const_iterator it = level.find(name);
if (it == level.end())
return 0;
else
return (*it).second;
}
void findFunctionNameList(const TString& name, TVector<const TFunction*>& list)
{
size_t parenAt = name.find_first_of('(');
TString base(name, 0, parenAt + 1);
tLevel::const_iterator begin = level.lower_bound(base);
base[parenAt] = ')'; // assume ')' is lexically after '('
tLevel::const_iterator end = level.upper_bound(base);
for (tLevel::const_iterator it = begin; it != end; ++it)
list.push_back(it->second->getAsFunction());
}
// See if there is already a function in the table having the given non-function-style name.
bool hasFunctionName(const TString& name) const
{
tLevel::const_iterator candidate = level.lower_bound(name);
if (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt != candidateName.npos && candidateName.compare(0, parenAt, name) == 0)
return true;
}
return false;
}
// See if there is a variable at this level having the given non-function-style name.
// Return true if name is found, and set variable to true if the name was a variable.
bool findFunctionVariableName(const TString& name, bool& variable) const
{
tLevel::const_iterator candidate = level.lower_bound(name);
if (candidate != level.end()) {
const TString& candidateName = (*candidate).first;
TString::size_type parenAt = candidateName.find_first_of('(');
if (parenAt == candidateName.npos) {
// not a mangled name
if (candidateName == name) {
// found a variable name match
variable = true;
return true;
}
} else {
// a mangled name
if (candidateName.compare(0, parenAt, name) == 0) {
// found a function name match
variable = false;
return true;
}
}
}
return false;
}
// Use this to do a lazy 'push' of precision defaults the first time
// a precision statement is seen in a new scope. Leave it at 0 for
// when no push was needed. Thus, it is not the current defaults,
// it is what to restore the defaults to when popping a level.
void setPreviousDefaultPrecisions(const TPrecisionQualifier *p)
{
// can call multiple times at one scope, will only latch on first call,
// as we're tracking the previous scope's values, not the current values
if (defaultPrecision != 0)
return;
defaultPrecision = new TPrecisionQualifier[EbtNumTypes];
for (int t = 0; t < EbtNumTypes; ++t)
defaultPrecision[t] = p[t];
}
void getPreviousDefaultPrecisions(TPrecisionQualifier *p)
{
// can be called for table level pops that didn't set the
// defaults
if (defaultPrecision == 0 || p == 0)
return;
for (int t = 0; t < EbtNumTypes; ++t)
p[t] = defaultPrecision[t];
}
void relateToOperator(const char* name, TOperator op);
void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
void dump(TInfoSink &infoSink) const;
TSymbolTableLevel* clone() const;
void readOnly();
void setThisLevel() { thisLevel = true; }
bool isThisLevel() const { return thisLevel; }
protected:
explicit TSymbolTableLevel(TSymbolTableLevel&);
TSymbolTableLevel& operator=(TSymbolTableLevel&);
typedef std::map<TString, TSymbol*, std::less<TString>, pool_allocator<std::pair<const TString, TSymbol*> > > tLevel;
typedef const tLevel::value_type tLevelPair;
typedef std::pair<tLevel::iterator, bool> tInsertResult;
tLevel level; // named mappings
TPrecisionQualifier *defaultPrecision;
int anonId;
bool thisLevel; // True if this level of the symbol table is a structure scope containing member functions
// that are supposed to see anonymous access to member variables.
};
class TSymbolTable {
public:
TSymbolTable() : uniqueId(0), noBuiltInRedeclarations(false), separateNameSpaces(false), adoptedLevels(0)
{
//
// This symbol table cannot be used until push() is called.
//
}
~TSymbolTable()
{
// this can be called explicitly; safest to code it so it can be called multiple times
// don't deallocate levels passed in from elsewhere
while (table.size() > adoptedLevels)
pop(0);
}
void adoptLevels(TSymbolTable& symTable)
{
for (unsigned int level = 0; level < symTable.table.size(); ++level) {
table.push_back(symTable.table[level]);
++adoptedLevels;
}
uniqueId = symTable.uniqueId;
noBuiltInRedeclarations = symTable.noBuiltInRedeclarations;
separateNameSpaces = symTable.separateNameSpaces;
}
//
// While level adopting is generic, the methods below enact the following
// convention for levels:
// 0: common built-ins shared across all stages, all compiles, only one copy for all symbol tables
// 1: per-stage built-ins, shared across all compiles, but a different copy per stage
// 2: built-ins specific to a compile, like resources that are context-dependent, or redeclared built-ins
// 3: user-shader globals
//
protected:
static const int globalLevel = 3;
bool isSharedLevel(int level) { return level <= 1; } // exclude all per-compile levels
bool isBuiltInLevel(int level) { return level <= 2; } // exclude user globals
bool isGlobalLevel(int level) { return level <= globalLevel; } // include user globals
public:
bool isEmpty() { return table.size() == 0; }
bool atBuiltInLevel() { return isBuiltInLevel(currentLevel()); }
bool atGlobalLevel() { return isGlobalLevel(currentLevel()); }
void setNoBuiltInRedeclarations() { noBuiltInRedeclarations = true; }
void setSeparateNameSpaces() { separateNameSpaces = true; }
void push()
{
table.push_back(new TSymbolTableLevel);
}
// Make a new symbol-table level to represent the scope introduced by a structure
// containing member functions, such that the member functions can find anonymous
// references to member variables.
//
// 'thisSymbol' should have a name of "" to trigger anonymous structure-member
// symbol finds.
void pushThis(TSymbol& thisSymbol)
{
assert(thisSymbol.getName().size() == 0);
table.push_back(new TSymbolTableLevel);
table.back()->setThisLevel();
insert(thisSymbol);
}
void pop(TPrecisionQualifier *p)
{
table[currentLevel()]->getPreviousDefaultPrecisions(p);
delete table.back();
table.pop_back();
}
//
// Insert a visible symbol into the symbol table so it can
// be found later by name.
//
// Returns false if there was a name collision.
//
bool insert(TSymbol& symbol)
{
symbol.setUniqueId(++uniqueId);
// make sure there isn't a function of this variable name
if (! separateNameSpaces && ! symbol.getAsFunction() && table[currentLevel()]->hasFunctionName(symbol.getName()))
return false;
// check for not overloading or redefining a built-in function
if (noBuiltInRedeclarations) {
if (atGlobalLevel() && currentLevel() > 0) {
if (table[0]->hasFunctionName(symbol.getName()))
return false;
if (currentLevel() > 1 && table[1]->hasFunctionName(symbol.getName()))
return false;
}
}
return table[currentLevel()]->insert(symbol, separateNameSpaces);
}
// Add more members to an already inserted aggregate object
bool amend(TSymbol& symbol, int firstNewMember)
{
// See insert() for comments on basic explanation of insert.
// This operates similarly, but more simply.
return table[currentLevel()]->amend(symbol, firstNewMember);
}
//
// To allocate an internal temporary, which will need to be uniquely
// identified by the consumer of the AST, but never needs to be
// found by doing a symbol table search by name, hence it is allowed an
// arbitrary name in the symbol with no worry of collision.
//
void makeInternalVariable(TSymbol& symbol)
{
symbol.setUniqueId(++uniqueId);
}
//
// Copy a variable or anonymous member's structure from a shared level so that
// it can be added (soon after return) to the symbol table where it can be
// modified without impacting other users of the shared table.
//
TSymbol* copyUpDeferredInsert(TSymbol* shared)
{
if (shared->getAsVariable()) {
TSymbol* copy = shared->clone();
copy->setUniqueId(shared->getUniqueId());
return copy;
} else {
const TAnonMember* anon = shared->getAsAnonMember();
assert(anon);
TVariable* container = anon->getAnonContainer().clone();
container->changeName(NewPoolTString(""));
container->setUniqueId(anon->getAnonContainer().getUniqueId());
return container;
}
}
TSymbol* copyUp(TSymbol* shared)
{
TSymbol* copy = copyUpDeferredInsert(shared);
table[globalLevel]->insert(*copy, separateNameSpaces);
if (shared->getAsVariable())
return copy;
else {
// return the copy of the anonymous member
return table[globalLevel]->find(shared->getName());
}
}
// Normal find of a symbol, that can optionally say whether the symbol was found
// at a built-in level or the current top-scope level.
TSymbol* find(const TString& name, bool* builtIn = 0, bool* currentScope = 0, int* thisDepthP = 0)
{
int level = currentLevel();
TSymbol* symbol;
int thisDepth = 0;
do {
if (table[level]->isThisLevel())
++thisDepth;
symbol = table[level]->find(name);
--level;
} while (symbol == nullptr && level >= 0);
level++;
if (builtIn)
*builtIn = isBuiltInLevel(level);
if (currentScope)
*currentScope = isGlobalLevel(currentLevel()) || level == currentLevel(); // consider shared levels as "current scope" WRT user globals
if (thisDepthP != nullptr) {
if (! table[level]->isThisLevel())
thisDepth = 0;
*thisDepthP = thisDepth;
}
return symbol;
}
// Find of a symbol that returns how many layers deep of nested
// structures-with-member-functions ('this' scopes) deep the symbol was
// found in.
TSymbol* find(const TString& name, int& thisDepth)
{
int level = currentLevel();
TSymbol* symbol;
thisDepth = 0;
do {
if (table[level]->isThisLevel())
++thisDepth;
symbol = table[level]->find(name);
--level;
} while (symbol == 0 && level >= 0);
if (! table[level + 1]->isThisLevel())
thisDepth = 0;
return symbol;
}
bool isFunctionNameVariable(const TString& name) const
{
if (separateNameSpaces)
return false;
int level = currentLevel();
do {
bool variable;
bool found = table[level]->findFunctionVariableName(name, variable);
if (found)
return variable;
--level;
} while (level >= 0);
return false;
}
void findFunctionNameList(const TString& name, TVector<const TFunction*>& list, bool& builtIn)
{
// For user levels, return the set found in the first scope with a match
builtIn = false;
int level = currentLevel();
do {
table[level]->findFunctionNameList(name, list);
--level;
} while (list.empty() && level >= globalLevel);
if (! list.empty())
return;
// Gather across all built-in levels; they don't hide each other
builtIn = true;
do {
table[level]->findFunctionNameList(name, list);
--level;
} while (level >= 0);
}
void relateToOperator(const char* name, TOperator op)
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->relateToOperator(name, op);
}
void setFunctionExtensions(const char* name, int num, const char* const extensions[])
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->setFunctionExtensions(name, num, extensions);
}
void setVariableExtensions(const char* name, int num, const char* const extensions[])
{
TSymbol* symbol = find(TString(name));
if (symbol)
symbol->setExtensions(num, extensions);
}
int getMaxSymbolId() { return uniqueId; }
void dump(TInfoSink &infoSink) const;
void copyTable(const TSymbolTable& copyOf);
void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }
void readOnly()
{
for (unsigned int level = 0; level < table.size(); ++level)
table[level]->readOnly();
}
protected:
TSymbolTable(TSymbolTable&);
TSymbolTable& operator=(TSymbolTable&);
int currentLevel() const { return static_cast<int>(table.size()) - 1; }
std::vector<TSymbolTableLevel*> table;
int uniqueId; // for unique identification in code generation
bool noBuiltInRedeclarations;
bool separateNameSpaces;
unsigned int adoptedLevels;
};
} // end namespace glslang
#endif // _SYMBOL_TABLE_INCLUDED_
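The find() overloads above resolve a name by walking the level stack from the innermost scope down to the built-in level, reporting where the hit occurred. A minimal, self-contained sketch of that lookup scheme follows; plain std::map levels stand in for TSymbolTableLevel, and the ScopeStack name is made up for this sketch, not part of glslang:

#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy scope stack: level 0 plays the role of the built-in level,
// higher levels are nested user scopes (global, function, block, ...).
class ScopeStack {
public:
    ScopeStack() { push(); }                 // built-in level
    void push() { levels.emplace_back(); }
    void pop()  { levels.pop_back(); }

    bool insert(const std::string& name, int value) {
        return levels.back().emplace(name, value).second;
    }

    // Search from the innermost level outward; report whether the hit
    // was at the built-in level, mirroring TSymbolTable::find().
    const int* find(const std::string& name, bool* builtIn = nullptr) const {
        for (int level = (int)levels.size() - 1; level >= 0; --level) {
            auto it = levels[level].find(name);
            if (it != levels[level].end()) {
                if (builtIn)
                    *builtIn = (level == 0);
                return &it->second;
            }
        }
        return nullptr;
    }

private:
    std::vector<std::map<std::string, int>> levels;
};

int main() {
    ScopeStack table;
    table.insert("gl_MaxLights", 8);   // pretend built-in
    table.push();                      // global scope
    table.insert("color", 1);
    table.push();                      // inner scope
    bool builtIn = false;
    if (const int* v = table.find("gl_MaxLights", &builtIn))
        std::cout << "found, builtIn=" << builtIn << ", value=" << *v << "\n";
}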

File diff suppressed because it is too large

View File

@ -0,0 +1,283 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2012-2013 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _VERSIONS_INCLUDED_
#define _VERSIONS_INCLUDED_
//
// Help manage multiple profiles, versions, extensions etc.
//
//
// Profiles are set up for masking operations, so queries can be done on multiple
// profiles at the same time.
//
// Don't maintain an ordinal set of enums (0,1,2,3...) to avoid all possible
// defects from mixing the two different forms.
//
typedef enum {
EBadProfile = 0,
ENoProfile = (1 << 0), // only for desktop, before profiles showed up
ECoreProfile = (1 << 1),
ECompatibilityProfile = (1 << 2),
EEsProfile = (1 << 3)
} EProfile;
namespace glslang {
//
// Map from profile enum to externally readable text name.
//
inline const char* ProfileName(EProfile profile)
{
switch (profile) {
case ENoProfile: return "none";
case ECoreProfile: return "core";
case ECompatibilityProfile: return "compatibility";
case EEsProfile: return "es";
default: return "unknown profile";
}
}
//
// What source rules, validation rules, target language, etc. are needed or
// desired for SPIR-V?
//
// 0 means a target or rule set is not enabled (ignore rules from that entity).
// Non-0 means to apply semantic rules arising from that version of its rule set.
// The union of all requested rule sets will be applied.
//
struct SpvVersion {
SpvVersion() : spv(0), vulkanGlsl(0), vulkan(0), openGl(0) {}
unsigned int spv; // the version of SPIR-V to target, as defined by "word 1" of the SPIR-V binary header
int vulkanGlsl; // the version of GLSL semantics for Vulkan, from GL_KHR_vulkan_glsl, for "#define VULKAN XXX"
int vulkan; // the version of Vulkan, for which SPIR-V execution environment rules to use
int openGl; // the version of GLSL semantics for OpenGL, from GL_ARB_gl_spirv, for "#define GL_SPIRV XXX"
};
//
// The behaviors from the GLSL "#extension extension_name : behavior"
//
typedef enum {
EBhMissing = 0,
EBhRequire,
EBhEnable,
EBhWarn,
EBhDisable,
EBhDisablePartial // use as initial state of an extension that is only partially implemented
} TExtensionBehavior;
//
// Symbolic names for extensions. Strings may be directly used when calling the
// functions, but better to have the compiler do spelling checks.
//
const char* const E_GL_OES_texture_3D = "GL_OES_texture_3D";
const char* const E_GL_OES_standard_derivatives = "GL_OES_standard_derivatives";
const char* const E_GL_EXT_frag_depth = "GL_EXT_frag_depth";
const char* const E_GL_OES_EGL_image_external = "GL_OES_EGL_image_external";
const char* const E_GL_OES_EGL_image_external_essl3 = "GL_OES_EGL_image_external_essl3";
const char* const E_GL_EXT_shader_texture_lod = "GL_EXT_shader_texture_lod";
const char* const E_GL_EXT_shadow_samplers = "GL_EXT_shadow_samplers";
const char* const E_GL_ARB_texture_rectangle = "GL_ARB_texture_rectangle";
const char* const E_GL_3DL_array_objects = "GL_3DL_array_objects";
const char* const E_GL_ARB_shading_language_420pack = "GL_ARB_shading_language_420pack";
const char* const E_GL_ARB_texture_gather = "GL_ARB_texture_gather";
const char* const E_GL_ARB_gpu_shader5 = "GL_ARB_gpu_shader5";
const char* const E_GL_ARB_separate_shader_objects = "GL_ARB_separate_shader_objects";
const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader";
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
const char* const E_GL_ARB_shader_group_vote = "GL_ARB_shader_group_vote";
const char* const E_GL_ARB_derivative_control = "GL_ARB_derivative_control";
const char* const E_GL_ARB_shader_texture_image_samples = "GL_ARB_shader_texture_image_samples";
const char* const E_GL_ARB_viewport_array = "GL_ARB_viewport_array";
const char* const E_GL_ARB_gpu_shader_int64 = "GL_ARB_gpu_shader_int64";
const char* const E_GL_ARB_shader_ballot = "GL_ARB_shader_ballot";
const char* const E_GL_ARB_sparse_texture2 = "GL_ARB_sparse_texture2";
const char* const E_GL_ARB_sparse_texture_clamp = "GL_ARB_sparse_texture_clamp";
const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil_export";
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
const char* const E_GL_KHR_shader_subgroup_arithmetic = "GL_KHR_shader_subgroup_arithmetic";
const char* const E_GL_KHR_shader_subgroup_ballot = "GL_KHR_shader_subgroup_ballot";
const char* const E_GL_KHR_shader_subgroup_shuffle = "GL_KHR_shader_subgroup_shuffle";
const char* const E_GL_KHR_shader_subgroup_shuffle_relative = "GL_KHR_shader_subgroup_shuffle_relative";
const char* const E_GL_KHR_shader_subgroup_clustered = "GL_KHR_shader_subgroup_clustered";
const char* const E_GL_KHR_shader_subgroup_quad = "GL_KHR_shader_subgroup_quad";
const char* const E_GL_EXT_shader_non_constant_global_initializers = "GL_EXT_shader_non_constant_global_initializers";
const char* const E_GL_EXT_shader_image_load_formatted = "GL_EXT_shader_image_load_formatted";
const char* const E_GL_EXT_shader_16bit_storage = "GL_EXT_shader_16bit_storage";
const char* const E_GL_EXT_shader_8bit_storage = "GL_EXT_shader_8bit_storage";
// EXT extensions
const char* const E_GL_EXT_device_group = "GL_EXT_device_group";
const char* const E_GL_EXT_multiview = "GL_EXT_multiview";
const char* const E_GL_EXT_post_depth_coverage = "GL_EXT_post_depth_coverage";
const char* const E_GL_EXT_control_flow_attributes = "GL_EXT_control_flow_attributes";
const char* const E_GL_EXT_nonuniform_qualifier = "GL_EXT_nonuniform_qualifier";
const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerless_texture_functions";
// Arrays of extensions for the above post_depth_coverage duplications
const char* const post_depth_coverageEXTs[] = { E_GL_ARB_post_depth_coverage, E_GL_EXT_post_depth_coverage };
const int Num_post_depth_coverageEXTs = sizeof(post_depth_coverageEXTs) / sizeof(post_depth_coverageEXTs[0]);
// OVR extensions
const char* const E_GL_OVR_multiview = "GL_OVR_multiview";
const char* const E_GL_OVR_multiview2 = "GL_OVR_multiview2";
const char* const OVR_multiview_EXTs[] = { E_GL_OVR_multiview, E_GL_OVR_multiview2 };
const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multiview_EXTs[0]);
// #line and #include
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
const char* const E_GL_AMD_gcn_shader = "GL_AMD_gcn_shader";
const char* const E_GL_AMD_gpu_shader_half_float = "GL_AMD_gpu_shader_half_float";
const char* const E_GL_AMD_texture_gather_bias_lod = "GL_AMD_texture_gather_bias_lod";
const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_shader_int16";
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
#endif
#ifdef NV_EXTENSIONS
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
const char* const E_GL_NV_viewport_array2 = "GL_NV_viewport_array2";
const char* const E_GL_NV_stereo_view_rendering = "GL_NV_stereo_view_rendering";
const char* const E_GL_NVX_multiview_per_view_attributes = "GL_NVX_multiview_per_view_attributes";
const char* const E_GL_NV_shader_atomic_int64 = "GL_NV_shader_atomic_int64";
const char* const E_GL_NV_conservative_raster_underestimation = "GL_NV_conservative_raster_underestimation";
const char* const E_GL_NV_shader_noperspective_interpolation = "GL_NV_shader_noperspective_interpolation";
const char* const E_GL_NV_shader_subgroup_partitioned = "GL_NV_shader_subgroup_partitioned";
// Arrays of extensions for the above viewportEXTs duplications
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
#endif
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
const char* const E_GL_KHR_blend_equation_advanced = "GL_KHR_blend_equation_advanced";
const char* const E_GL_OES_sample_variables = "GL_OES_sample_variables";
const char* const E_GL_OES_shader_image_atomic = "GL_OES_shader_image_atomic";
const char* const E_GL_OES_shader_multisample_interpolation = "GL_OES_shader_multisample_interpolation";
const char* const E_GL_OES_texture_storage_multisample_2d_array = "GL_OES_texture_storage_multisample_2d_array";
const char* const E_GL_EXT_geometry_shader = "GL_EXT_geometry_shader";
const char* const E_GL_EXT_geometry_point_size = "GL_EXT_geometry_point_size";
const char* const E_GL_EXT_gpu_shader5 = "GL_EXT_gpu_shader5";
const char* const E_GL_EXT_primitive_bounding_box = "GL_EXT_primitive_bounding_box";
const char* const E_GL_EXT_shader_io_blocks = "GL_EXT_shader_io_blocks";
const char* const E_GL_EXT_tessellation_shader = "GL_EXT_tessellation_shader";
const char* const E_GL_EXT_tessellation_point_size = "GL_EXT_tessellation_point_size";
const char* const E_GL_EXT_texture_buffer = "GL_EXT_texture_buffer";
const char* const E_GL_EXT_texture_cube_map_array = "GL_EXT_texture_cube_map_array";
// OES matching AEP
const char* const E_GL_OES_geometry_shader = "GL_OES_geometry_shader";
const char* const E_GL_OES_geometry_point_size = "GL_OES_geometry_point_size";
const char* const E_GL_OES_gpu_shader5 = "GL_OES_gpu_shader5";
const char* const E_GL_OES_primitive_bounding_box = "GL_OES_primitive_bounding_box";
const char* const E_GL_OES_shader_io_blocks = "GL_OES_shader_io_blocks";
const char* const E_GL_OES_tessellation_shader = "GL_OES_tessellation_shader";
const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessellation_point_size";
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// KHX
const char* const E_GL_KHX_shader_explicit_arithmetic_types = "GL_KHX_shader_explicit_arithmetic_types";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int8 = "GL_KHX_shader_explicit_arithmetic_types_int8";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int16 = "GL_KHX_shader_explicit_arithmetic_types_int16";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int32 = "GL_KHX_shader_explicit_arithmetic_types_int32";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_int64 = "GL_KHX_shader_explicit_arithmetic_types_int64";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float16 = "GL_KHX_shader_explicit_arithmetic_types_float16";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float32 = "GL_KHX_shader_explicit_arithmetic_types_float32";
const char* const E_GL_KHX_shader_explicit_arithmetic_types_float64 = "GL_KHX_shader_explicit_arithmetic_types_float64";
// Arrays of extensions for the above AEP duplications
const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };
const int Num_AEP_geometry_shader = sizeof(AEP_geometry_shader)/sizeof(AEP_geometry_shader[0]);
const char* const AEP_geometry_point_size[] = { E_GL_EXT_geometry_point_size, E_GL_OES_geometry_point_size };
const int Num_AEP_geometry_point_size = sizeof(AEP_geometry_point_size)/sizeof(AEP_geometry_point_size[0]);
const char* const AEP_gpu_shader5[] = { E_GL_EXT_gpu_shader5, E_GL_OES_gpu_shader5 };
const int Num_AEP_gpu_shader5 = sizeof(AEP_gpu_shader5)/sizeof(AEP_gpu_shader5[0]);
const char* const AEP_primitive_bounding_box[] = { E_GL_EXT_primitive_bounding_box, E_GL_OES_primitive_bounding_box };
const int Num_AEP_primitive_bounding_box = sizeof(AEP_primitive_bounding_box)/sizeof(AEP_primitive_bounding_box[0]);
const char* const AEP_shader_io_blocks[] = { E_GL_EXT_shader_io_blocks, E_GL_OES_shader_io_blocks };
const int Num_AEP_shader_io_blocks = sizeof(AEP_shader_io_blocks)/sizeof(AEP_shader_io_blocks[0]);
const char* const AEP_tessellation_shader[] = { E_GL_EXT_tessellation_shader, E_GL_OES_tessellation_shader };
const int Num_AEP_tessellation_shader = sizeof(AEP_tessellation_shader)/sizeof(AEP_tessellation_shader[0]);
const char* const AEP_tessellation_point_size[] = { E_GL_EXT_tessellation_point_size, E_GL_OES_tessellation_point_size };
const int Num_AEP_tessellation_point_size = sizeof(AEP_tessellation_point_size)/sizeof(AEP_tessellation_point_size[0]);
const char* const AEP_texture_buffer[] = { E_GL_EXT_texture_buffer, E_GL_OES_texture_buffer };
const int Num_AEP_texture_buffer = sizeof(AEP_texture_buffer)/sizeof(AEP_texture_buffer[0]);
const char* const AEP_texture_cube_map_array[] = { E_GL_EXT_texture_cube_map_array, E_GL_OES_texture_cube_map_array };
const int Num_AEP_texture_cube_map_array = sizeof(AEP_texture_cube_map_array)/sizeof(AEP_texture_cube_map_array[0]);
} // end namespace glslang
#endif // _VERSIONS_INCLUDED_
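Because the EProfile values above are distinct bits rather than ordinals, one mask can describe several profiles and a single bitwise test answers a multi-profile query. A small self-contained sketch of that masking idiom; the enum is copied locally so the snippet compiles on its own, and matchesAnyProfile is an illustrative helper, not a glslang API:

#include <iostream>

// Local copy of the bit-flag profile enum from Versions.h.
typedef enum {
    EBadProfile           = 0,
    ENoProfile            = (1 << 0),
    ECoreProfile          = (1 << 1),
    ECompatibilityProfile = (1 << 2),
    EEsProfile            = (1 << 3)
} EProfile;

// True if 'profile' is any of the profiles named in 'mask'.
inline bool matchesAnyProfile(EProfile profile, int mask)
{
    return (profile & mask) != 0;
}

int main()
{
    EProfile current = ECoreProfile;
    // One query covers both desktop profiles at the same time.
    if (matchesAnyProfile(current, ECoreProfile | ECompatibilityProfile))
        std::cout << "desktop GL profile\n";
    if (!matchesAnyProfile(current, EEsProfile))
        std::cout << "not an ES profile\n";
}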

View File

@ -0,0 +1,257 @@
//
// Copyright (C) 2017 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google, Inc., nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "attribute.h"
#include "../Include/intermediate.h"
#include "ParseHelper.h"
namespace glslang {
// extract integers out of attribute arguments stored in attribute aggregate
bool TAttributeArgs::getInt(int& value, int argNum) const
{
const TConstUnion* intConst = getConstUnion(EbtInt, argNum);
if (intConst == nullptr)
return false;
value = intConst->getIConst();
return true;
}
// extract strings out of attribute arguments stored in attribute aggregate.
// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
{
const TConstUnion* stringConst = getConstUnion(EbtString, argNum);
if (stringConst == nullptr)
return false;
value = *stringConst->getSConst();
// Convenience.
if (convertToLower)
std::transform(value.begin(), value.end(), value.begin(), ::tolower);
return true;
}
// How many arguments were supplied?
int TAttributeArgs::size() const
{
return args == nullptr ? 0 : (int)args->getSequence().size();
}
// Helper to get attribute const union. Returns nullptr on failure.
const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNum) const
{
if (args == nullptr)
return nullptr;
if (argNum >= (int)args->getSequence().size())
return nullptr;
const TIntermConstantUnion* constAgg = args->getSequence()[argNum]->getAsConstantUnion();
if (constAgg == nullptr)
return nullptr;
const TConstUnion* constVal = &constAgg->getConstArray()[0];
if (constVal->getType() != basicType)
return nullptr;
return constVal;
}
// Implementation of TParseContext parts of attributes
TAttributeType TParseContext::attributeFromName(const TString& name) const
{
if (name == "branch" || name == "dont_flatten")
return EatBranch;
else if (name == "flatten")
return EatFlatten;
else if (name == "unroll")
return EatUnroll;
else if (name == "loop" || name == "dont_unroll")
return EatLoop;
else if (name == "dependency_infinite")
return EatDependencyInfinite;
else if (name == "dependency_length")
return EatDependencyLength;
else
return EatNone;
}
// Make an initial leaf for the grammar from a no-argument attribute
TAttributes* TParseContext::makeAttributes(const TString& identifier) const
{
TAttributes *attributes = nullptr;
attributes = NewPoolObject(attributes);
TAttributeArgs args = { attributeFromName(identifier), nullptr };
attributes->push_back(args);
return attributes;
}
// Make an initial leaf for the grammar from a one-argument attribute
TAttributes* TParseContext::makeAttributes(const TString& identifier, TIntermNode* node) const
{
TAttributes *attributes = nullptr;
attributes = NewPoolObject(attributes);
// for now, node is always a simple single expression, but other code expects
// a list, so make it so
TIntermAggregate* agg = intermediate.makeAggregate(node);
TAttributeArgs args = { attributeFromName(identifier), agg };
attributes->push_back(args);
return attributes;
}
// Merge two sets of attributes into a single set.
// The second argument is destructively consumed.
TAttributes* TParseContext::mergeAttributes(TAttributes* attr1, TAttributes* attr2) const
{
attr1->splice(attr1->end(), *attr2);
return attr1;
}
//
// Selection attributes
//
void TParseContext::handleSelectionAttributes(const TAttributes& attributes, TIntermNode* node)
{
TIntermSelection* selection = node->getAsSelectionNode();
if (selection == nullptr)
return;
for (auto it = attributes.begin(); it != attributes.end(); ++it) {
if (it->size() > 0) {
warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
continue;
}
switch (it->name) {
case EatFlatten:
selection->setFlatten();
break;
case EatBranch:
selection->setDontFlatten();
break;
default:
warn(node->getLoc(), "attribute does not apply to a selection", "", "");
break;
}
}
}
//
// Switch attributes
//
void TParseContext::handleSwitchAttributes(const TAttributes& attributes, TIntermNode* node)
{
TIntermSwitch* selection = node->getAsSwitchNode();
if (selection == nullptr)
return;
for (auto it = attributes.begin(); it != attributes.end(); ++it) {
if (it->size() > 0) {
warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
continue;
}
switch (it->name) {
case EatFlatten:
selection->setFlatten();
break;
case EatBranch:
selection->setDontFlatten();
break;
default:
warn(node->getLoc(), "attribute does not apply to a switch", "", "");
break;
}
}
}
//
// Loop attributes
//
void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermNode* node)
{
TIntermLoop* loop = node->getAsLoopNode();
if (loop == nullptr) {
// the actual loop might be part of a sequence
TIntermAggregate* agg = node->getAsAggregate();
if (agg == nullptr)
return;
for (auto it = agg->getSequence().begin(); it != agg->getSequence().end(); ++it) {
loop = (*it)->getAsLoopNode();
if (loop != nullptr)
break;
}
if (loop == nullptr)
return;
}
for (auto it = attributes.begin(); it != attributes.end(); ++it) {
if (it->name != EatDependencyLength && it->size() > 0) {
warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
continue;
}
int value;
switch (it->name) {
case EatUnroll:
loop->setUnroll();
break;
case EatLoop:
loop->setDontUnroll();
break;
case EatDependencyInfinite:
loop->setLoopDependency(TIntermLoop::dependencyInfinite);
break;
case EatDependencyLength:
if (it->size() == 1 && it->getInt(value)) {
if (value <= 0)
error(node->getLoc(), "must be positive", "dependency_length", "");
loop->setLoopDependency(value);
} else
warn(node->getLoc(), "expected a single integer argument", "dependency_length", "");
break;
default:
warn(node->getLoc(), "attribute does not apply to a loop", "", "");
break;
}
}
}
} // end namespace glslang
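mergeAttributes() above is a thin wrapper around std::list::splice, which is why its comment warns that the second argument is destructively consumed: the nodes are moved onto the first list, not copied. A minimal standalone illustration of that behaviour, with plain ints standing in for TAttributeArgs:

#include <iostream>
#include <list>

int main()
{
    std::list<int> attr1 = { 1, 2 };
    std::list<int> attr2 = { 3, 4 };

    // Same pattern as TParseContext::mergeAttributes: move every node of
    // attr2 onto the end of attr1 in O(1); attr2 is left empty.
    attr1.splice(attr1.end(), attr2);

    std::cout << "attr1:";
    for (int v : attr1) std::cout << ' ' << v;
    std::cout << "\nattr2 size: " << attr2.size() << "\n";  // prints 0
}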

View File

@ -0,0 +1,102 @@
//
// Copyright (C) 2017 LunarG, Inc.
// Copyright (C) 2018 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _ATTRIBUTE_INCLUDED_
#define _ATTRIBUTE_INCLUDED_
#include "../Include/Common.h"
#include "../Include/ConstantUnion.h"
namespace glslang {
enum TAttributeType {
EatNone,
EatAllow_uav_condition,
EatBranch,
EatCall,
EatDomain,
EatEarlyDepthStencil,
EatFastOpt,
EatFlatten,
EatForceCase,
EatInstance,
EatMaxTessFactor,
EatNumThreads,
EatMaxVertexCount,
EatOutputControlPoints,
EatOutputTopology,
EatPartitioning,
EatPatchConstantFunc,
EatPatchSize,
EatUnroll,
EatLoop,
EatBinding,
EatGlobalBinding,
EatLocation,
EatInputAttachment,
EatBuiltIn,
EatPushConstant,
EatConstantId,
EatDependencyInfinite,
EatDependencyLength
};
class TIntermAggregate;
struct TAttributeArgs {
TAttributeType name;
const TIntermAggregate* args;
// Obtain attribute as integer
// Return false if it cannot be obtained
bool getInt(int& value, int argNum = 0) const;
// Obtain attribute as string, with optional to-lower transform
// Return false if it cannot be obtained
bool getString(TString& value, int argNum = 0, bool convertToLower = true) const;
// How many arguments were provided to the attribute?
int size() const;
protected:
const TConstUnion* getConstUnion(TBasicType basicType, int argNum) const;
};
typedef TList<TAttributeArgs> TAttributes;
} // end namespace glslang
#endif // _ATTRIBUTE_INCLUDED_
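The accessors declared above (getInt, getString, size) let the parser read attribute arguments such as dependency_length(4) defensively: a missing or wrongly typed argument reports failure instead of crashing. A small self-contained sketch of that accessor pattern; ToyAttributeArgs is made up for this sketch and is not the glslang type:

#include <iostream>
#include <string>
#include <vector>

// Toy stand-in for TAttributeArgs: a name plus a list of integer arguments.
struct ToyAttributeArgs {
    std::string name;
    std::vector<int> args;

    int size() const { return (int)args.size(); }

    // Mirror TAttributeArgs::getInt(): report failure instead of asserting
    // when the requested argument does not exist.
    bool getInt(int& value, int argNum = 0) const {
        if (argNum < 0 || argNum >= size())
            return false;
        value = args[argNum];
        return true;
    }
};

int main()
{
    ToyAttributeArgs attr{ "dependency_length", { 4 } };

    // Same validation shape as handleLoopAttributes(): exactly one argument,
    // and it must be positive.
    int value = 0;
    if (attr.size() == 1 && attr.getInt(value) && value > 0)
        std::cout << attr.name << " = " << value << "\n";
    else
        std::cout << "expected a single positive integer argument\n";
}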

View File

@ -0,0 +1,214 @@
/*
** Copyright (c) 2013 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a
** copy of this software and/or associated documentation files (the
** "Materials"), to deal in the Materials without restriction, including
** without limitation the rights to use, copy, modify, merge, publish,
** distribute, sublicense, and/or sell copies of the Materials, and to
** permit persons to whom the Materials are furnished to do so, subject to
** the following conditions:
**
** The above copyright notice and this permission notice shall be included
** in all copies or substantial portions of the Materials.
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
*/
#pragma once
#define GL_FLOAT 0x1406
#define GL_FLOAT_VEC2 0x8B50
#define GL_FLOAT_VEC3 0x8B51
#define GL_FLOAT_VEC4 0x8B52
#define GL_DOUBLE 0x140A
#define GL_DOUBLE_VEC2 0x8FFC
#define GL_DOUBLE_VEC3 0x8FFD
#define GL_DOUBLE_VEC4 0x8FFE
#define GL_INT 0x1404
#define GL_INT_VEC2 0x8B53
#define GL_INT_VEC3 0x8B54
#define GL_INT_VEC4 0x8B55
#define GL_UNSIGNED_INT 0x1405
#define GL_UNSIGNED_INT_VEC2 0x8DC6
#define GL_UNSIGNED_INT_VEC3 0x8DC7
#define GL_UNSIGNED_INT_VEC4 0x8DC8
#define GL_INT64_ARB 0x140E
#define GL_INT64_VEC2_ARB 0x8FE9
#define GL_INT64_VEC3_ARB 0x8FEA
#define GL_INT64_VEC4_ARB 0x8FEB
#define GL_UNSIGNED_INT64_ARB 0x140F
#define GL_UNSIGNED_INT64_VEC2_ARB 0x8FE5
#define GL_UNSIGNED_INT64_VEC3_ARB 0x8FE6
#define GL_UNSIGNED_INT64_VEC4_ARB 0x8FE7
#define GL_BOOL 0x8B56
#define GL_BOOL_VEC2 0x8B57
#define GL_BOOL_VEC3 0x8B58
#define GL_BOOL_VEC4 0x8B59
#define GL_FLOAT_MAT2 0x8B5A
#define GL_FLOAT_MAT3 0x8B5B
#define GL_FLOAT_MAT4 0x8B5C
#define GL_FLOAT_MAT2x3 0x8B65
#define GL_FLOAT_MAT2x4 0x8B66
#define GL_FLOAT_MAT3x2 0x8B67
#define GL_FLOAT_MAT3x4 0x8B68
#define GL_FLOAT_MAT4x2 0x8B69
#define GL_FLOAT_MAT4x3 0x8B6A
#define GL_DOUBLE_MAT2 0x8F46
#define GL_DOUBLE_MAT3 0x8F47
#define GL_DOUBLE_MAT4 0x8F48
#define GL_DOUBLE_MAT2x3 0x8F49
#define GL_DOUBLE_MAT2x4 0x8F4A
#define GL_DOUBLE_MAT3x2 0x8F4B
#define GL_DOUBLE_MAT3x4 0x8F4C
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// These constants are borrowed from the NV_gpu_shader5 extension
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
#define GL_FLOAT16_VEC3_NV 0x8FFA
#define GL_FLOAT16_VEC4_NV 0x8FFB
#define GL_FLOAT16_MAT2_AMD 0x91C5
#define GL_FLOAT16_MAT3_AMD 0x91C6
#define GL_FLOAT16_MAT4_AMD 0x91C7
#define GL_FLOAT16_MAT2x3_AMD 0x91C8
#define GL_FLOAT16_MAT2x4_AMD 0x91C9
#define GL_FLOAT16_MAT3x2_AMD 0x91CA
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E
#define GL_SAMPLER_3D 0x8B5F
#define GL_SAMPLER_CUBE 0x8B60
#define GL_SAMPLER_BUFFER 0x8DC2
#define GL_SAMPLER_1D_ARRAY 0x8DC0
#define GL_SAMPLER_2D_ARRAY 0x8DC1
#define GL_SAMPLER_1D_ARRAY_SHADOW 0x8DC3
#define GL_SAMPLER_2D_ARRAY_SHADOW 0x8DC4
#define GL_SAMPLER_CUBE_SHADOW 0x8DC5
#define GL_SAMPLER_1D_SHADOW 0x8B61
#define GL_SAMPLER_2D_SHADOW 0x8B62
#define GL_SAMPLER_2D_RECT 0x8B63
#define GL_SAMPLER_2D_RECT_SHADOW 0x8B64
#define GL_SAMPLER_2D_MULTISAMPLE 0x9108
#define GL_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910B
#define GL_SAMPLER_CUBE_MAP_ARRAY 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW 0x900D
#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
#ifdef AMD_EXTENSIONS
#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
#define GL_FLOAT16_SAMPLER_CUBE_AMD 0x91D1
#define GL_FLOAT16_SAMPLER_2D_RECT_AMD 0x91D2
#define GL_FLOAT16_SAMPLER_1D_ARRAY_AMD 0x91D3
#define GL_FLOAT16_SAMPLER_2D_ARRAY_AMD 0x91D4
#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD 0x91D5
#define GL_FLOAT16_SAMPLER_BUFFER_AMD 0x91D6
#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD 0x91D7
#define GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD 0x91D8
#define GL_FLOAT16_SAMPLER_1D_SHADOW_AMD 0x91D9
#define GL_FLOAT16_SAMPLER_2D_SHADOW_AMD 0x91DA
#define GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD 0x91DB
#define GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD 0x91DC
#define GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD 0x91DD
#define GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD 0x91DE
#define GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD 0x91DF
#define GL_FLOAT16_IMAGE_1D_AMD 0x91E0
#define GL_FLOAT16_IMAGE_2D_AMD 0x91E1
#define GL_FLOAT16_IMAGE_3D_AMD 0x91E2
#define GL_FLOAT16_IMAGE_2D_RECT_AMD 0x91E3
#define GL_FLOAT16_IMAGE_CUBE_AMD 0x91E4
#define GL_FLOAT16_IMAGE_1D_ARRAY_AMD 0x91E5
#define GL_FLOAT16_IMAGE_2D_ARRAY_AMD 0x91E6
#define GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD 0x91E7
#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
#endif
#define GL_INT_SAMPLER_1D 0x8DC9
#define GL_INT_SAMPLER_2D 0x8DCA
#define GL_INT_SAMPLER_3D 0x8DCB
#define GL_INT_SAMPLER_CUBE 0x8DCC
#define GL_INT_SAMPLER_1D_ARRAY 0x8DCE
#define GL_INT_SAMPLER_2D_ARRAY 0x8DCF
#define GL_INT_SAMPLER_2D_RECT 0x8DCD
#define GL_INT_SAMPLER_BUFFER 0x8DD0
#define GL_INT_SAMPLER_2D_MULTISAMPLE 0x9109
#define GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910C
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY 0x900E
#define GL_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900E
#define GL_UNSIGNED_INT_SAMPLER_1D 0x8DD1
#define GL_UNSIGNED_INT_SAMPLER_2D 0x8DD2
#define GL_UNSIGNED_INT_SAMPLER_3D 0x8DD3
#define GL_UNSIGNED_INT_SAMPLER_CUBE 0x8DD4
#define GL_UNSIGNED_INT_SAMPLER_1D_ARRAY 0x8DD6
#define GL_UNSIGNED_INT_SAMPLER_2D_ARRAY 0x8DD7
#define GL_UNSIGNED_INT_SAMPLER_2D_RECT 0x8DD5
#define GL_UNSIGNED_INT_SAMPLER_BUFFER 0x8DD8
#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY 0x910D
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY 0x900F
#define GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900F
#define GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE 0x910A
#define GL_IMAGE_1D 0x904C
#define GL_IMAGE_2D 0x904D
#define GL_IMAGE_3D 0x904E
#define GL_IMAGE_2D_RECT 0x904F
#define GL_IMAGE_CUBE 0x9050
#define GL_IMAGE_BUFFER 0x9051
#define GL_IMAGE_1D_ARRAY 0x9052
#define GL_IMAGE_2D_ARRAY 0x9053
#define GL_IMAGE_CUBE_MAP_ARRAY 0x9054
#define GL_IMAGE_2D_MULTISAMPLE 0x9055
#define GL_IMAGE_2D_MULTISAMPLE_ARRAY 0x9056
#define GL_INT_IMAGE_1D 0x9057
#define GL_INT_IMAGE_2D 0x9058
#define GL_INT_IMAGE_3D 0x9059
#define GL_INT_IMAGE_2D_RECT 0x905A
#define GL_INT_IMAGE_CUBE 0x905B
#define GL_INT_IMAGE_BUFFER 0x905C
#define GL_INT_IMAGE_1D_ARRAY 0x905D
#define GL_INT_IMAGE_2D_ARRAY 0x905E
#define GL_INT_IMAGE_CUBE_MAP_ARRAY 0x905F
#define GL_INT_IMAGE_2D_MULTISAMPLE 0x9060
#define GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x9061
#define GL_UNSIGNED_INT_IMAGE_1D 0x9062
#define GL_UNSIGNED_INT_IMAGE_2D 0x9063
#define GL_UNSIGNED_INT_IMAGE_3D 0x9064
#define GL_UNSIGNED_INT_IMAGE_2D_RECT 0x9065
#define GL_UNSIGNED_INT_IMAGE_CUBE 0x9066
#define GL_UNSIGNED_INT_IMAGE_BUFFER 0x9067
#define GL_UNSIGNED_INT_IMAGE_1D_ARRAY 0x9068
#define GL_UNSIGNED_INT_IMAGE_2D_ARRAY 0x9069
#define GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY 0x906A
#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE 0x906B
#define GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY 0x906C
#define GL_UNSIGNED_INT_ATOMIC_COUNTER 0x92DB

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,489 @@
/* A Bison parser, made by GNU Bison 3.0. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* As a special exception, you may create a larger work that contains
part or all of the Bison parser skeleton and distribute that work
under terms of your choice, so long as that work isn't itself a
parser generator using the skeleton or a modified version thereof
as a parser skeleton. Alternatively, if you modify or redistribute
the parser skeleton itself, you may (at your option) remove this
special exception, which will cause the skeleton and the resulting
Bison output files to be licensed under the GNU General Public
License without this special exception.
This special exception was added by the Free Software Foundation in
version 2.2 of Bison. */
#ifndef YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
# define YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED
/* Debug traces. */
#ifndef YYDEBUG
# define YYDEBUG 1
#endif
#if YYDEBUG
extern int yydebug;
#endif
/* Token type. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
enum yytokentype
{
ATTRIBUTE = 258,
VARYING = 259,
FLOAT16_T = 260,
FLOAT = 261,
FLOAT32_T = 262,
DOUBLE = 263,
FLOAT64_T = 264,
CONST = 265,
BOOL = 266,
INT = 267,
UINT = 268,
INT64_T = 269,
UINT64_T = 270,
INT32_T = 271,
UINT32_T = 272,
INT16_T = 273,
UINT16_T = 274,
INT8_T = 275,
UINT8_T = 276,
BREAK = 277,
CONTINUE = 278,
DO = 279,
ELSE = 280,
FOR = 281,
IF = 282,
DISCARD = 283,
RETURN = 284,
SWITCH = 285,
CASE = 286,
DEFAULT = 287,
SUBROUTINE = 288,
BVEC2 = 289,
BVEC3 = 290,
BVEC4 = 291,
IVEC2 = 292,
IVEC3 = 293,
IVEC4 = 294,
UVEC2 = 295,
UVEC3 = 296,
UVEC4 = 297,
I64VEC2 = 298,
I64VEC3 = 299,
I64VEC4 = 300,
U64VEC2 = 301,
U64VEC3 = 302,
U64VEC4 = 303,
I32VEC2 = 304,
I32VEC3 = 305,
I32VEC4 = 306,
U32VEC2 = 307,
U32VEC3 = 308,
U32VEC4 = 309,
I16VEC2 = 310,
I16VEC3 = 311,
I16VEC4 = 312,
U16VEC2 = 313,
U16VEC3 = 314,
U16VEC4 = 315,
I8VEC2 = 316,
I8VEC3 = 317,
I8VEC4 = 318,
U8VEC2 = 319,
U8VEC3 = 320,
U8VEC4 = 321,
VEC2 = 322,
VEC3 = 323,
VEC4 = 324,
MAT2 = 325,
MAT3 = 326,
MAT4 = 327,
CENTROID = 328,
IN = 329,
OUT = 330,
INOUT = 331,
UNIFORM = 332,
PATCH = 333,
SAMPLE = 334,
BUFFER = 335,
SHARED = 336,
NONUNIFORM = 337,
COHERENT = 338,
VOLATILE = 339,
RESTRICT = 340,
READONLY = 341,
WRITEONLY = 342,
DVEC2 = 343,
DVEC3 = 344,
DVEC4 = 345,
DMAT2 = 346,
DMAT3 = 347,
DMAT4 = 348,
F16VEC2 = 349,
F16VEC3 = 350,
F16VEC4 = 351,
F16MAT2 = 352,
F16MAT3 = 353,
F16MAT4 = 354,
F32VEC2 = 355,
F32VEC3 = 356,
F32VEC4 = 357,
F32MAT2 = 358,
F32MAT3 = 359,
F32MAT4 = 360,
F64VEC2 = 361,
F64VEC3 = 362,
F64VEC4 = 363,
F64MAT2 = 364,
F64MAT3 = 365,
F64MAT4 = 366,
NOPERSPECTIVE = 367,
FLAT = 368,
SMOOTH = 369,
LAYOUT = 370,
__EXPLICITINTERPAMD = 371,
MAT2X2 = 372,
MAT2X3 = 373,
MAT2X4 = 374,
MAT3X2 = 375,
MAT3X3 = 376,
MAT3X4 = 377,
MAT4X2 = 378,
MAT4X3 = 379,
MAT4X4 = 380,
DMAT2X2 = 381,
DMAT2X3 = 382,
DMAT2X4 = 383,
DMAT3X2 = 384,
DMAT3X3 = 385,
DMAT3X4 = 386,
DMAT4X2 = 387,
DMAT4X3 = 388,
DMAT4X4 = 389,
F16MAT2X2 = 390,
F16MAT2X3 = 391,
F16MAT2X4 = 392,
F16MAT3X2 = 393,
F16MAT3X3 = 394,
F16MAT3X4 = 395,
F16MAT4X2 = 396,
F16MAT4X3 = 397,
F16MAT4X4 = 398,
F32MAT2X2 = 399,
F32MAT2X3 = 400,
F32MAT2X4 = 401,
F32MAT3X2 = 402,
F32MAT3X3 = 403,
F32MAT3X4 = 404,
F32MAT4X2 = 405,
F32MAT4X3 = 406,
F32MAT4X4 = 407,
F64MAT2X2 = 408,
F64MAT2X3 = 409,
F64MAT2X4 = 410,
F64MAT3X2 = 411,
F64MAT3X3 = 412,
F64MAT3X4 = 413,
F64MAT4X2 = 414,
F64MAT4X3 = 415,
F64MAT4X4 = 416,
ATOMIC_UINT = 417,
SAMPLER1D = 418,
SAMPLER2D = 419,
SAMPLER3D = 420,
SAMPLERCUBE = 421,
SAMPLER1DSHADOW = 422,
SAMPLER2DSHADOW = 423,
SAMPLERCUBESHADOW = 424,
SAMPLER1DARRAY = 425,
SAMPLER2DARRAY = 426,
SAMPLER1DARRAYSHADOW = 427,
SAMPLER2DARRAYSHADOW = 428,
ISAMPLER1D = 429,
ISAMPLER2D = 430,
ISAMPLER3D = 431,
ISAMPLERCUBE = 432,
ISAMPLER1DARRAY = 433,
ISAMPLER2DARRAY = 434,
USAMPLER1D = 435,
USAMPLER2D = 436,
USAMPLER3D = 437,
USAMPLERCUBE = 438,
USAMPLER1DARRAY = 439,
USAMPLER2DARRAY = 440,
SAMPLER2DRECT = 441,
SAMPLER2DRECTSHADOW = 442,
ISAMPLER2DRECT = 443,
USAMPLER2DRECT = 444,
SAMPLERBUFFER = 445,
ISAMPLERBUFFER = 446,
USAMPLERBUFFER = 447,
SAMPLERCUBEARRAY = 448,
SAMPLERCUBEARRAYSHADOW = 449,
ISAMPLERCUBEARRAY = 450,
USAMPLERCUBEARRAY = 451,
SAMPLER2DMS = 452,
ISAMPLER2DMS = 453,
USAMPLER2DMS = 454,
SAMPLER2DMSARRAY = 455,
ISAMPLER2DMSARRAY = 456,
USAMPLER2DMSARRAY = 457,
SAMPLEREXTERNALOES = 458,
F16SAMPLER1D = 459,
F16SAMPLER2D = 460,
F16SAMPLER3D = 461,
F16SAMPLER2DRECT = 462,
F16SAMPLERCUBE = 463,
F16SAMPLER1DARRAY = 464,
F16SAMPLER2DARRAY = 465,
F16SAMPLERCUBEARRAY = 466,
F16SAMPLERBUFFER = 467,
F16SAMPLER2DMS = 468,
F16SAMPLER2DMSARRAY = 469,
F16SAMPLER1DSHADOW = 470,
F16SAMPLER2DSHADOW = 471,
F16SAMPLER1DARRAYSHADOW = 472,
F16SAMPLER2DARRAYSHADOW = 473,
F16SAMPLER2DRECTSHADOW = 474,
F16SAMPLERCUBESHADOW = 475,
F16SAMPLERCUBEARRAYSHADOW = 476,
SAMPLER = 477,
SAMPLERSHADOW = 478,
TEXTURE1D = 479,
TEXTURE2D = 480,
TEXTURE3D = 481,
TEXTURECUBE = 482,
TEXTURE1DARRAY = 483,
TEXTURE2DARRAY = 484,
ITEXTURE1D = 485,
ITEXTURE2D = 486,
ITEXTURE3D = 487,
ITEXTURECUBE = 488,
ITEXTURE1DARRAY = 489,
ITEXTURE2DARRAY = 490,
UTEXTURE1D = 491,
UTEXTURE2D = 492,
UTEXTURE3D = 493,
UTEXTURECUBE = 494,
UTEXTURE1DARRAY = 495,
UTEXTURE2DARRAY = 496,
TEXTURE2DRECT = 497,
ITEXTURE2DRECT = 498,
UTEXTURE2DRECT = 499,
TEXTUREBUFFER = 500,
ITEXTUREBUFFER = 501,
UTEXTUREBUFFER = 502,
TEXTURECUBEARRAY = 503,
ITEXTURECUBEARRAY = 504,
UTEXTURECUBEARRAY = 505,
TEXTURE2DMS = 506,
ITEXTURE2DMS = 507,
UTEXTURE2DMS = 508,
TEXTURE2DMSARRAY = 509,
ITEXTURE2DMSARRAY = 510,
UTEXTURE2DMSARRAY = 511,
F16TEXTURE1D = 512,
F16TEXTURE2D = 513,
F16TEXTURE3D = 514,
F16TEXTURE2DRECT = 515,
F16TEXTURECUBE = 516,
F16TEXTURE1DARRAY = 517,
F16TEXTURE2DARRAY = 518,
F16TEXTURECUBEARRAY = 519,
F16TEXTUREBUFFER = 520,
F16TEXTURE2DMS = 521,
F16TEXTURE2DMSARRAY = 522,
SUBPASSINPUT = 523,
SUBPASSINPUTMS = 524,
ISUBPASSINPUT = 525,
ISUBPASSINPUTMS = 526,
USUBPASSINPUT = 527,
USUBPASSINPUTMS = 528,
F16SUBPASSINPUT = 529,
F16SUBPASSINPUTMS = 530,
IMAGE1D = 531,
IIMAGE1D = 532,
UIMAGE1D = 533,
IMAGE2D = 534,
IIMAGE2D = 535,
UIMAGE2D = 536,
IMAGE3D = 537,
IIMAGE3D = 538,
UIMAGE3D = 539,
IMAGE2DRECT = 540,
IIMAGE2DRECT = 541,
UIMAGE2DRECT = 542,
IMAGECUBE = 543,
IIMAGECUBE = 544,
UIMAGECUBE = 545,
IMAGEBUFFER = 546,
IIMAGEBUFFER = 547,
UIMAGEBUFFER = 548,
IMAGE1DARRAY = 549,
IIMAGE1DARRAY = 550,
UIMAGE1DARRAY = 551,
IMAGE2DARRAY = 552,
IIMAGE2DARRAY = 553,
UIMAGE2DARRAY = 554,
IMAGECUBEARRAY = 555,
IIMAGECUBEARRAY = 556,
UIMAGECUBEARRAY = 557,
IMAGE2DMS = 558,
IIMAGE2DMS = 559,
UIMAGE2DMS = 560,
IMAGE2DMSARRAY = 561,
IIMAGE2DMSARRAY = 562,
UIMAGE2DMSARRAY = 563,
F16IMAGE1D = 564,
F16IMAGE2D = 565,
F16IMAGE3D = 566,
F16IMAGE2DRECT = 567,
F16IMAGECUBE = 568,
F16IMAGE1DARRAY = 569,
F16IMAGE2DARRAY = 570,
F16IMAGECUBEARRAY = 571,
F16IMAGEBUFFER = 572,
F16IMAGE2DMS = 573,
F16IMAGE2DMSARRAY = 574,
STRUCT = 575,
VOID = 576,
WHILE = 577,
IDENTIFIER = 578,
TYPE_NAME = 579,
FLOATCONSTANT = 580,
DOUBLECONSTANT = 581,
INT16CONSTANT = 582,
UINT16CONSTANT = 583,
INT32CONSTANT = 584,
UINT32CONSTANT = 585,
INTCONSTANT = 586,
UINTCONSTANT = 587,
INT64CONSTANT = 588,
UINT64CONSTANT = 589,
BOOLCONSTANT = 590,
FLOAT16CONSTANT = 591,
LEFT_OP = 592,
RIGHT_OP = 593,
INC_OP = 594,
DEC_OP = 595,
LE_OP = 596,
GE_OP = 597,
EQ_OP = 598,
NE_OP = 599,
AND_OP = 600,
OR_OP = 601,
XOR_OP = 602,
MUL_ASSIGN = 603,
DIV_ASSIGN = 604,
ADD_ASSIGN = 605,
MOD_ASSIGN = 606,
LEFT_ASSIGN = 607,
RIGHT_ASSIGN = 608,
AND_ASSIGN = 609,
XOR_ASSIGN = 610,
OR_ASSIGN = 611,
SUB_ASSIGN = 612,
LEFT_PAREN = 613,
RIGHT_PAREN = 614,
LEFT_BRACKET = 615,
RIGHT_BRACKET = 616,
LEFT_BRACE = 617,
RIGHT_BRACE = 618,
DOT = 619,
COMMA = 620,
COLON = 621,
EQUAL = 622,
SEMICOLON = 623,
BANG = 624,
DASH = 625,
TILDE = 626,
PLUS = 627,
STAR = 628,
SLASH = 629,
PERCENT = 630,
LEFT_ANGLE = 631,
RIGHT_ANGLE = 632,
VERTICAL_BAR = 633,
CARET = 634,
AMPERSAND = 635,
QUESTION = 636,
INVARIANT = 637,
PRECISE = 638,
HIGH_PRECISION = 639,
MEDIUM_PRECISION = 640,
LOW_PRECISION = 641,
PRECISION = 642,
PACKED = 643,
RESOURCE = 644,
SUPERP = 645
};
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE YYSTYPE;
union YYSTYPE
{
#line 70 "MachineIndependent/glslang.y" /* yacc.c:1909 */
struct {
glslang::TSourceLoc loc;
union {
glslang::TString *string;
int i;
unsigned int u;
long long i64;
unsigned long long u64;
bool b;
double d;
};
glslang::TSymbol* symbol;
} lex;
struct {
glslang::TSourceLoc loc;
glslang::TOperator op;
union {
TIntermNode* intermNode;
glslang::TIntermNodePair nodePair;
glslang::TIntermTyped* intermTypedNode;
glslang::TAttributes* attributes;
};
union {
glslang::TPublicType type;
glslang::TFunction* function;
glslang::TParameter param;
glslang::TTypeLoc typeLine;
glslang::TTypeList* typeList;
glslang::TArraySizes* arraySizes;
glslang::TIdentifierList* identifierList;
};
} interm;
#line 480 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
};
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif
int yyparse (glslang::TParseContext* pParseContext);
#endif /* !YY_YY_MACHINEINDEPENDENT_GLSLANG_TAB_CPP_H_INCLUDED */

File diff suppressed because it is too large

View File

@ -0,0 +1,808 @@
//
// Copyright (C) 2016-2017 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/Common.h"
#include "../Include/InfoSink.h"
#include "iomapper.h"
#include "LiveTraverser.h"
#include "localintermediate.h"
#include "gl_types.h"
#include <unordered_set>
#include <unordered_map>
//
// Map IO bindings.
//
// High-level algorithm for one stage:
//
// 1. Traverse all code (live+dead) to find the explicitly provided bindings.
//
// 2. Traverse (just) the live code to determine which non-provided bindings
// require auto-numbering. We do not auto-number dead ones.
//
// 3. Traverse all the code to apply the bindings:
// a. explicitly given bindings are offset according to their type
// b. implicit live bindings are auto-numbered into the holes, using
// any open binding slot.
// c. implicit dead bindings are left un-bound.
//
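//
// A minimal, self-contained sketch of the three passes described above, for
// illustration only (this is not glslang's actual mapping code). Explicit
// bindings are recorded first, live implicit bindings are then auto-numbered
// into the remaining holes, and dead implicit bindings stay unbound. The
// names ToyVar, autoNumber and io_mapping_sketch are made up for this sketch.
//
#include <vector>
#include <algorithm>

namespace io_mapping_sketch {

struct ToyVar {
    bool live;            // referenced by live code?
    int  explicitBinding; // -1 when the shader supplied no binding
    int  assignedBinding; // result of the mapping
};

inline void autoNumber(std::vector<ToyVar>& vars)
{
    // Pass 1: collect the explicitly provided bindings (live and dead).
    std::vector<int> used;
    for (const ToyVar& v : vars)
        if (v.explicitBinding != -1)
            used.push_back(v.explicitBinding);
    std::sort(used.begin(), used.end());

    // Passes 2 and 3: apply bindings.
    int next = 0;
    for (ToyVar& v : vars) {
        if (v.explicitBinding != -1)
            v.assignedBinding = v.explicitBinding;           // a. keep explicit
        else if (v.live) {
            while (std::binary_search(used.begin(), used.end(), next))
                ++next;                                       // b. next open slot
            v.assignedBinding = next;
            used.insert(std::lower_bound(used.begin(), used.end(), next), next);
        } else
            v.assignedBinding = -1;                           // c. dead: unbound
    }
}

} // end namespace io_mapping_sketch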
namespace glslang {
struct TVarEntryInfo
{
int id;
TIntermSymbol* symbol;
bool live;
int newBinding;
int newSet;
int newLocation;
int newComponent;
int newIndex;
struct TOrderById
{
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
{
return l.id < r.id;
}
};
struct TOrderByPriority
{
// ordering:
// 1) has both binding and set
// 2) has binding but no set
// 3) has no binding but set
// 4) has no binding and no set
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r)
{
const TQualifier& lq = l.symbol->getQualifier();
const TQualifier& rq = r.symbol->getQualifier();
// simple rules:
// has binding gives 2 points
// has set gives 1 point
// who has the most points is more important.
int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
if (lPoints == rPoints)
return l.id < r.id;
return lPoints > rPoints;
}
};
};
typedef std::vector<TVarEntryInfo> TVarLiveMap;
class TVarGatherTraverser : public TLiveTraverser
{
public:
TVarGatherTraverser(const TIntermediate& i, bool traverseDeadCode, TVarLiveMap& inList, TVarLiveMap& outList, TVarLiveMap& uniformList)
: TLiveTraverser(i, traverseDeadCode, true, true, false)
, inputList(inList)
, outputList(outList)
, uniformList(uniformList)
{
}
virtual void visitSymbol(TIntermSymbol* base)
{
TVarLiveMap* target = nullptr;
if (base->getQualifier().storage == EvqVaryingIn)
target = &inputList;
else if (base->getQualifier().storage == EvqVaryingOut)
target = &outputList;
else if (base->getQualifier().isUniformOrBuffer() && !base->getQualifier().layoutPushConstant)
target = &uniformList;
if (target) {
TVarEntryInfo ent = { base->getId(), base, !traverseAll };
TVarLiveMap::iterator at = std::lower_bound(target->begin(), target->end(), ent, TVarEntryInfo::TOrderById());
if (at != target->end() && at->id == ent.id)
at->live = at->live || !traverseAll; // update live state
else
target->insert(at, ent);
}
}
private:
TVarLiveMap& inputList;
TVarLiveMap& outputList;
TVarLiveMap& uniformList;
};
class TVarSetTraverser : public TLiveTraverser
{
public:
TVarSetTraverser(const TIntermediate& i, const TVarLiveMap& inList, const TVarLiveMap& outList, const TVarLiveMap& uniformList)
: TLiveTraverser(i, true, true, true, false)
, inputList(inList)
, outputList(outList)
, uniformList(uniformList)
{
}
virtual void visitSymbol(TIntermSymbol* base)
{
const TVarLiveMap* source;
if (base->getQualifier().storage == EvqVaryingIn)
source = &inputList;
else if (base->getQualifier().storage == EvqVaryingOut)
source = &outputList;
else if (base->getQualifier().isUniformOrBuffer())
source = &uniformList;
else
return;
TVarEntryInfo ent = { base->getId() };
TVarLiveMap::const_iterator at = std::lower_bound(source->begin(), source->end(), ent, TVarEntryInfo::TOrderById());
if (at == source->end())
return;
if (at->id != ent.id)
return;
if (at->newBinding != -1)
base->getWritableType().getQualifier().layoutBinding = at->newBinding;
if (at->newSet != -1)
base->getWritableType().getQualifier().layoutSet = at->newSet;
if (at->newLocation != -1)
base->getWritableType().getQualifier().layoutLocation = at->newLocation;
if (at->newComponent != -1)
base->getWritableType().getQualifier().layoutComponent = at->newComponent;
if (at->newIndex != -1)
base->getWritableType().getQualifier().layoutIndex = at->newIndex;
}
private:
const TVarLiveMap& inputList;
const TVarLiveMap& outputList;
const TVarLiveMap& uniformList;
};
struct TNotifyUniformAdaptor
{
EShLanguage stage;
TIoMapResolver& resolver;
inline TNotifyUniformAdaptor(EShLanguage s, TIoMapResolver& r)
: stage(s)
, resolver(r)
{
}
inline void operator()(TVarEntryInfo& ent)
{
resolver.notifyBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
}
private:
TNotifyUniformAdaptor& operator=(TNotifyUniformAdaptor&);
};
struct TNotifyInOutAdaptor
{
EShLanguage stage;
TIoMapResolver& resolver;
inline TNotifyInOutAdaptor(EShLanguage s, TIoMapResolver& r)
: stage(s)
, resolver(r)
{
}
inline void operator()(TVarEntryInfo& ent)
{
resolver.notifyInOut(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
}
private:
TNotifyInOutAdaptor& operator=(TNotifyInOutAdaptor&);
};
struct TResolverUniformAdaptor
{
TResolverUniformAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
: stage(s)
, resolver(r)
, infoSink(i)
, error(e)
, intermediate(interm)
{
}
inline void operator()(TVarEntryInfo& ent)
{
ent.newLocation = -1;
ent.newComponent = -1;
ent.newBinding = -1;
ent.newSet = -1;
ent.newIndex = -1;
const bool isValid = resolver.validateBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
ent.live);
if (isValid) {
ent.newBinding = resolver.resolveBinding(stage, ent.symbol->getName().c_str(), ent.symbol->getType(),
ent.live);
ent.newSet = resolver.resolveSet(stage, ent.symbol->getName().c_str(), ent.symbol->getType(), ent.live);
ent.newLocation = resolver.resolveUniformLocation(stage, ent.symbol->getName().c_str(),
ent.symbol->getType(), ent.live);
if (ent.newBinding != -1) {
if (ent.newBinding >= int(TQualifier::layoutBindingEnd)) {
TString err = "mapped binding out of range: " + ent.symbol->getName();
infoSink.info.message(EPrefixInternalError, err.c_str());
error = true;
}
}
if (ent.newSet != -1) {
if (ent.newSet >= int(TQualifier::layoutSetEnd)) {
TString err = "mapped set out of range: " + ent.symbol->getName();
infoSink.info.message(EPrefixInternalError, err.c_str());
error = true;
}
}
} else {
TString errorMsg = "Invalid binding: " + ent.symbol->getName();
infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
error = true;
}
}
EShLanguage stage;
TIoMapResolver& resolver;
TInfoSink& infoSink;
bool& error;
TIntermediate& intermediate;
private:
TResolverUniformAdaptor& operator=(TResolverUniformAdaptor&);
};
struct TResolverInOutAdaptor
{
TResolverInOutAdaptor(EShLanguage s, TIoMapResolver& r, TInfoSink& i, bool& e, TIntermediate& interm)
: stage(s)
, resolver(r)
, infoSink(i)
, error(e)
, intermediate(interm)
{
}
inline void operator()(TVarEntryInfo& ent)
{
ent.newLocation = -1;
ent.newComponent = -1;
ent.newBinding = -1;
ent.newSet = -1;
ent.newIndex = -1;
const bool isValid = resolver.validateInOut(stage,
ent.symbol->getName().c_str(),
ent.symbol->getType(),
ent.live);
if (isValid) {
ent.newLocation = resolver.resolveInOutLocation(stage,
ent.symbol->getName().c_str(),
ent.symbol->getType(),
ent.live);
ent.newComponent = resolver.resolveInOutComponent(stage,
ent.symbol->getName().c_str(),
ent.symbol->getType(),
ent.live);
ent.newIndex = resolver.resolveInOutIndex(stage,
ent.symbol->getName().c_str(),
ent.symbol->getType(),
ent.live);
} else {
TString errorMsg = "Invalid shader In/Out variable semantic: ";
errorMsg += ent.symbol->getType().getQualifier().semanticName;
infoSink.info.message(EPrefixInternalError, errorMsg.c_str());
error = true;
}
}
EShLanguage stage;
TIoMapResolver& resolver;
TInfoSink& infoSink;
bool& error;
TIntermediate& intermediate;
private:
TResolverInOutAdaptor& operator=(TResolverInOutAdaptor&);
};
// Base class for shared TIoMapResolver services, used by several derivations.
struct TDefaultIoResolverBase : public glslang::TIoMapResolver
{
TDefaultIoResolverBase(const TIntermediate &intermediate) :
intermediate(intermediate),
nextUniformLocation(0),
nextInputLocation(0),
nextOutputLocation(0)
{ }
int getBaseBinding(TResourceType res, unsigned int set) const {
return selectBaseBinding(intermediate.getShiftBinding(res),
intermediate.getShiftBindingForSet(res, set));
}
const std::vector<std::string>& getResourceSetBinding() const { return intermediate.getResourceSetBinding(); }
bool doAutoBindingMapping() const { return intermediate.getAutoMapBindings(); }
bool doAutoLocationMapping() const { return intermediate.getAutoMapLocations(); }
typedef std::vector<int> TSlotSet;
typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
TSlotSetMap slots;
TSlotSet::iterator findSlot(int set, int slot)
{
return std::lower_bound(slots[set].begin(), slots[set].end(), slot);
}
bool checkEmpty(int set, int slot)
{
TSlotSet::iterator at = findSlot(set, slot);
return !(at != slots[set].end() && *at == slot);
}
int reserveSlot(int set, int slot, int size = 1)
{
TSlotSet::iterator at = findSlot(set, slot);
// tolerate aliasing, by not double-recording aliases
// (policy about appropriateness of the alias is higher up)
for (int i = 0; i < size; i++) {
if (at == slots[set].end() || *at != slot + i)
at = slots[set].insert(at, slot + i);
++at;
}
return slot;
}
int getFreeSlot(int set, int base, int size = 1)
{
TSlotSet::iterator at = findSlot(set, base);
if (at == slots[set].end())
return reserveSlot(set, base, size);
// look for a big enough gap
for (; at != slots[set].end(); ++at) {
if (*at - base >= size)
break;
base = *at + 1;
}
return reserveSlot(set, base, size);
}
virtual bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override = 0;
virtual int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override = 0;
int resolveSet(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override
{
if (type.getQualifier().hasSet())
return type.getQualifier().layoutSet;
// If a command line or API option requested a single descriptor set, use that (if not overridden by spaceN)
if (getResourceSetBinding().size() == 1)
return atoi(getResourceSetBinding()[0].c_str());
return 0;
}
int resolveUniformLocation(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool /*is_live*/) override
{
// kick out if not doing this
if (!doAutoLocationMapping())
return -1;
// no locations added if already present, a built-in variable, a block, or an opaque
if (type.getQualifier().hasLocation() || type.isBuiltIn() ||
type.getBasicType() == EbtBlock ||
type.getBasicType() == EbtAtomicUint ||
(type.containsOpaque() && intermediate.getSpv().openGl == 0))
return -1;
// no locations on blocks of built-in variables
if (type.isStruct()) {
if (type.getStruct()->size() < 1)
return -1;
if ((*type.getStruct())[0].type->isBuiltIn())
return -1;
}
int location = nextUniformLocation;
nextUniformLocation += TIntermediate::computeTypeUniformLocationSize(type);
return location;
}
bool validateInOut(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
{
return true;
}
int resolveInOutLocation(EShLanguage stage, const char* /*name*/, const TType& type, bool /*is_live*/) override
{
// kick out if not doing this
if (!doAutoLocationMapping())
return -1;
// no locations added if already present, or a built-in variable
if (type.getQualifier().hasLocation() || type.isBuiltIn())
return -1;
// no locations on blocks of built-in variables
if (type.isStruct()) {
if (type.getStruct()->size() < 1)
return -1;
if ((*type.getStruct())[0].type->isBuiltIn())
return -1;
}
// point to the right input or output location counter
int& nextLocation = type.getQualifier().isPipeInput() ? nextInputLocation : nextOutputLocation;
// Placeholder. This does not do proper cross-stage lining up, nor
// work with mixed location/no-location declarations.
int location = nextLocation;
int typeLocationSize;
// Don't take into account the outer-most array if the stage's
// interface is automatically an array.
if (type.getQualifier().isArrayedIo(stage)) {
TType elementType(type, 0);
typeLocationSize = TIntermediate::computeTypeLocationSize(elementType, stage);
} else {
typeLocationSize = TIntermediate::computeTypeLocationSize(type, stage);
}
nextLocation += typeLocationSize;
return location;
}
int resolveInOutComponent(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
{
return -1;
}
int resolveInOutIndex(EShLanguage /*stage*/, const char* /*name*/, const TType& /*type*/, bool /*is_live*/) override
{
return -1;
}
void notifyBinding(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
void notifyInOut(EShLanguage, const char* /*name*/, const TType&, bool /*is_live*/) override {}
void endNotifications(EShLanguage) override {}
void beginNotifications(EShLanguage) override {}
void beginResolve(EShLanguage) override {}
void endResolve(EShLanguage) override {}
protected:
TDefaultIoResolverBase(TDefaultIoResolverBase&);
TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
const TIntermediate &intermediate;
int nextUniformLocation;
int nextInputLocation;
int nextOutputLocation;
// Return descriptor set specific base if there is one, and the generic base otherwise.
int selectBaseBinding(int base, int descriptorSetBase) const {
return descriptorSetBase != -1 ? descriptorSetBase : base;
}
static int getLayoutSet(const glslang::TType& type) {
if (type.getQualifier().hasSet())
return type.getQualifier().layoutSet;
else
return 0;
}
static bool isSamplerType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
}
static bool isTextureType(const glslang::TType& type) {
return (type.getBasicType() == glslang::EbtSampler &&
(type.getSampler().isTexture() || type.getSampler().isSubpass()));
}
static bool isUboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqUniform;
}
};
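// Illustrative sketch (not part of glslang): how the slot bookkeeping above plays out.
// Each descriptor set owns a sorted vector of occupied binding slots, so reserveSlot()
// records explicit bindings and getFreeSlot() finds the first gap big enough for an
// auto-mapped resource. Assuming 'r' is a concrete resolver derived from this base:
//
//   r.reserveSlot(0, 1);     // explicit binding 1 in set 0 -> slots[0] = {1}
//   r.getFreeSlot(0, 0);     // auto-maps into the gap before it -> returns 0
//   r.getFreeSlot(0, 0);     // next free slot after 0 and 1 -> returns 2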
/*
* Basic implementation of glslang::TIoMapResolver that replaces the
* previous offset behavior.
 * It does the same: it uses the offsets for the corresponding uniform
 * types, and also respects the EOptionAutoMapBindings flag, binding
 * them if needed.
*/
/*
* Default resolver
*/
struct TDefaultIoResolver : public TDefaultIoResolverBase
{
TDefaultIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
{
return true;
}
int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
{
const int set = getLayoutSet(type);
// On OpenGL, arrays of opaque types take a separate binding for each element
int numBindings = intermediate.getSpv().openGl != 0 && type.isSizedArray() ? type.getCumulativeArraySize() : 1;
if (type.getQualifier().hasBinding()) {
if (isImageType(type))
return reserveSlot(set, getBaseBinding(EResImage, set) + type.getQualifier().layoutBinding, numBindings);
if (isTextureType(type))
return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding, numBindings);
if (isSsboType(type))
return reserveSlot(set, getBaseBinding(EResSsbo, set) + type.getQualifier().layoutBinding, numBindings);
if (isSamplerType(type))
return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding, numBindings);
if (isUboType(type))
return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding, numBindings);
} else if (is_live && doAutoBindingMapping()) {
// find a free slot; the caller made sure all variables with an explicit binding
// were passed first, and now the remaining variables that still need one are passed
if (isImageType(type))
return getFreeSlot(set, getBaseBinding(EResImage, set), numBindings);
if (isTextureType(type))
return getFreeSlot(set, getBaseBinding(EResTexture, set), numBindings);
if (isSsboType(type))
return getFreeSlot(set, getBaseBinding(EResSsbo, set), numBindings);
if (isSamplerType(type))
return getFreeSlot(set, getBaseBinding(EResSampler, set), numBindings);
if (isUboType(type))
return getFreeSlot(set, getBaseBinding(EResUbo, set), numBindings);
}
return -1;
}
protected:
static bool isImageType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
}
static bool isSsboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqBuffer;
}
};
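// Illustrative sketch (not part of glslang): the effect of resolveBinding() above on
// two hypothetical GLSL declarations, assuming auto-mapping is enabled and no base
// binding shifts are set.
//
//   layout(binding = 3) uniform sampler2D texA;  // explicit -> reserveSlot(set 0, 3)
//   uniform sampler2D texB;                      // live, unbound -> getFreeSlot(set 0, 0) == 0
//
// The caller resolves all explicitly bound variables first, so texB can never steal
// the slot that texA pinned.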
/********************************************************************************
The following IO resolver maps types in HLSL register space, as follows:
t - for shader resource views (SRV)
TEXTURE1D
TEXTURE1DARRAY
TEXTURE2D
TEXTURE2DARRAY
TEXTURE3D
TEXTURECUBE
TEXTURECUBEARRAY
TEXTURE2DMS
TEXTURE2DMSARRAY
STRUCTUREDBUFFER
BYTEADDRESSBUFFER
BUFFER
TBUFFER
s - for samplers
SAMPLER
SAMPLER1D
SAMPLER2D
SAMPLER3D
SAMPLERCUBE
SAMPLERSTATE
SAMPLERCOMPARISONSTATE
u - for unordered access views (UAV)
RWBYTEADDRESSBUFFER
RWSTRUCTUREDBUFFER
APPENDSTRUCTUREDBUFFER
CONSUMESTRUCTUREDBUFFER
RWBUFFER
RWTEXTURE1D
RWTEXTURE1DARRAY
RWTEXTURE2D
RWTEXTURE2DARRAY
RWTEXTURE3D
b - for constant buffer views (CBV)
CBUFFER
CONSTANTBUFFER
********************************************************************************/
struct TDefaultHlslIoResolver : public TDefaultIoResolverBase
{
TDefaultHlslIoResolver(const TIntermediate &intermediate) : TDefaultIoResolverBase(intermediate) { }
bool validateBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& /*type*/, bool /*is_live*/) override
{
return true;
}
int resolveBinding(EShLanguage /*stage*/, const char* /*name*/, const glslang::TType& type, bool is_live) override
{
const int set = getLayoutSet(type);
if (type.getQualifier().hasBinding()) {
if (isUavType(type))
return reserveSlot(set, getBaseBinding(EResUav, set) + type.getQualifier().layoutBinding);
if (isSrvType(type))
return reserveSlot(set, getBaseBinding(EResTexture, set) + type.getQualifier().layoutBinding);
if (isSamplerType(type))
return reserveSlot(set, getBaseBinding(EResSampler, set) + type.getQualifier().layoutBinding);
if (isUboType(type))
return reserveSlot(set, getBaseBinding(EResUbo, set) + type.getQualifier().layoutBinding);
} else if (is_live && doAutoBindingMapping()) {
// find a free slot; the caller made sure all variables with an explicit binding
// were passed first, and now the remaining variables that still need one are passed
if (isUavType(type))
return getFreeSlot(set, getBaseBinding(EResUav, set));
if (isSrvType(type))
return getFreeSlot(set, getBaseBinding(EResTexture, set));
if (isSamplerType(type))
return getFreeSlot(set, getBaseBinding(EResSampler, set));
if (isUboType(type))
return getFreeSlot(set, getBaseBinding(EResUbo, set));
}
return -1;
}
protected:
// Return true if this is a SRV (shader resource view) type:
static bool isSrvType(const glslang::TType& type) {
return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a UAV (unordered access view) type:
static bool isUavType(const glslang::TType& type) {
if (type.getQualifier().readonly)
return false;
return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
(type.getQualifier().storage == EvqBuffer);
}
};
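// Illustrative sketch (not part of glslang): how the HLSL register classes listed above
// fall into the resource categories used by resolveBinding(), assuming a texture base
// binding shift of 20 was requested via setShiftBinding(EResTexture, 20):
//
//   Texture2D      diffuse : register(t3);  // SRV, explicit     -> reserveSlot(set, 20 + 3)
//   SamplerState   samp;                    // sampler, unbound  -> getFreeSlot(set, sampler base)
//   RWBuffer<uint> counters;                // UAV, unbound      -> getFreeSlot(set, UAV base)
//   cbuffer PerFrame { float4x4 mvp; };     // CBV, unbound      -> getFreeSlot(set, UBO base)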
// Map I/O variables to provided offsets, and make bindings for
// unbound but live variables.
//
// Returns false if the input is too malformed to do this.
bool TIoMapper::addStage(EShLanguage stage, TIntermediate &intermediate, TInfoSink &infoSink, TIoMapResolver *resolver)
{
bool somethingToDo = !intermediate.getResourceSetBinding().empty() ||
intermediate.getAutoMapBindings() ||
intermediate.getAutoMapLocations();
for (int res = 0; res < EResCount; ++res) {
somethingToDo = somethingToDo ||
(intermediate.getShiftBinding(TResourceType(res)) != 0) ||
intermediate.hasShiftBindingForSet(TResourceType(res));
}
if (!somethingToDo && resolver == nullptr)
return true;
if (intermediate.getNumEntryPoints() != 1 || intermediate.isRecursive())
return false;
TIntermNode* root = intermediate.getTreeRoot();
if (root == nullptr)
return false;
// if no resolver is provided, use the default resolver with the given shifts and auto map settings
TDefaultIoResolver defaultResolver(intermediate);
TDefaultHlslIoResolver defaultHlslResolver(intermediate);
if (resolver == nullptr) {
// TODO: use a passed in IO mapper for this
if (intermediate.usingHlslIoMapping())
resolver = &defaultHlslResolver;
else
resolver = &defaultResolver;
}
TVarLiveMap inVarMap, outVarMap, uniformVarMap;
TVarGatherTraverser iter_binding_all(intermediate, true, inVarMap, outVarMap, uniformVarMap);
TVarGatherTraverser iter_binding_live(intermediate, false, inVarMap, outVarMap, uniformVarMap);
root->traverse(&iter_binding_all);
iter_binding_live.pushFunction(intermediate.getEntryPointMangledName().c_str());
while (!iter_binding_live.functions.empty()) {
TIntermNode* function = iter_binding_live.functions.back();
iter_binding_live.functions.pop_back();
function->traverse(&iter_binding_live);
}
// sort entries by priority. see TVarEntryInfo::TOrderByPriority for info.
std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderByPriority());
bool hadError = false;
TNotifyInOutAdaptor inOutNotify(stage, *resolver);
TNotifyUniformAdaptor uniformNotify(stage, *resolver);
TResolverUniformAdaptor uniformResolve(stage, *resolver, infoSink, hadError, intermediate);
TResolverInOutAdaptor inOutResolve(stage, *resolver, infoSink, hadError, intermediate);
resolver->beginNotifications(stage);
std::for_each(inVarMap.begin(), inVarMap.end(), inOutNotify);
std::for_each(outVarMap.begin(), outVarMap.end(), inOutNotify);
std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformNotify);
resolver->endNotifications(stage);
resolver->beginResolve(stage);
std::for_each(inVarMap.begin(), inVarMap.end(), inOutResolve);
std::for_each(outVarMap.begin(), outVarMap.end(), inOutResolve);
std::for_each(uniformVarMap.begin(), uniformVarMap.end(), uniformResolve);
resolver->endResolve(stage);
if (!hadError) {
// sort by id again, so we can use lower bound to find entries
std::sort(uniformVarMap.begin(), uniformVarMap.end(), TVarEntryInfo::TOrderById());
TVarSetTraverser iter_iomap(intermediate, inVarMap, outVarMap, uniformVarMap);
root->traverse(&iter_iomap);
}
return !hadError;
}
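// Illustrative sketch (not part of glslang): a client driving this entry point after a
// successful link. 'shaderIntermediate' and 'sink' are hypothetical caller-owned objects.
//
//   glslang::TIoMapper ioMapper;
//   // A null resolver selects TDefaultIoResolver, or TDefaultHlslIoResolver when
//   // HLSL-style IO mapping was requested on the intermediate.
//   bool ok = ioMapper.addStage(EShLangFragment, shaderIntermediate, sink, nullptr);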
} // end namespace glslang

View File

@@ -0,0 +1,63 @@
//
// Copyright (C) 2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _IOMAPPER_INCLUDED
#define _IOMAPPER_INCLUDED
#include "../Public/ShaderLang.h"
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
class TInfoSink;
namespace glslang {
class TIntermediate;
// I/O mapper
class TIoMapper {
public:
TIoMapper() {}
virtual ~TIoMapper() {}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
};
} // end namespace glslang
#endif // _IOMAPPER_INCLUDED

View File

@@ -0,0 +1,198 @@
//
// Copyright (C) 2013 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Do sub tree walks for
// 1) inductive loop bodies to see if the inductive variable is modified
// 2) array-index expressions to see if they are "constant-index-expression"
//
// These are per Appendix A of ES 2.0:
//
// "Within the body of the loop, the loop index is not statically assigned to nor is it used as the
// argument to a function out or inout parameter."
//
// "The following are constant-index-expressions:
// - Constant expressions
// - Loop indices as defined in section 4
// - Expressions composed of both of the above"
//
// N.B.: assuming the last rule excludes function calls
//
#include "ParseHelper.h"
namespace glslang {
//
// The inductive loop-body traverser.
//
// Just look at things that might modify the loop index.
//
class TInductiveTraverser : public TIntermTraverser {
public:
TInductiveTraverser(int id, TSymbolTable& st)
: loopId(id), symbolTable(st), bad(false) { }
virtual bool visitBinary(TVisit, TIntermBinary* node);
virtual bool visitUnary(TVisit, TIntermUnary* node);
virtual bool visitAggregate(TVisit, TIntermAggregate* node);
int loopId; // unique ID of the symbol that's the loop inductive variable
TSymbolTable& symbolTable;
bool bad;
TSourceLoc badLoc;
protected:
TInductiveTraverser(TInductiveTraverser&);
TInductiveTraverser& operator=(TInductiveTraverser&);
};
// check binary operations for those modifying the loop index
bool TInductiveTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
if (node->modifiesState() && node->getLeft()->getAsSymbolNode() &&
node->getLeft()->getAsSymbolNode()->getId() == loopId) {
bad = true;
badLoc = node->getLoc();
}
return true;
}
// check unary operations for those modifying the loop index
bool TInductiveTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
{
if (node->modifiesState() && node->getOperand()->getAsSymbolNode() &&
node->getOperand()->getAsSymbolNode()->getId() == loopId) {
bad = true;
badLoc = node->getLoc();
}
return true;
}
// check function calls for arguments modifying the loop index
bool TInductiveTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
{
if (node->getOp() == EOpFunctionCall) {
// see if an out or inout argument is the loop index
const TIntermSequence& args = node->getSequence();
for (int i = 0; i < (int)args.size(); ++i) {
if (args[i]->getAsSymbolNode() && args[i]->getAsSymbolNode()->getId() == loopId) {
TSymbol* function = symbolTable.find(node->getName());
const TType* type = (*function->getAsFunction())[i].type;
if (type->getQualifier().storage == EvqOut ||
type->getQualifier().storage == EvqInOut) {
bad = true;
badLoc = node->getLoc();
}
}
}
}
return true;
}
//
// External function to call for loop check.
//
void TParseContext::inductiveLoopBodyCheck(TIntermNode* body, int loopId, TSymbolTable& symbolTable)
{
TInductiveTraverser it(loopId, symbolTable);
if (body == nullptr)
return;
body->traverse(&it);
if (it.bad)
error(it.badLoc, "inductive loop index modified", "limitations", "");
}
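// Illustrative sketch (not part of glslang): a loop body this check rejects, because
// the inductive index 'i' is written inside the body.
//
//   for (int i = 0; i < 4; ++i) {
//       i += 2;      // flagged: "inductive loop index modified"
//       foo(i);      // also flagged if foo's matching parameter is 'out' or 'inout'
//   }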
//
// The "constant-index-expression" tranverser.
//
// Just look at things that can form an index.
//
class TIndexTraverser : public TIntermTraverser {
public:
TIndexTraverser(const TIdSetType& ids) : inductiveLoopIds(ids), bad(false) { }
virtual void visitSymbol(TIntermSymbol* symbol);
virtual bool visitAggregate(TVisit, TIntermAggregate* node);
const TIdSetType& inductiveLoopIds;
bool bad;
TSourceLoc badLoc;
protected:
TIndexTraverser(TIndexTraverser&);
TIndexTraverser& operator=(TIndexTraverser&);
};
// make sure symbols are inductive-loop indexes
void TIndexTraverser::visitSymbol(TIntermSymbol* symbol)
{
if (inductiveLoopIds.find(symbol->getId()) == inductiveLoopIds.end()) {
bad = true;
badLoc = symbol->getLoc();
}
}
// check for function calls, assuming they are bad; spec. doesn't really say
bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
{
if (node->getOp() == EOpFunctionCall) {
bad = true;
badLoc = node->getLoc();
}
return true;
}
//
// External function to call for loop check.
//
void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
{
TIndexTraverser it(inductiveLoopIds);
index->traverse(&it);
if (it.bad)
error(it.badLoc, "Non-constant-index-expression", "limitations", "");
}
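// Illustrative sketch (not part of glslang): with 'i' the only inductive loop index,
// the traverser above accepts indexes built from constants and 'i', and flags the rest:
//
//   a[i + 1]   // accepted: constants and a loop index
//   a[j]       // rejected when 'j' is not an inductive loop index
//   a[f(0)]    // rejected: function calls are assumed non-constant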
} // end namespace glslang

File diff suppressed because it is too large

View File

@@ -0,0 +1,757 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2016 LunarG, Inc.
// Copyright (C) 2017 ARM Limited.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _LOCAL_INTERMEDIATE_INCLUDED_
#define _LOCAL_INTERMEDIATE_INCLUDED_
#include "../Include/intermediate.h"
#include "../Public/ShaderLang.h"
#include "Versions.h"
#include <algorithm>
#include <set>
#include <array>
class TInfoSink;
namespace glslang {
struct TMatrixSelector {
int coord1; // stay agnostic about column/row; this is parse order
int coord2;
};
typedef int TVectorSelector;
const int MaxSwizzleSelectors = 4;
template<typename selectorType>
class TSwizzleSelectors {
public:
TSwizzleSelectors() : size_(0) { }
void push_back(selectorType comp)
{
if (size_ < MaxSwizzleSelectors)
components[size_++] = comp;
}
void resize(int s)
{
assert(s <= size_);
size_ = s;
}
int size() const { return size_; }
selectorType operator[](int i) const
{
assert(i < MaxSwizzleSelectors);
return components[i];
}
private:
int size_;
selectorType components[MaxSwizzleSelectors];
};
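// Illustrative sketch (not part of glslang): how a front end might accumulate the
// selectors for a swizzle such as ".yzx" on a vector.
//
//   TSwizzleSelectors<TVectorSelector> sel;
//   sel.push_back(1);   // y
//   sel.push_back(2);   // z
//   sel.push_back(0);   // x
//   // sel.size() == 3; pushes beyond MaxSwizzleSelectors (4) are silently dropped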
//
// Some helper structures for TIntermediate. Their contents are encapsulated
// by TIntermediate.
//
// Used for call-graph algorithms for detecting recursion, missing bodies, and dead bodies.
// A "call" is a pair: <caller, callee>.
// There can be duplicates. General assumption is the list is small.
struct TCall {
TCall(const TString& pCaller, const TString& pCallee) : caller(pCaller), callee(pCallee) { }
TString caller;
TString callee;
bool visited;
bool currentPath;
bool errorGiven;
int calleeBodyPosition;
};
// A generic 1-D range.
struct TRange {
TRange(int start, int last) : start(start), last(last) { }
bool overlap(const TRange& rhs) const
{
return last >= rhs.start && start <= rhs.last;
}
int start;
int last;
};
// An IO range is a 3-D rectangle; the set of (location, component, index) triples all lying
// within the same location range, component range, and index value. Locations don't alias unless
// all other dimensions of their range overlap.
struct TIoRange {
TIoRange(TRange location, TRange component, TBasicType basicType, int index)
: location(location), component(component), basicType(basicType), index(index) { }
bool overlap(const TIoRange& rhs) const
{
return location.overlap(rhs.location) && component.overlap(rhs.component) && index == rhs.index;
}
TRange location;
TRange component;
TBasicType basicType;
int index;
};
// An offset range is a 2-D rectangle; the set of (binding, offset) pairs all lying
// within the same binding and offset range.
struct TOffsetRange {
TOffsetRange(TRange binding, TRange offset)
: binding(binding), offset(offset) { }
bool overlap(const TOffsetRange& rhs) const
{
return binding.overlap(rhs.binding) && offset.overlap(rhs.offset);
}
TRange binding;
TRange offset;
};
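// Illustrative sketch (not part of glslang): the overlap tests above treat ranges as
// closed intervals, so for example:
//
//   TRange a(0, 3), b(3, 5), c(4, 6);
//   a.overlap(b);   // true  (both contain location 3)
//   a.overlap(c);   // false (a ends at 3, c starts at 4)
//
// Two TIoRanges only collide when location, component, and index all overlap, which is
// what lets different components of the same location be assigned to different variables.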
// Things that need to be tracked per xfb buffer.
struct TXfbBuffer {
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), containsDouble(false) { }
std::vector<TRange> ranges; // byte offsets that have already been assigned
unsigned int stride;
unsigned int implicitStride;
bool containsDouble;
};
// Track a set of strings describing how the module was processed.
// Using the form:
// process arg0 arg1 arg2 ...
// process arg0 arg1 arg2 ...
// where everything is textual, and there can be zero or more arguments
class TProcesses {
public:
TProcesses() {}
~TProcesses() {}
void addProcess(const char* process)
{
processes.push_back(process);
}
void addProcess(const std::string& process)
{
processes.push_back(process);
}
void addArgument(int arg)
{
processes.back().append(" ");
std::string argString = std::to_string(arg);
processes.back().append(argString);
}
void addArgument(const char* arg)
{
processes.back().append(" ");
processes.back().append(arg);
}
void addArgument(const std::string& arg)
{
processes.back().append(" ");
processes.back().append(arg);
}
void addIfNonZero(const char* process, int value)
{
if (value != 0) {
addProcess(process);
addArgument(value);
}
}
const std::vector<std::string>& getProcesses() const { return processes; }
private:
std::vector<std::string> processes;
};
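// Illustrative sketch (not part of glslang): the strings recorded here end up as
// OpModuleProcessed-style metadata. The process names below are placeholders:
//
//   TProcesses p;
//   p.addProcess("entry-point");
//   p.addArgument("main");              // -> "entry-point main"
//   p.addIfNonZero("some-shift", 0);    // ignored: zero values record nothing
//   p.addIfNonZero("some-shift", 4);    // -> "some-shift 4"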
class TSymbolTable;
class TSymbol;
class TVariable;
//
// Set of helper functions to help parse and build the tree.
//
class TIntermediate {
public:
explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
implicitThisName("@this"), implicitCounterName("@count"),
language(l), source(EShSourceNone), profile(p), version(v), treeRoot(0),
numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
inputPrimitive(ElgNone), outputPrimitive(ElgNone),
pixelCenterInteger(false), originUpperLeft(false),
vertexSpacing(EvsNone), vertexOrder(EvoNone), pointMode(false), earlyFragmentTests(false),
postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
hlslFunctionality1(false),
blendEquations(0), xfbMode(false), multiStream(false),
#ifdef NV_EXTENSIONS
layoutOverrideCoverage(false),
geoPassthroughEXT(false),
#endif
autoMapBindings(false),
autoMapLocations(false),
invertY(false),
flattenUniformArrays(false),
useUnknownFormat(false),
hlslOffsets(false),
useStorageBuffer(false),
hlslIoMapping(false),
textureSamplerTransformMode(EShTexSampTransKeep),
needToLegalize(false),
binaryDoubleOutput(false)
{
localSize[0] = 1;
localSize[1] = 1;
localSize[2] = 1;
localSizeSpecId[0] = TQualifier::layoutNotSet;
localSizeSpecId[1] = TQualifier::layoutNotSet;
localSizeSpecId[2] = TQualifier::layoutNotSet;
xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
shiftBinding.fill(0);
}
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
void output(TInfoSink&, bool tree);
void removeTree();
void setSource(EShSource s) { source = s; }
EShSource getSource() const { return source; }
void setEntryPointName(const char* ep)
{
entryPointName = ep;
processes.addProcess("entry-point");
processes.addArgument(entryPointName);
}
void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
const std::string& getEntryPointName() const { return entryPointName; }
const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
void setShiftBinding(TResourceType res, unsigned int shift)
{
shiftBinding[res] = shift;
const char* name = getResourceName(res);
if (name != nullptr)
processes.addIfNonZero(name, shift);
}
unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
{
if (shift == 0) // ignore if there's no shift: it's a no-op.
return;
shiftBindingForSet[res][set] = shift;
const char* name = getResourceName(res);
if (name != nullptr) {
processes.addProcess(name);
processes.addArgument(shift);
processes.addArgument(set);
}
}
int getShiftBindingForSet(TResourceType res, unsigned int set) const
{
const auto shift = shiftBindingForSet[res].find(set);
return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
}
bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
void setResourceSetBinding(const std::vector<std::string>& shift)
{
resourceSetBinding = shift;
if (shift.size() > 0) {
processes.addProcess("resource-set-binding");
for (int s = 0; s < (int)shift.size(); ++s)
processes.addArgument(shift[s]);
}
}
const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
void setAutoMapBindings(bool map)
{
autoMapBindings = map;
if (autoMapBindings)
processes.addProcess("auto-map-bindings");
}
bool getAutoMapBindings() const { return autoMapBindings; }
void setAutoMapLocations(bool map)
{
autoMapLocations = map;
if (autoMapLocations)
processes.addProcess("auto-map-locations");
}
bool getAutoMapLocations() const { return autoMapLocations; }
void setInvertY(bool invert)
{
invertY = invert;
if (invertY)
processes.addProcess("invert-y");
}
bool getInvertY() const { return invertY; }
void setFlattenUniformArrays(bool flatten)
{
flattenUniformArrays = flatten;
if (flattenUniformArrays)
processes.addProcess("flatten-uniform-arrays");
}
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
void setNoStorageFormat(bool b)
{
useUnknownFormat = b;
if (useUnknownFormat)
processes.addProcess("no-storage-format");
}
bool getNoStorageFormat() const { return useUnknownFormat; }
void setHlslOffsets()
{
hlslOffsets = true;
if (hlslOffsets)
processes.addProcess("hlsl-offsets");
}
bool usingHlslOFfsets() const { return hlslOffsets; }
void setUseStorageBuffer()
{
useStorageBuffer = true;
processes.addProcess("use-storage-buffer");
}
bool usingStorageBuffer() const { return useStorageBuffer; }
void setHlslIoMapping(bool b)
{
hlslIoMapping = b;
if (hlslIoMapping)
processes.addProcess("hlsl-iomap");
}
bool usingHlslIoMapping() { return hlslIoMapping; }
template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
bool hasCounterBufferName(const TString& name) const {
size_t len = strlen(implicitCounterName);
return name.size() > len &&
name.compare(name.size() - len, len, implicitCounterName) == 0;
}
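// Illustrative sketch (not part of glslang): with implicitCounterName being "@count",
// addCounterBufferName(TString("myBuffer")) yields "myBuffer@count", and
// hasCounterBufferName("myBuffer@count") is true while hasCounterBufferName("myBuffer")
// is false.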
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
void setVersion(int v) { version = v; }
int getVersion() const { return version; }
void setProfile(EProfile p) { profile = p; }
EProfile getProfile() const { return profile; }
void setSpv(const SpvVersion& s)
{
spvVersion = s;
// client processes
if (spvVersion.vulkan > 0)
processes.addProcess("client vulkan100");
if (spvVersion.openGl > 0)
processes.addProcess("client opengl100");
// target-environment processes
if (spvVersion.vulkan == 100)
processes.addProcess("target-env vulkan1.0");
else if (spvVersion.vulkan > 0)
processes.addProcess("target-env vulkanUnknown");
if (spvVersion.openGl > 0)
processes.addProcess("target-env opengl");
}
const SpvVersion& getSpv() const { return spvVersion; }
EShLanguage getStage() const { return language; }
void addRequestedExtension(const char* extension) { requestedExtensions.insert(extension); }
const std::set<std::string>& getRequestedExtensions() const { return requestedExtensions; }
void setTreeRoot(TIntermNode* r) { treeRoot = r; }
TIntermNode* getTreeRoot() const { return treeRoot; }
void incrementEntryPointCount() { ++numEntryPoints; }
int getNumEntryPoints() const { return numEntryPoints; }
int getNumErrors() const { return numErrors; }
void addPushConstantCount() { ++numPushConstants; }
bool isRecursive() const { return recursive; }
TIntermSymbol* addSymbol(const TVariable&);
TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TIntermSymbol&);
TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*) const;
std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1) const;
TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
TIntermTyped* addAssign(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
TIntermTyped* addIndex(TOperator op, TIntermTyped* base, TIntermTyped* index, TSourceLoc);
TIntermTyped* addUnaryMath(TOperator, TIntermTyped* child, TSourceLoc);
TIntermTyped* addBuiltInFunctionCall(const TSourceLoc& line, TOperator, bool unary, TIntermNode*, const TType& returnType);
bool canImplicitlyPromote(TBasicType from, TBasicType to, TOperator op = EOpNull) const;
bool isIntegralPromotion(TBasicType from, TBasicType to) const;
bool isFPPromotion(TBasicType from, TBasicType to) const;
bool isIntegralConversion(TBasicType from, TBasicType to) const;
bool isFPConversion(TBasicType from, TBasicType to) const;
bool isFPIntegralConversion(TBasicType from, TBasicType to) const;
TOperator mapTypeToConstructorOp(const TType&) const;
TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right);
TIntermAggregate* growAggregate(TIntermNode* left, TIntermNode* right, const TSourceLoc&);
TIntermAggregate* makeAggregate(TIntermNode* node);
TIntermAggregate* makeAggregate(TIntermNode* node, const TSourceLoc&);
TIntermAggregate* makeAggregate(const TSourceLoc&);
TIntermTyped* setAggregateOperator(TIntermNode*, TOperator, const TType& type, TSourceLoc);
bool areAllChildConst(TIntermAggregate* aggrNode);
TIntermSelection* addSelection(TIntermTyped* cond, TIntermNodePair code, const TSourceLoc&);
TIntermTyped* addSelection(TIntermTyped* cond, TIntermTyped* trueBlock, TIntermTyped* falseBlock, const TSourceLoc&);
TIntermTyped* addComma(TIntermTyped* left, TIntermTyped* right, const TSourceLoc&);
TIntermTyped* addMethod(TIntermTyped*, const TType&, const TString*, const TSourceLoc&);
TIntermConstantUnion* addConstantUnion(const TConstUnionArray&, const TType&, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(signed char, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(unsigned char, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(signed short, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(unsigned short, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(int, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(unsigned int, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(long long, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(unsigned long long, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(bool, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(double, TBasicType, const TSourceLoc&, bool literal = false) const;
TIntermConstantUnion* addConstantUnion(const TString*, const TSourceLoc&, bool literal = false) const;
TIntermTyped* promoteConstantUnion(TBasicType, TIntermConstantUnion*) const;
bool parseConstTree(TIntermNode*, TConstUnionArray, TOperator, const TType&, bool singleConstantParam = false);
TIntermLoop* addLoop(TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst, const TSourceLoc&);
TIntermAggregate* addForLoop(TIntermNode*, TIntermNode*, TIntermTyped*, TIntermTyped*, bool testFirst,
const TSourceLoc&, TIntermLoop*&);
TIntermBranch* addBranch(TOperator, const TSourceLoc&);
TIntermBranch* addBranch(TOperator, TIntermTyped*, const TSourceLoc&);
template<typename selectorType> TIntermTyped* addSwizzle(TSwizzleSelectors<selectorType>&, const TSourceLoc&);
// Low level functions to add nodes (no conversions or other higher level transformations)
// If a type is provided, the node's type will be set to it.
TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc) const;
TIntermBinary* addBinaryNode(TOperator op, TIntermTyped* left, TIntermTyped* right, TSourceLoc, const TType&) const;
TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc) const;
TIntermUnary* addUnaryNode(TOperator op, TIntermTyped* child, TSourceLoc, const TType&) const;
// Constant folding (in Constant.cpp)
TIntermTyped* fold(TIntermAggregate* aggrNode);
TIntermTyped* foldConstructor(TIntermAggregate* aggrNode);
TIntermTyped* foldDereference(TIntermTyped* node, int index, const TSourceLoc&);
TIntermTyped* foldSwizzle(TIntermTyped* node, TSwizzleSelectors<TVectorSelector>& fields, const TSourceLoc&);
// Tree ops
static const TIntermTyped* findLValueBase(const TIntermTyped*, bool swizzleOkay);
// Linkage related
void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
bool setInvocations(int i)
{
if (invocations != TQualifier::layoutNotSet)
return invocations == i;
invocations = i;
return true;
}
int getInvocations() const { return invocations; }
bool setVertices(int m)
{
if (vertices != TQualifier::layoutNotSet)
return vertices == m;
vertices = m;
return true;
}
int getVertices() const { return vertices; }
bool setInputPrimitive(TLayoutGeometry p)
{
if (inputPrimitive != ElgNone)
return inputPrimitive == p;
inputPrimitive = p;
return true;
}
TLayoutGeometry getInputPrimitive() const { return inputPrimitive; }
bool setVertexSpacing(TVertexSpacing s)
{
if (vertexSpacing != EvsNone)
return vertexSpacing == s;
vertexSpacing = s;
return true;
}
TVertexSpacing getVertexSpacing() const { return vertexSpacing; }
bool setVertexOrder(TVertexOrder o)
{
if (vertexOrder != EvoNone)
return vertexOrder == o;
vertexOrder = o;
return true;
}
TVertexOrder getVertexOrder() const { return vertexOrder; }
void setPointMode() { pointMode = true; }
bool getPointMode() const { return pointMode; }
bool setLocalSize(int dim, int size)
{
if (localSize[dim] > 1)
return size == localSize[dim];
localSize[dim] = size;
return true;
}
unsigned int getLocalSize(int dim) const { return localSize[dim]; }
bool setLocalSizeSpecId(int dim, int id)
{
if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
return id == localSizeSpecId[dim];
localSizeSpecId[dim] = id;
return true;
}
int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
void setXfbMode() { xfbMode = true; }
bool getXfbMode() const { return xfbMode; }
void setMultiStream() { multiStream = true; }
bool isMultiStream() const { return multiStream; }
bool setOutputPrimitive(TLayoutGeometry p)
{
if (outputPrimitive != ElgNone)
return outputPrimitive == p;
outputPrimitive = p;
return true;
}
TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; }
void setOriginUpperLeft() { originUpperLeft = true; }
bool getOriginUpperLeft() const { return originUpperLeft; }
void setPixelCenterInteger() { pixelCenterInteger = true; }
bool getPixelCenterInteger() const { return pixelCenterInteger; }
void setEarlyFragmentTests() { earlyFragmentTests = true; }
bool getEarlyFragmentTests() const { return earlyFragmentTests; }
void setPostDepthCoverage() { postDepthCoverage = true; }
bool getPostDepthCoverage() const { return postDepthCoverage; }
bool setDepth(TLayoutDepth d)
{
if (depthLayout != EldNone)
return depthLayout == d;
depthLayout = d;
return true;
}
TLayoutDepth getDepth() const { return depthLayout; }
void setDepthReplacing() { depthReplacing = true; }
bool isDepthReplacing() const { return depthReplacing; }
void setHlslFunctionality1() { hlslFunctionality1 = true; }
bool getHlslFunctionality1() const { return hlslFunctionality1; }
void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); }
unsigned int getBlendEquations() const { return blendEquations; }
void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
void merge(TInfoSink&, TIntermediate&);
void finalCheck(TInfoSink&, bool keepUncalled);
void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
int addUsedOffsets(int binding, int offset, int numOffsets);
bool addUsedConstantId(int id);
static int computeTypeLocationSize(const TType&, EShLanguage);
static int computeTypeUniformLocationSize(const TType&);
bool setXfbBufferStride(int buffer, unsigned stride)
{
if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
return xfbBuffers[buffer].stride == stride;
xfbBuffers[buffer].stride = stride;
return true;
}
unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
int addXfbBufferOffset(const TType&);
unsigned int computeTypeXfbSize(const TType&, bool& containsDouble) const;
static int getBaseAlignmentScalar(const TType&, int& size);
static int getBaseAlignment(const TType&, int& size, int& stride, bool std140, bool rowMajor);
static bool improperStraddle(const TType& type, int size, int offset);
bool promote(TIntermOperator*);
#ifdef NV_EXTENSIONS
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
#endif
const char* addSemanticName(const TString& name)
{
return semanticNameSet.insert(name).first->c_str();
}
void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
const std::string& getSourceFile() const { return sourceFile; }
void addSourceText(const char* text) { sourceText = sourceText + text; }
const std::string& getSourceText() const { return sourceText; }
void addProcesses(const std::vector<std::string>& p) {
for (int i = 0; i < (int)p.size(); ++i)
processes.addProcess(p[i]);
}
void addProcess(const std::string& process) { processes.addProcess(process); }
void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
void setNeedsLegalization() { needToLegalize = true; }
bool needsLegalization() const { return needToLegalize; }
void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
bool getBinaryDoubleOutput() { return binaryDoubleOutput; }
const char* const implicitThisName;
const char* const implicitCounterName;
protected:
TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
void error(TInfoSink& infoSink, const char*);
void warn(TInfoSink& infoSink, const char*);
void mergeBodies(TInfoSink&, TIntermSequence& globals, const TIntermSequence& unitGlobals);
void mergeLinkerObjects(TInfoSink&, TIntermSequence& linkerObjects, const TIntermSequence& unitLinkerObjects);
void mergeImplicitArraySizes(TType&, const TType&);
void mergeErrorCheck(TInfoSink&, const TIntermSymbol&, const TIntermSymbol&, bool crossStage);
void checkCallGraphCycles(TInfoSink&);
void checkCallGraphBodies(TInfoSink&, bool keepUncalled);
void inOutLocationCheck(TInfoSink&);
TIntermSequence& findLinkerObjects() const;
bool userOutputUsed() const;
bool isSpecializationOperation(const TIntermOperator&) const;
bool isNonuniformPropagating(TOperator) const;
bool promoteUnary(TIntermUnary&);
bool promoteBinary(TIntermBinary&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, TSymbolTable&, const TString&);
bool promoteAggregate(TIntermAggregate&);
void pushSelector(TIntermSequence&, const TVectorSelector&, const TSourceLoc&);
void pushSelector(TIntermSequence&, const TMatrixSelector&, const TSourceLoc&);
bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
TIntermUnary* createConversion(TBasicType convertTo, TIntermTyped* node) const;
std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
bool extensionRequested(const char *extension) const {return requestedExtensions.find(extension) != requestedExtensions.end();}
static const char* getResourceName(TResourceType);
const EShLanguage language; // stage, known at construction time
EShSource source; // source language, known a bit later
std::string entryPointName;
std::string entryPointMangledName;
EProfile profile; // source profile
int version; // source version
SpvVersion spvVersion;
TIntermNode* treeRoot;
std::set<std::string> requestedExtensions; // cumulation of all enabled or required extensions; not connected to what subset of the shader used them
TBuiltInResource resources;
int numEntryPoints;
int numErrors;
int numPushConstants;
bool recursive;
int invocations;
int vertices;
TLayoutGeometry inputPrimitive;
TLayoutGeometry outputPrimitive;
bool pixelCenterInteger;
bool originUpperLeft;
TVertexSpacing vertexSpacing;
TVertexOrder vertexOrder;
bool pointMode;
int localSize[3];
int localSizeSpecId[3];
bool earlyFragmentTests;
bool postDepthCoverage;
TLayoutDepth depthLayout;
bool depthReplacing;
bool hlslFunctionality1;
int blendEquations; // an 'or'ing of masks of shifts of TBlendEquationShift
bool xfbMode;
bool multiStream;
#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage;
bool geoPassthroughEXT;
#endif
// Base shift values
std::array<unsigned int, EResCount> shiftBinding;
// Per-descriptor-set shift values
std::array<std::map<int, int>, EResCount> shiftBindingForSet;
std::vector<std::string> resourceSetBinding;
bool autoMapBindings;
bool autoMapLocations;
bool invertY;
bool flattenUniformArrays;
bool useUnknownFormat;
bool hlslOffsets;
bool useStorageBuffer;
bool hlslIoMapping;
typedef std::list<TCall> TGraph;
TGraph callGraph;
std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::set<TString> semanticNameSet;
EShTextureSamplerTransformMode textureSamplerTransformMode;
// source code of shader, useful as part of debug information
std::string sourceFile;
std::string sourceText;
// for OpModuleProcessed, or equivalent
TProcesses processes;
bool needToLegalize;
bool binaryDoubleOutput;
private:
void operator=(TIntermediate&); // prevent assignments
};
} // end namespace glslang
#endif // _LOCAL_INTERMEDIATE_INCLUDED_

View File

@@ -0,0 +1,204 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// Traverse a tree of constants to create a single folded constant.
// It should only be used when the whole tree is known to be constant.
//
#include "ParseHelper.h"
namespace glslang {
class TConstTraverser : public TIntermTraverser {
public:
TConstTraverser(const TConstUnionArray& cUnion, bool singleConstParam, TOperator constructType, const TType& t)
: unionArray(cUnion), type(t),
constructorType(constructType), singleConstantParam(singleConstParam), error(false), isMatrix(false),
matrixCols(0), matrixRows(0) { index = 0; tOp = EOpNull; }
virtual void visitConstantUnion(TIntermConstantUnion* node);
virtual bool visitAggregate(TVisit, TIntermAggregate* node);
int index;
TConstUnionArray unionArray;
TOperator tOp;
const TType& type;
TOperator constructorType;
bool singleConstantParam;
bool error;
int size; // size of the constructor ( 4 for vec4)
bool isMatrix;
int matrixCols;
int matrixRows;
protected:
TConstTraverser(TConstTraverser&);
TConstTraverser& operator=(TConstTraverser&);
};
bool TConstTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
{
if (! node->isConstructor() && node->getOp() != EOpComma) {
error = true;
return false;
}
bool flag = node->getSequence().size() == 1 && node->getSequence()[0]->getAsTyped()->getAsConstantUnion();
if (flag) {
singleConstantParam = true;
constructorType = node->getOp();
size = node->getType().computeNumComponents();
if (node->getType().isMatrix()) {
isMatrix = true;
matrixCols = node->getType().getMatrixCols();
matrixRows = node->getType().getMatrixRows();
}
}
for (TIntermSequence::iterator p = node->getSequence().begin();
p != node->getSequence().end(); p++) {
if (node->getOp() == EOpComma)
index = 0;
(*p)->traverse(this);
}
if (flag)
{
singleConstantParam = false;
constructorType = EOpNull;
size = 0;
isMatrix = false;
matrixCols = 0;
matrixRows = 0;
}
return false;
}
void TConstTraverser::visitConstantUnion(TIntermConstantUnion* node)
{
TConstUnionArray leftUnionArray(unionArray);
int instanceSize = type.computeNumComponents();
if (index >= instanceSize)
return;
if (! singleConstantParam) {
int rightUnionSize = node->getType().computeNumComponents();
const TConstUnionArray& rightUnionArray = node->getConstArray();
for (int i = 0; i < rightUnionSize; i++) {
if (index >= instanceSize)
return;
leftUnionArray[index] = rightUnionArray[i];
index++;
}
} else {
int endIndex = index + size;
const TConstUnionArray& rightUnionArray = node->getConstArray();
if (! isMatrix) {
int count = 0;
int nodeComps = node->getType().computeNumComponents();
for (int i = index; i < endIndex; i++) {
if (i >= instanceSize)
return;
leftUnionArray[i] = rightUnionArray[count];
(index)++;
if (nodeComps > 1)
count++;
}
} else {
// constructing a matrix, but from what?
if (node->isMatrix()) {
// Matrix from a matrix; this has the outer matrix, node is the argument matrix.
// Traverse the outer, potentially bigger matrix, fill in missing pieces with the
// identity matrix.
for (int c = 0; c < matrixCols; ++c) {
for (int r = 0; r < matrixRows; ++r) {
int targetOffset = index + c * matrixRows + r;
if (r < node->getType().getMatrixRows() && c < node->getType().getMatrixCols()) {
int srcOffset = c * node->getType().getMatrixRows() + r;
leftUnionArray[targetOffset] = rightUnionArray[srcOffset];
} else if (r == c)
leftUnionArray[targetOffset].setDConst(1.0);
else
leftUnionArray[targetOffset].setDConst(0.0);
}
}
} else {
// matrix from vector
int count = 0;
const int startIndex = index;
int nodeComps = node->getType().computeNumComponents();
for (int i = startIndex; i < endIndex; i++) {
if (i >= instanceSize)
return;
if (i == startIndex || (i - startIndex) % (matrixRows + 1) == 0 )
leftUnionArray[i] = rightUnionArray[count];
else
leftUnionArray[i].setDConst(0.0);
index++;
if (nodeComps > 1)
count++;
}
}
}
}
}
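// Illustrative sketch (not part of glslang): the matrix cases above mirror GLSL
// constructor semantics. Building a larger matrix from a smaller one copies the
// overlapping corner and pads with the identity; building a matrix from a single
// scalar places that scalar on the diagonal and zero elsewhere:
//
//   mat3(mat2(1.0, 2.0, 3.0, 4.0))   // columns: (1,2,0), (3,4,0), (0,0,1)
//   mat2(5.0)                        // columns: (5,0), (0,5)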
bool TIntermediate::parseConstTree(TIntermNode* root, TConstUnionArray unionArray, TOperator constructorType, const TType& t, bool singleConstantParam)
{
if (root == 0)
return false;
TConstTraverser it(unionArray, singleConstantParam, constructorType, t);
root->traverse(&it);
if (it.error)
return true;
else
return false;
}
} // end namespace glslang

View File

@@ -0,0 +1,156 @@
//
// Copyright (C) 2016 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// This is implemented in Versions.cpp
#ifndef _PARSE_VERSIONS_INCLUDED_
#define _PARSE_VERSIONS_INCLUDED_
#include "../Public/ShaderLang.h"
#include "../Include/InfoSink.h"
#include "Scan.h"
#include <map>
namespace glslang {
//
// Base class for parse helpers.
// This just has version-related information and checking.
// This class should be sufficient for preprocessing.
//
class TParseVersions {
public:
TParseVersions(TIntermediate& interm, int version, EProfile profile,
const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
bool forwardCompatible, EShMessages messages)
: infoSink(infoSink), version(version), profile(profile), language(language),
spvVersion(spvVersion), forwardCompatible(forwardCompatible),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
virtual ~TParseVersions() { }
virtual void initializeExtensionBehavior();
virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual TExtensionBehavior getExtensionBehavior(const char*);
virtual bool extensionTurnedOn(const char* const extension);
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void doubleCheck(const TSourceLoc&, const char* op);
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool float16Arithmetic();
virtual void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int16Arithmetic();
virtual void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int8Arithmetic();
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
#ifdef AMD_EXTENSIONS
virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
#endif
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op);
virtual void requireVulkan(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char* op);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
void addError() { ++numErrors; }
int getNumErrors() const { return numErrors; }
void setScanner(TInputScanner* scanner) { currentScanner = scanner; }
TInputScanner* getScanner() const { return currentScanner; }
const TSourceLoc& getCurrentLoc() const { return currentScanner->getSourceLoc(); }
void setCurrentLine(int line) { currentScanner->setLine(line); }
void setCurrentColumn(int col) { currentScanner->setColumn(col); }
void setCurrentSourceName(const char* name) { currentScanner->setFile(name); }
void setCurrentString(int string) { currentScanner->setString(string); }
void getPreamble(std::string&);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
TInfoSink& infoSink;
// compilation mode
int version; // version, updated by #version in the shader
EProfile profile; // the declared profile in the shader (core by default)
EShLanguage language; // really the stage
SpvVersion spvVersion;
bool forwardCompatible; // true if errors are to be given for use of deprecated features
TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
protected:
TMap<TString, TExtensionBehavior> extensionBehavior; // for each extension string, what its current behavior is set to
EShMessages messages; // errors/warnings/rule-sets
int numErrors; // number of compile-time errors encountered
TInputScanner* currentScanner;
private:
explicit TParseVersions(const TParseVersions&);
TParseVersions& operator=(const TParseVersions&);
};
} // end namespace glslang
#endif // _PARSE_VERSIONS_INCLUDED_
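// Illustrative sketch (not part of glslang): profileRequires() above accepts a
// feature when the shader's #version meets the minimum for the queried profile,
// or when one of the listed extensions has been enabled (the real helper also
// masks by profile and reports failures through TParseVersions::error()). A
// standalone version of that gating rule, with hypothetical names and values:
#include <algorithm>
#include <string>
#include <vector>
namespace version_gate_sketch {
    struct ShaderState {
        int version = 110;                          // from "#version"
        std::vector<std::string> enabledExtensions; // from "#extension ... : enable"
    };
    // Returns true if the feature may be used; otherwise an error would be issued.
    inline bool featureAllowed(const ShaderState& s, int minVersion,
                               const std::vector<std::string>& extensions)
    {
        if (s.version >= minVersion)
            return true;
        for (const std::string& ext : extensions)
            if (std::find(s.enabledExtensions.begin(), s.enabledExtensions.end(), ext)
                    != s.enabledExtensions.end())
                return true;
        return false;
    }
} // namespace version_gate_sketch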

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,181 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#include <cassert>
#include <cstdlib>
#include <cstring>
#include "PpContext.h"
#include "PpTokens.h"
namespace {
using namespace glslang;
const struct {
int val;
const char* str;
} tokens[] = {
{ PPAtomAddAssign, "+=" },
{ PPAtomSubAssign, "-=" },
{ PPAtomMulAssign, "*=" },
{ PPAtomDivAssign, "/=" },
{ PPAtomModAssign, "%=" },
{ PpAtomRight, ">>" },
{ PpAtomLeft, "<<" },
{ PpAtomAnd, "&&" },
{ PpAtomOr, "||" },
{ PpAtomXor, "^^" },
{ PpAtomRightAssign, ">>=" },
{ PpAtomLeftAssign, "<<=" },
{ PpAtomAndAssign, "&=" },
{ PpAtomOrAssign, "|=" },
{ PpAtomXorAssign, "^=" },
{ PpAtomEQ, "==" },
{ PpAtomNE, "!=" },
{ PpAtomGE, ">=" },
{ PpAtomLE, "<=" },
{ PpAtomDecrement, "--" },
{ PpAtomIncrement, "++" },
{ PpAtomColonColon, "::" },
{ PpAtomDefine, "define" },
{ PpAtomUndef, "undef" },
{ PpAtomIf, "if" },
{ PpAtomElif, "elif" },
{ PpAtomElse, "else" },
{ PpAtomEndif, "endif" },
{ PpAtomIfdef, "ifdef" },
{ PpAtomIfndef, "ifndef" },
{ PpAtomLine, "line" },
{ PpAtomPragma, "pragma" },
{ PpAtomError, "error" },
{ PpAtomVersion, "version" },
{ PpAtomCore, "core" },
{ PpAtomCompatibility, "compatibility" },
{ PpAtomEs, "es" },
{ PpAtomExtension, "extension" },
{ PpAtomLineMacro, "__LINE__" },
{ PpAtomFileMacro, "__FILE__" },
{ PpAtomVersionMacro, "__VERSION__" },
{ PpAtomInclude, "include" },
};
} // end anonymous namespace
namespace glslang {
//
// Initialize the atom table.
//
TStringAtomMap::TStringAtomMap()
{
badToken.assign("<bad token>");
// Add single character tokens to the atom table:
const char* s = "~!%^&*()-+=|,.<>/?;:[]{}#\\";
char t[2];
t[1] = '\0';
while (*s) {
t[0] = *s;
addAtomFixed(t, s[0]);
s++;
}
// Add multiple character scanner tokens :
for (size_t ii = 0; ii < sizeof(tokens)/sizeof(tokens[0]); ii++)
addAtomFixed(tokens[ii].str, tokens[ii].val);
nextAtom = PpAtomLast;
}
} // end namespace glslang
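// Illustrative sketch (not part of glslang): the constructor above preloads the
// table with fixed atoms (single characters keep their char code, multi-character
// tokens get the PpAtom* values), and getAddAtom() in PpContext.h hands out fresh
// ids starting at PpAtomLast for anything else. A standalone version of that
// bi-directional mapping, with hypothetical names and values:
#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>
namespace atom_table_sketch {
    class AtomTable {
    public:
        explicit AtomTable(int firstDynamicAtom) : nextAtom(firstDynamicAtom) { }
        // Preload a string with a fixed atom value (mirrors addAtomFixed()).
        void addFixed(const std::string& s, int atom)
        {
            atomMap[s] = atom;
            if (stringMap.size() <= static_cast<size_t>(atom))
                stringMap.resize(atom + 1);
            stringMap[atom] = s;
        }
        // Return the existing atom for s, or invent a new one (mirrors getAddAtom()).
        int getAddAtom(const std::string& s)
        {
            auto it = atomMap.find(s);
            if (it != atomMap.end())
                return it->second;
            int atom = nextAtom++;
            addFixed(s, atom);
            return atom;
        }
        const std::string& getString(int atom) const { return stringMap[atom]; }
    private:
        std::unordered_map<std::string, int> atomMap;
        std::vector<std::string> stringMap;
        int nextAtom;
    };
    // Example: '+' keeps its character code, "define" gets a fixed id, and a
    // user macro name gets a dynamically allocated id above the fixed range.
    inline void example()
    {
        AtomTable table(/*firstDynamicAtom=*/200);  // stand-in for PpAtomLast
        table.addFixed("+", '+');
        table.addFixed("define", 150);              // stand-in for PpAtomDefine
        int macroAtom = table.getAddAtom("MY_MACRO");
        assert(table.getString(macroAtom) == "MY_MACRO");
        assert(table.getAddAtom("MY_MACRO") == macroAtom);
    }
} // namespace atom_table_sketch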

View File

@ -0,0 +1,118 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#include <cstdlib>
#include <locale>
#include "PpContext.h"
namespace glslang {
TPpContext::TPpContext(TParseContextBase& pc, const std::string& rootFileName, TShader::Includer& inclr) :
preamble(0), strings(0), previous_token('\n'), parseContext(pc), includer(inclr), inComment(false),
rootFileName(rootFileName),
currentSourceFile(rootFileName)
{
ifdepth = 0;
for (elsetracker = 0; elsetracker < maxIfNesting; elsetracker++)
elseSeen[elsetracker] = false;
elsetracker = 0;
strtodStream.imbue(std::locale::classic());
}
TPpContext::~TPpContext()
{
delete [] preamble;
// free up the inputStack
while (! inputStack.empty())
popInput();
}
void TPpContext::setInput(TInputScanner& input, bool versionWillBeError)
{
assert(inputStack.size() == 0);
pushInput(new tStringInput(this, input));
errorOnVersion = versionWillBeError;
versionSeen = false;
}
} // end namespace glslang
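// Illustrative sketch (not part of glslang): the tStringInput that setInput()
// pushes (declared in PpContext.h) hides escaped newlines from the caller and
// delivers every newline form (\n, \r, \r\n) as a single '\n'. A standalone
// version of that normalization, with hypothetical names:
#include <string>
namespace newline_sketch {
    inline std::string normalize(const std::string& in)
    {
        std::string out;
        for (size_t i = 0; i < in.size(); ++i) {
            char c = in[i];
            if (c == '\\' && i + 1 < in.size() && (in[i + 1] == '\n' || in[i + 1] == '\r')) {
                ++i;                                         // drop the line continuation
                if (in[i] == '\r' && i + 1 < in.size() && in[i + 1] == '\n')
                    ++i;                                     // treat \r\n as one newline
                continue;
            }
            if (c == '\r') {                                 // \r or \r\n becomes '\n'
                if (i + 1 < in.size() && in[i + 1] == '\n')
                    ++i;
                out.push_back('\n');
                continue;
            }
            out.push_back(c);
        }
        return out;
    }
} // namespace newline_sketch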

View File

@ -0,0 +1,637 @@
//
// Copyright (C) 2013 LunarG, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#ifndef PPCONTEXT_H
#define PPCONTEXT_H
#include <stack>
#include <unordered_map>
#include <sstream>
#include "../ParseHelper.h"
/* windows only pragma */
#ifdef _MSC_VER
#pragma warning(disable : 4127)
#endif
namespace glslang {
class TPpToken {
public:
TPpToken() { clear(); }
void clear()
{
space = false;
i64val = 0;
loc.init();
name[0] = 0;
}
// Used for comparing macro definitions, so it checks only what is relevant for that.
bool operator==(const TPpToken& right)
{
return space == right.space &&
ival == right.ival && dval == right.dval && i64val == right.i64val &&
strncmp(name, right.name, MaxTokenLength) == 0;
}
bool operator!=(const TPpToken& right) { return ! operator==(right); }
TSourceLoc loc;
// True if a space (for white space or a removed comment) should also be
// recognized, in front of the token returned:
bool space;
// Numeric value of the token:
union {
int ival;
double dval;
long long i64val;
};
// Text string of the token:
char name[MaxTokenLength + 1];
};
class TStringAtomMap {
//
// Implementation is in PpAtom.cpp
//
// Maintain a bi-directional mapping between relevant preprocessor strings and
// "atoms" which a unique integers (small, contiguous, not hash-like) per string.
//
public:
TStringAtomMap();
// Map string -> atom.
// Return 0 if no existing string.
int getAtom(const char* s) const
{
auto it = atomMap.find(s);
return it == atomMap.end() ? 0 : it->second;
}
// Map a new or existing string -> atom, inventing a new atom if necessary.
int getAddAtom(const char* s)
{
int atom = getAtom(s);
if (atom == 0) {
atom = nextAtom++;
addAtomFixed(s, atom);
}
return atom;
}
// Map atom -> string.
const char* getString(int atom) const { return stringMap[atom]->c_str(); }
protected:
TStringAtomMap(TStringAtomMap&);
TStringAtomMap& operator=(TStringAtomMap&);
TUnorderedMap<TString, int> atomMap;
TVector<const TString*> stringMap; // these point into the TString in atomMap
int nextAtom;
// Bad source characters can lead to bad atoms, so gracefully handle those by
// pre-filling the table with them (to avoid if tests later).
TString badToken;
// Add bi-directional mappings:
// - string -> atom
// - atom -> string
void addAtomFixed(const char* s, int atom)
{
auto it = atomMap.insert(std::pair<TString, int>(s, atom)).first;
if (stringMap.size() < (size_t)atom + 1)
stringMap.resize(atom + 100, &badToken);
stringMap[atom] = &it->first;
}
};
class TInputScanner;
enum MacroExpandResult {
MacroExpandNotStarted, // macro not expanded, which might not be an error
MacroExpandError, // a clear error occurred while expanding, no expansion
MacroExpandStarted, // macro expansion process has started
MacroExpandUndef // macro is undefined and will be expanded
};
// This class is the result of turning a huge pile of C code communicating through globals
// into a class. This was done to allow instancing to attain thread safety.
// Don't expect too much in terms of OO design.
class TPpContext {
public:
TPpContext(TParseContextBase&, const std::string& rootFileName, TShader::Includer&);
virtual ~TPpContext();
void setPreamble(const char* preamble, size_t length);
int tokenize(TPpToken& ppToken);
int tokenPaste(int token, TPpToken&);
class tInput {
public:
tInput(TPpContext* p) : done(false), pp(p) { }
virtual ~tInput() { }
virtual int scan(TPpToken*) = 0;
virtual int getch() = 0;
virtual void ungetch() = 0;
virtual bool peekPasting() { return false; } // true when about to see ##
virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
virtual bool isMacroInput() { return false; }
// Will be called when we start reading tokens from this instance
virtual void notifyActivated() {}
// Will be called when we do not read tokens from this instance anymore
virtual void notifyDeleted() {}
protected:
bool done;
TPpContext* pp;
};
void setInput(TInputScanner& input, bool versionWillBeError);
void pushInput(tInput* in)
{
inputStack.push_back(in);
in->notifyActivated();
}
void popInput()
{
inputStack.back()->notifyDeleted();
delete inputStack.back();
inputStack.pop_back();
}
//
// From PpTokens.cpp
//
class TokenStream {
public:
TokenStream() : current(0) { }
void putToken(int token, TPpToken* ppToken);
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return current >= data.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { current = 0; }
protected:
void putSubtoken(char);
int getSubtoken();
void ungetSubtoken();
TVector<unsigned char> data;
size_t current;
};
//
// From Pp.cpp
//
struct MacroSymbol {
MacroSymbol() : emptyArgs(0), busy(0), undef(0) { }
TVector<int> args;
TokenStream body;
unsigned emptyArgs : 1;
unsigned busy : 1;
unsigned undef : 1;
};
typedef TMap<int, MacroSymbol> TSymbolMap;
TSymbolMap macroDefs; // map atoms to macro definitions
MacroSymbol* lookupMacroDef(int atom)
{
auto existingMacroIt = macroDefs.find(atom);
return (existingMacroIt == macroDefs.end()) ? nullptr : &(existingMacroIt->second);
}
void addMacroDef(int atom, MacroSymbol& macroDef) { macroDefs[atom] = macroDef; }
protected:
TPpContext(TPpContext&);
TPpContext& operator=(TPpContext&);
TStringAtomMap atomStrings;
char* preamble; // string to parse, all before line 1 of string 0; null if there is no preamble
int preambleLength;
char** strings; // official strings of shader, starting at string 0, line 1
size_t* lengths;
int numStrings; // how many official strings there are
int currentString; // which string we're currently parsing (-1 for preamble)
// Scanner data:
int previous_token;
TParseContextBase& parseContext;
// Get the next token from *stack* of input sources, popping input sources
// that are out of tokens, down until an input source is found that has a token.
// Return EndOfInput when there are no more tokens to be found by doing this.
int scanToken(TPpToken* ppToken)
{
int token = EndOfInput;
while (! inputStack.empty()) {
token = inputStack.back()->scan(ppToken);
if (token != EndOfInput || inputStack.empty())
break;
popInput();
}
return token;
}
int getChar() { return inputStack.back()->getch(); }
void ungetChar() { inputStack.back()->ungetch(); }
bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
static const int maxIfNesting = 65;
int ifdepth; // current #if-#else-#endif nesting in the cpp.c file (pre-processor)
bool elseSeen[maxIfNesting]; // Keep track of whether an else has been seen at a particular depth
int elsetracker; // counter for #if-#else-#endif constructs
class tMacroInput : public tInput {
public:
tMacroInput(TPpContext* pp) : tInput(pp), prepaste(false), postpaste(false) { }
virtual ~tMacroInput()
{
for (size_t i = 0; i < args.size(); ++i)
delete args[i];
for (size_t i = 0; i < expandedArgs.size(); ++i)
delete expandedArgs[i];
}
virtual int scan(TPpToken*) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool endOfReplacementList() override { return mac->body.atEnd(); }
bool isMacroInput() override { return true; }
MacroSymbol *mac;
TVector<TokenStream*> args;
TVector<TokenStream*> expandedArgs;
protected:
bool prepaste; // true if we are just before ##
bool postpaste; // true if we are right after ##
};
class tMarkerInput : public tInput {
public:
tMarkerInput(TPpContext* pp) : tInput(pp) { }
virtual int scan(TPpToken*) override
{
if (done)
return EndOfInput;
done = true;
return marker;
}
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
static const int marker = -3;
};
class tZeroInput : public tInput {
public:
tZeroInput(TPpContext* pp) : tInput(pp) { }
virtual int scan(TPpToken*) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
};
std::vector<tInput*> inputStack;
bool errorOnVersion;
bool versionSeen;
//
// from Pp.cpp
//
// Used to obtain #include content.
TShader::Includer& includer;
int CPPdefine(TPpToken * ppToken);
int CPPundef(TPpToken * ppToken);
int CPPelse(int matchelse, TPpToken * ppToken);
int extraTokenCheck(int atom, TPpToken* ppToken, int token);
int eval(int token, int precedence, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
int evalToToken(int token, bool shortCircuit, int& res, bool& err, TPpToken * ppToken);
int CPPif (TPpToken * ppToken);
int CPPifdef(int defined, TPpToken * ppToken);
int CPPinclude(TPpToken * ppToken);
int CPPline(TPpToken * ppToken);
int CPPerror(TPpToken * ppToken);
int CPPpragma(TPpToken * ppToken);
int CPPversion(TPpToken * ppToken);
int CPPextension(TPpToken * ppToken);
int readCPPline(TPpToken * ppToken);
int scanHeaderName(TPpToken* ppToken, char delimit);
TokenStream* PrescanMacroArg(TokenStream&, TPpToken*, bool newLineOkay);
MacroExpandResult MacroExpand(TPpToken* ppToken, bool expandUndef, bool newLineOkay);
//
// From PpTokens.cpp
//
void pushTokenStreamInput(TokenStream&, bool pasting = false);
void UngetToken(int token, TPpToken*);
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
protected:
TokenStream* tokens;
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
};
class tUngotTokenInput : public tInput {
public:
tUngotTokenInput(TPpContext* pp, int t, TPpToken* p) : tInput(pp), token(t), lval(*p) { }
virtual int scan(TPpToken *) override;
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
protected:
int token;
TPpToken lval;
};
//
// From PpScanner.cpp
//
class tStringInput : public tInput {
public:
tStringInput(TPpContext* pp, TInputScanner& i) : tInput(pp), input(&i) { }
virtual int scan(TPpToken*) override;
// Scanner used to get source stream characters.
// - Escaped newlines are handled here, invisibly to the caller.
// - All forms of newline are handled, and turned into just a '\n'.
int getch() override
{
int ch = input->get();
if (ch == '\\') {
// Move past escaped newlines, as many as sequentially exist
do {
if (input->peek() == '\r' || input->peek() == '\n') {
bool allowed = pp->parseContext.lineContinuationCheck(input->getSourceLoc(), pp->inComment);
if (! allowed && pp->inComment)
return '\\';
// escape one newline now
ch = input->get();
int nextch = input->get();
if (ch == '\r' && nextch == '\n')
ch = input->get();
else
ch = nextch;
} else
return '\\';
} while (ch == '\\');
}
// handle any non-escaped newline
if (ch == '\r' || ch == '\n') {
if (ch == '\r' && input->peek() == '\n')
input->get();
return '\n';
}
return ch;
}
// Scanner used to back up the source stream characters. Newlines are
// handled here, invisibly to the caller, meaning we have to undo exactly
// what getch() above does (e.g., don't leave things in the middle of a
// sequence of escaped newlines).
void ungetch() override
{
input->unget();
do {
int ch = input->peek();
if (ch == '\r' || ch == '\n') {
if (ch == '\n') {
// correct for two-character newline
input->unget();
if (input->peek() != '\r')
input->get();
}
// now in front of a complete newline, move past an escape character
input->unget();
if (input->peek() == '\\')
input->unget();
else {
input->get();
break;
}
} else
break;
} while (true);
}
protected:
TInputScanner* input;
};
// Holds a reference to included file data, as well as a
// prologue and an epilogue string. This can be scanned using the tInput
// interface and acts as a single source string.
class TokenizableIncludeFile : public tInput {
public:
// Copies prologue and epilogue. The includedFile must remain valid
// until this TokenizableIncludeFile is no longer used.
TokenizableIncludeFile(const TSourceLoc& startLoc,
const std::string& prologue,
TShader::Includer::IncludeResult* includedFile,
const std::string& epilogue,
TPpContext* pp)
: tInput(pp),
prologue_(prologue),
epilogue_(epilogue),
includedFile_(includedFile),
scanner(3, strings, lengths, names, 0, 0, true),
prevScanner(nullptr),
stringInput(pp, scanner)
{
strings[0] = prologue_.data();
strings[1] = includedFile_->headerData;
strings[2] = epilogue_.data();
lengths[0] = prologue_.size();
lengths[1] = includedFile_->headerLength;
lengths[2] = epilogue_.size();
scanner.setLine(startLoc.line);
scanner.setString(startLoc.string);
scanner.setFile(startLoc.name, 0);
scanner.setFile(startLoc.name, 1);
scanner.setFile(startLoc.name, 2);
}
// tInput methods:
int scan(TPpToken* t) override { return stringInput.scan(t); }
int getch() override { return stringInput.getch(); }
void ungetch() override { stringInput.ungetch(); }
void notifyActivated() override
{
prevScanner = pp->parseContext.getScanner();
pp->parseContext.setScanner(&scanner);
pp->push_include(includedFile_);
}
void notifyDeleted() override
{
pp->parseContext.setScanner(prevScanner);
pp->pop_include();
}
private:
TokenizableIncludeFile& operator=(const TokenizableIncludeFile&);
// Stores the prologue for this string.
const std::string prologue_;
// Stores the epilogue for this string.
const std::string epilogue_;
// Points to the IncludeResult that this TokenizableIncludeFile represents.
TShader::Includer::IncludeResult* includedFile_;
// Will point to prologue_, includedFile_->headerData and epilogue_
// This is passed to scanner constructor.
// These do not own the storage and it must remain valid until this
// object has been destroyed.
const char* strings[3];
// Length of str_, passed to scanner constructor.
size_t lengths[3];
// String names
const char* names[3];
// Scans over str_.
TInputScanner scanner;
// The previous effective scanner before the scanner in this instance
// has been activated.
TInputScanner* prevScanner;
// Delegate object implementing the tInput interface.
tStringInput stringInput;
};
int ScanFromString(char* s);
void missingEndifCheck();
int lFloatConst(int len, int ch, TPpToken* ppToken);
int characterLiteral(TPpToken* ppToken);
void push_include(TShader::Includer::IncludeResult* result)
{
currentSourceFile = result->headerName;
includeStack.push(result);
}
void pop_include()
{
TShader::Includer::IncludeResult* include = includeStack.top();
includeStack.pop();
includer.releaseInclude(include);
if (includeStack.empty()) {
currentSourceFile = rootFileName;
} else {
currentSourceFile = includeStack.top()->headerName;
}
}
bool inComment;
std::string rootFileName;
std::stack<TShader::Includer::IncludeResult*> includeStack;
std::string currentSourceFile;
std::istringstream strtodStream;
};
} // end namespace glslang
#endif // PPCONTEXT_H
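// Illustrative sketch (not part of glslang): scanToken() above asks the input
// source on top of inputStack for a token and pops sources that are exhausted
// until one delivers (the real code also re-checks the stack because a scan may
// pop inputs itself). A standalone version of that pattern, with hypothetical names:
#include <memory>
#include <vector>
namespace input_stack_sketch {
    constexpr int EndOfInput = -1;
    struct Input {
        virtual ~Input() = default;
        virtual int scan() = 0;          // next token, or EndOfInput when exhausted
    };
    // An input source that replays a fixed list of tokens (stands in for macro
    // expansions, ungot tokens, include files, or the source string itself).
    struct FixedInput : Input {
        explicit FixedInput(std::vector<int> t) : tokens(std::move(t)) { }
        int scan() override { return pos < tokens.size() ? tokens[pos++] : EndOfInput; }
        std::vector<int> tokens;
        size_t pos = 0;
    };
    struct Context {
        std::vector<std::unique_ptr<Input>> inputStack;
        void pushInput(std::unique_ptr<Input> in) { inputStack.push_back(std::move(in)); }
        // Pop exhausted inputs until one yields a token or the stack is empty.
        int scanToken()
        {
            while (!inputStack.empty()) {
                int token = inputStack.back()->scan();
                if (token != EndOfInput)
                    return token;
                inputStack.pop_back();   // fall back to the source underneath
            }
            return EndOfInput;
        }
    };
} // namespace input_stack_sketch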

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,338 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013 LunarG, Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
//
// For recording and playing back the stream of tokens in a macro definition.
//
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS
#endif
#if (defined(_MSC_VER) && _MSC_VER < 1900 /*vs2015*/)
#define snprintf sprintf_s
#endif
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <cctype>
#include "PpContext.h"
#include "PpTokens.h"
namespace glslang {
namespace {
// When recording (and playing back) should the backing name string
// be saved (restored)?
bool SaveName(int atom)
{
switch (atom) {
case PpAtomIdentifier:
case PpAtomConstString:
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
// When recording (and playing back) should the numeric value
// be saved (restored)?
bool SaveValue(int atom)
{
switch (atom) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
}
// push onto back of stream
void TPpContext::TokenStream::putSubtoken(char subtoken)
{
data.push_back(static_cast<unsigned char>(subtoken));
}
// get the next token in stream
int TPpContext::TokenStream::getSubtoken()
{
if (current < data.size())
return data[current++];
else
return EndOfInput;
}
// back up one position in the stream
void TPpContext::TokenStream::ungetSubtoken()
{
if (current > 0)
--current;
}
// Add a complete token (including backing string) to the end of a list
// for later playback.
void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
{
// save the atom
assert((atom & ~0xff) == 0);
putSubtoken(static_cast<char>(atom));
// save the backing name string
if (SaveName(atom)) {
const char* s = ppToken->name;
while (*s)
putSubtoken(*s++);
putSubtoken(0);
}
// save the numeric value
if (SaveValue(atom)) {
const char* n = reinterpret_cast<const char*>(&ppToken->i64val);
for (size_t i = 0; i < sizeof(ppToken->i64val); ++i)
putSubtoken(*n++);
}
}
// Read the next token from a token stream.
// (Not the source stream, but a stream used to hold a tokenized macro).
int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
{
// get the atom
int atom = getSubtoken();
if (atom == EndOfInput)
return atom;
// init the token
ppToken->clear();
ppToken->loc = parseContext.getCurrentLoc();
// get the backing name string
if (SaveName(atom)) {
int ch = getSubtoken();
int len = 0;
while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) {
ppToken->name[len] = (char)ch;
len++;
ch = getSubtoken();
} else {
parseContext.error(ppToken->loc, "token too long", "", "");
break;
}
}
ppToken->name[len] = 0;
}
// Check for ##, unless the current # is the last character
if (atom == '#') {
if (current < data.size()) {
if (getSubtoken() == '#') {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
atom = PpAtomPaste;
} else
ungetSubtoken();
}
}
// get the numeric value
if (SaveValue(atom)) {
char* n = reinterpret_cast<char*>(&ppToken->i64val);
for (size_t i = 0; i < sizeof(ppToken->i64val); ++i)
*n++ = (char)getSubtoken();
}
return atom;
}
// We are pasting if
// 1. we are preceding a pasting operator within this stream
// or
// 2. the entire macro is preceding a pasting operator (lastTokenPastes)
// and we are also on the last token
bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
{
// 1. preceding ##?
size_t savePos = current;
int subtoken;
// skip white space
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
current = savePos;
if (subtoken == PpAtomPaste)
return true;
// 2. last token and we've been told after this there will be a ##
if (! lastTokenPastes)
return false;
// Getting here means the last token will be pasted, after this
// Are we at the last non-whitespace token?
savePos = current;
bool moreTokens = false;
do {
subtoken = getSubtoken();
if (subtoken == EndOfInput)
break;
if (subtoken != ' ') {
moreTokens = true;
break;
}
} while (true);
current = savePos;
return !moreTokens;
}
// See if the next non-white-space tokens are two consecutive #
bool TPpContext::TokenStream::peekUntokenizedPasting()
{
// don't return early, have to restore this
size_t savePos = current;
// skip white-space
int subtoken;
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = getSubtoken();
if (subtoken == '#')
pasting = true;
}
current = savePos;
return pasting;
}
void TPpContext::pushTokenStreamInput(TokenStream& ts, bool prepasting)
{
pushInput(new tTokenInput(this, &ts, prepasting));
ts.reset();
}
int TPpContext::tUngotTokenInput::scan(TPpToken* ppToken)
{
if (done)
return EndOfInput;
int ret = token;
*ppToken = lval;
done = true;
return ret;
}
void TPpContext::UngetToken(int token, TPpToken* ppToken)
{
pushInput(new tUngotTokenInput(this, token, ppToken));
}
} // end namespace glslang
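// Illustrative sketch (not part of glslang): putToken()/getToken() above record
// each token into a flat byte vector as a one-byte atom, optionally followed by
// its NUL-terminated name (SaveName) and/or the raw bytes of its 64-bit value
// (SaveValue), and play it back in the same order. A standalone version of that
// layout, with hypothetical names; here the caller supplies the save flags that
// the real code derives from the atom:
#include <cstdint>
#include <cstring>
#include <string>
#include <vector>
namespace token_stream_sketch {
    struct Stream {
        std::vector<unsigned char> data;
        size_t current = 0;
        void putToken(unsigned char atom, const std::string& name, int64_t value,
                      bool saveName, bool saveValue)
        {
            data.push_back(atom);
            if (saveName) {                       // backing string, NUL-terminated
                for (char c : name)
                    data.push_back(static_cast<unsigned char>(c));
                data.push_back(0);
            }
            if (saveValue) {                      // raw bytes of the numeric value
                unsigned char bytes[sizeof value];
                std::memcpy(bytes, &value, sizeof value);
                data.insert(data.end(), bytes, bytes + sizeof value);
            }
        }
        // Replays one token; returns false at the end of the stream.
        bool getToken(unsigned char& atom, std::string& name, int64_t& value,
                      bool savedName, bool savedValue)
        {
            if (current >= data.size())
                return false;
            atom = data[current++];
            name.clear();
            if (savedName) {
                while (current < data.size() && data[current] != 0)
                    name.push_back(static_cast<char>(data[current++]));
                ++current;                        // skip the terminating NUL
            }
            if (savedValue) {
                std::memcpy(&value, data.data() + current, sizeof value);
                current += sizeof value;
            }
            return true;
        }
    };
} // namespace token_stream_sketch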

View File

@ -0,0 +1,179 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
/****************************************************************************\
Copyright (c) 2002, NVIDIA Corporation.
NVIDIA Corporation("NVIDIA") supplies this software to you in
consideration of your agreement to the following terms, and your use,
installation, modification or redistribution of this NVIDIA software
constitutes acceptance of these terms. If you do not agree with these
terms, please do not use, install, modify or redistribute this NVIDIA
software.
In consideration of your agreement to abide by the following terms, and
subject to these terms, NVIDIA grants you a personal, non-exclusive
license, under NVIDIA's copyrights in this original NVIDIA software (the
"NVIDIA Software"), to use, reproduce, modify and redistribute the
NVIDIA Software, with or without modifications, in source and/or binary
forms; provided that if you redistribute the NVIDIA Software, you must
retain the copyright notice of NVIDIA, this notice and the following
text and disclaimers in all such redistributions of the NVIDIA Software.
Neither the name, trademarks, service marks nor logos of NVIDIA
Corporation may be used to endorse or promote products derived from the
NVIDIA Software without specific prior written permission from NVIDIA.
Except as expressly stated in this notice, no other rights or licenses
express or implied, are granted by NVIDIA herein, including but not
limited to any patent rights that may be infringed by your derivative
works or by other works in which the NVIDIA Software may be
incorporated. No hardware is licensed hereunder.
THE NVIDIA SOFTWARE IS BEING PROVIDED ON AN "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED,
INCLUDING WITHOUT LIMITATION, WARRANTIES OR CONDITIONS OF TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, OR
ITS USE AND OPERATION EITHER ALONE OR IN COMBINATION WITH OTHER
PRODUCTS.
IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT,
INCIDENTAL, EXEMPLARY, CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, LOST PROFITS; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) OR ARISING IN ANY WAY
OUT OF THE USE, REPRODUCTION, MODIFICATION AND/OR DISTRIBUTION OF THE
NVIDIA SOFTWARE, HOWEVER CAUSED AND WHETHER UNDER THEORY OF CONTRACT,
TORT (INCLUDING NEGLIGENCE), STRICT LIABILITY OR OTHERWISE, EVEN IF
NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\****************************************************************************/
#ifndef PARSER_H
#define PARSER_H
namespace glslang {
// Multi-character tokens
enum EFixedAtoms {
// single character tokens get their own char value as their token; start here for multi-character tokens
PpAtomMaxSingle = 127,
// replace bad character tokens with this, to avoid accidental aliasing with the below
PpAtomBadToken,
// Operators
PPAtomAddAssign,
PPAtomSubAssign,
PPAtomMulAssign,
PPAtomDivAssign,
PPAtomModAssign,
PpAtomRight,
PpAtomLeft,
PpAtomRightAssign,
PpAtomLeftAssign,
PpAtomAndAssign,
PpAtomOrAssign,
PpAtomXorAssign,
PpAtomAnd,
PpAtomOr,
PpAtomXor,
PpAtomEQ,
PpAtomNE,
PpAtomGE,
PpAtomLE,
PpAtomDecrement,
PpAtomIncrement,
PpAtomColonColon,
PpAtomPaste,
// Constants
PpAtomConstInt,
PpAtomConstUint,
PpAtomConstInt64,
PpAtomConstUint64,
PpAtomConstInt16,
PpAtomConstUint16,
PpAtomConstFloat,
PpAtomConstDouble,
PpAtomConstFloat16,
PpAtomConstString,
// Identifiers
PpAtomIdentifier,
// preprocessor "keywords"
PpAtomDefine,
PpAtomUndef,
PpAtomIf,
PpAtomIfdef,
PpAtomIfndef,
PpAtomElse,
PpAtomElif,
PpAtomEndif,
PpAtomLine,
PpAtomPragma,
PpAtomError,
// #version ...
PpAtomVersion,
PpAtomCore,
PpAtomCompatibility,
PpAtomEs,
// #extension
PpAtomExtension,
// __LINE__, __FILE__, __VERSION__
PpAtomLineMacro,
PpAtomFileMacro,
PpAtomVersionMacro,
// #include
PpAtomInclude,
PpAtomLast,
};
} // end namespace glslang
#endif /* not PARSER_H */
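// Illustrative note (not part of glslang): atoms at or below PpAtomMaxSingle are
// the character codes of single-character tokens, so they can be printed
// directly, while everything above 127 is a named multi-character atom looked up
// in the atom table. A small standalone helper showing that split, with
// hypothetical names:
#include <string>
namespace atom_range_sketch {
    constexpr int MaxSingle = 127;   // stand-in for PpAtomMaxSingle
    // Render an atom for diagnostics: single-character atoms print as themselves;
    // anything above the single-character range is assumed to have a table entry.
    inline std::string describeAtom(int atom)
    {
        if (atom > 0 && atom <= MaxSingle)
            return std::string(1, static_cast<char>(atom));
        return "<multi-character atom #" + std::to_string(atom) + ">";
    }
} // namespace atom_range_sketch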

View File

@ -0,0 +1,866 @@
//
// Copyright (C) 2015-2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Visit the nodes in the glslang intermediate tree representation to
// propagate the 'noContraction' qualifier.
//
#include "propagateNoContraction.h"
#include <cstdlib>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include "localintermediate.h"
namespace {
// Use a string to hold the access chain information, as in most cases the
// access chain is short and may contain only one element, which is the symbol
// ID.
// Example: struct {float a; float b;} s;
// Object s.a will be represented with: <symbol ID of s>/0
// Object s.b will be represented with: <symbol ID of s>/1
// Object s will be represented with: <symbol ID of s>
// Members of vectors, matrices and arrays are represented with the same
// symbol ID as their container symbol objects, because their preciseness
// is always the same as that of their containers.
typedef std::string ObjectAccessChain;
// The delimiter used in the ObjectAccessChain string to separate symbol ID and
// different levels of struct indices.
const char ObjectAccesschainDelimiter = '/';
// Mapping from Symbol IDs of symbol nodes, to their defining operation
// nodes.
typedef std::unordered_multimap<ObjectAccessChain, glslang::TIntermOperator*> NodeMapping;
// Mapping from object nodes to their access chain info string.
typedef std::unordered_map<glslang::TIntermTyped*, ObjectAccessChain> AccessChainMapping;
// Set of object IDs.
typedef std::unordered_set<ObjectAccessChain> ObjectAccesschainSet;
// Set of return branch nodes.
typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
// A helper function to tell whether a node is 'noContraction'. Returns true if
// the node has 'noContraction' qualifier, otherwise false.
bool isPreciseObjectNode(glslang::TIntermTyped* node)
{
return node->getType().getQualifier().noContraction;
}
// Returns true if the opcode is a dereferencing one.
bool isDereferenceOperation(glslang::TOperator op)
{
switch (op) {
case glslang::EOpIndexDirect:
case glslang::EOpIndexDirectStruct:
case glslang::EOpIndexIndirect:
case glslang::EOpVectorSwizzle:
case glslang::EOpMatrixSwizzle:
return true;
default:
return false;
}
}
// Returns true if the opcode leads to an assignment operation.
bool isAssignOperation(glslang::TOperator op)
{
switch (op) {
case glslang::EOpAssign:
case glslang::EOpAddAssign:
case glslang::EOpSubAssign:
case glslang::EOpMulAssign:
case glslang::EOpVectorTimesMatrixAssign:
case glslang::EOpVectorTimesScalarAssign:
case glslang::EOpMatrixTimesScalarAssign:
case glslang::EOpMatrixTimesMatrixAssign:
case glslang::EOpDivAssign:
case glslang::EOpModAssign:
case glslang::EOpAndAssign:
case glslang::EOpLeftShiftAssign:
case glslang::EOpRightShiftAssign:
case glslang::EOpInclusiveOrAssign:
case glslang::EOpExclusiveOrAssign:
case glslang::EOpPostIncrement:
case glslang::EOpPostDecrement:
case glslang::EOpPreIncrement:
case glslang::EOpPreDecrement:
return true;
default:
return false;
}
}
// A helper function to get the unsigned int from a given constant union node.
// Note the node should only hold a uint scalar.
unsigned getStructIndexFromConstantUnion(glslang::TIntermTyped* node)
{
assert(node->getAsConstantUnion() && node->getAsConstantUnion()->isScalar());
unsigned struct_dereference_index = node->getAsConstantUnion()->getConstArray()[0].getUConst();
return struct_dereference_index;
}
// A helper function to generate symbol_label.
ObjectAccessChain generateSymbolLabel(glslang::TIntermSymbol* node)
{
ObjectAccessChain symbol_id =
std::to_string(node->getId()) + "(" + node->getName().c_str() + ")";
return symbol_id;
}
// Returns true if the operation is an arithmetic operation and valid for
// the 'NoContraction' decoration.
bool isArithmeticOperation(glslang::TOperator op)
{
switch (op) {
case glslang::EOpAddAssign:
case glslang::EOpSubAssign:
case glslang::EOpMulAssign:
case glslang::EOpVectorTimesMatrixAssign:
case glslang::EOpVectorTimesScalarAssign:
case glslang::EOpMatrixTimesScalarAssign:
case glslang::EOpMatrixTimesMatrixAssign:
case glslang::EOpDivAssign:
case glslang::EOpModAssign:
case glslang::EOpNegative:
case glslang::EOpAdd:
case glslang::EOpSub:
case glslang::EOpMul:
case glslang::EOpDiv:
case glslang::EOpMod:
case glslang::EOpVectorTimesScalar:
case glslang::EOpVectorTimesMatrix:
case glslang::EOpMatrixTimesVector:
case glslang::EOpMatrixTimesScalar:
case glslang::EOpMatrixTimesMatrix:
case glslang::EOpDot:
case glslang::EOpPostIncrement:
case glslang::EOpPostDecrement:
case glslang::EOpPreIncrement:
case glslang::EOpPreDecrement:
return true;
default:
return false;
}
}
// A helper RAII class that temporarily sets a piece of state and restores the
// previous value when the guard goes out of scope.
template <typename T> class StateSettingGuard {
public:
StateSettingGuard(T* state_ptr, T new_state_value)
: state_ptr_(state_ptr), previous_state_(*state_ptr)
{
*state_ptr = new_state_value;
}
StateSettingGuard(T* state_ptr) : state_ptr_(state_ptr), previous_state_(*state_ptr) {}
void setState(T new_state_value) { *state_ptr_ = new_state_value; }
~StateSettingGuard() { *state_ptr_ = previous_state_; }
private:
T* state_ptr_;
T previous_state_;
};
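// Usage sketch (illustrative only, not part of the original source):
//
//     int state = 1;
//     {
//         StateSettingGuard<int> guard(&state, 2); // state becomes 2
//         // ... code that relies on state == 2 ...
//     } // guard destroyed, state restored to 1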
// A helper function to get the front element from a given ObjectAccessChain
ObjectAccessChain getFrontElement(const ObjectAccessChain& chain)
{
size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
return pos_delimiter == std::string::npos ? chain : chain.substr(0, pos_delimiter);
}
// A helper function to get the access chain starting from the second element.
ObjectAccessChain subAccessChainFromSecondElement(const ObjectAccessChain& chain)
{
size_t pos_delimiter = chain.find(ObjectAccesschainDelimiter);
return pos_delimiter == std::string::npos ? "" : chain.substr(pos_delimiter + 1);
}
// A helper function to get the access chain after removing a given prefix.
ObjectAccessChain getSubAccessChainAfterPrefix(const ObjectAccessChain& chain,
const ObjectAccessChain& prefix)
{
size_t pos = chain.find(prefix);
if (pos != 0)
return chain;
return chain.substr(prefix.length() + sizeof(ObjectAccesschainDelimiter));
}
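// Illustrative examples (not from the original source), using the chain
// "21(color)/1/0":
//   getFrontElement()                  returns "21(color)"
//   subAccessChainFromSecondElement()  returns "1/0"
//   getSubAccessChainAfterPrefix() with prefix "21(color)/1" returns "0"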
//
// A traverser which traverses the whole AST and populates:
// 1) A mapping from symbol nodes' IDs to their defining operation nodes.
// 2) A set of access chains of the initial precise object nodes.
//
class TSymbolDefinitionCollectingTraverser : public glslang::TIntermTraverser {
public:
TSymbolDefinitionCollectingTraverser(NodeMapping* symbol_definition_mapping,
AccessChainMapping* accesschain_mapping,
ObjectAccesschainSet* precise_objects,
ReturnBranchNodeSet* precise_return_nodes);
bool visitUnary(glslang::TVisit, glslang::TIntermUnary*) override;
bool visitBinary(glslang::TVisit, glslang::TIntermBinary*) override;
void visitSymbol(glslang::TIntermSymbol*) override;
bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate*) override;
bool visitBranch(glslang::TVisit, glslang::TIntermBranch*) override;
protected:
TSymbolDefinitionCollectingTraverser& operator=(const TSymbolDefinitionCollectingTraverser&);
// The mapping from symbol node IDs to their defining nodes. This is
// populated while traversing the AST.
NodeMapping& symbol_definition_mapping_;
// The set of access chains of 'precise' objects, the ones marked as
// 'noContraction'.
ObjectAccesschainSet& precise_objects_;
// The set of precise return nodes.
ReturnBranchNodeSet& precise_return_nodes_;
// A temporary cache holding the access chain of the object node currently
// being visited while traversing the AST.
ObjectAccessChain current_object_;
// A map from object node to its access chain. This traverser stores
// the built access chains into this map for each object node it has
// visited.
AccessChainMapping& accesschain_mapping_;
// The pointer to the Function Definition node, so we can get the
// preciseness of the return expression from it when we traverse the
// return branch node.
glslang::TIntermAggregate* current_function_definition_node_;
};
TSymbolDefinitionCollectingTraverser::TSymbolDefinitionCollectingTraverser(
NodeMapping* symbol_definition_mapping, AccessChainMapping* accesschain_mapping,
ObjectAccesschainSet* precise_objects,
std::unordered_set<glslang::TIntermBranch*>* precise_return_nodes)
: TIntermTraverser(true, false, false), symbol_definition_mapping_(*symbol_definition_mapping),
precise_objects_(*precise_objects), precise_return_nodes_(*precise_return_nodes),
current_object_(), accesschain_mapping_(*accesschain_mapping),
current_function_definition_node_(nullptr) {}
// Visits a symbol node: sets current_object_ to the symbol's ID label and
// records a mapping from this node to that label, which is the start of its
// access chain.
void TSymbolDefinitionCollectingTraverser::visitSymbol(glslang::TIntermSymbol* node)
{
current_object_ = generateSymbolLabel(node);
accesschain_mapping_[node] = current_object_;
}
// Visits an aggregate node, traverses all of its children.
bool TSymbolDefinitionCollectingTraverser::visitAggregate(glslang::TVisit,
glslang::TIntermAggregate* node)
{
// This aggregate node might be a function definition node, in which case we need to
// cache this node, so we can get the preciseness information of the return value
// of this function later.
StateSettingGuard<glslang::TIntermAggregate*> current_function_definition_node_setting_guard(
&current_function_definition_node_);
if (node->getOp() == glslang::EOpFunction) {
// This is a function definition node; we need to cache this node so that we can
// get the preciseness of the return value later.
current_function_definition_node_setting_guard.setState(node);
}
// Traverse the items in the sequence.
glslang::TIntermSequence& seq = node->getSequence();
for (int i = 0; i < (int)seq.size(); ++i) {
current_object_.clear();
seq[i]->traverse(this);
}
return false;
}
bool TSymbolDefinitionCollectingTraverser::visitBranch(glslang::TVisit,
glslang::TIntermBranch* node)
{
if (node->getFlowOp() == glslang::EOpReturn && node->getExpression() &&
current_function_definition_node_ &&
current_function_definition_node_->getType().getQualifier().noContraction) {
// This node is a return node with an expression, and its function has a
// precise return value. We need to find the involved objects in its
// expression and add them to the set of initial precise objects.
precise_return_nodes_.insert(node);
node->getExpression()->traverse(this);
}
return false;
}
// Visits a unary node. This might be an implicit assignment like i++ or i--.
bool TSymbolDefinitionCollectingTraverser::visitUnary(glslang::TVisit /* visit */,
glslang::TIntermUnary* node)
{
current_object_.clear();
node->getOperand()->traverse(this);
if (isAssignOperation(node->getOp())) {
// We should always be able to get an access chain of the operand node.
assert(!current_object_.empty());
// If the operand node object is 'precise', we collect its access chain
// for the initial set of 'precise' objects.
if (isPreciseObjectNode(node->getOperand())) {
// The operand node is a 'precise' object node; add its
// access chain to the set of 'precise' objects. This is to collect
// the initial set of 'precise' objects.
precise_objects_.insert(current_object_);
}
// Gets the symbol ID from the object's access chain.
ObjectAccessChain id_symbol = getFrontElement(current_object_);
// Add a mapping from the symbol ID to this assignment operation node.
symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
}
// A unary node is not a dereference node, so we clear the access chain which
// is under construction.
current_object_.clear();
return false;
}
// Visits a binary node and updates the mapping from symbol IDs to the definition
// nodes. Also collects the access chains for the initial precise objects.
bool TSymbolDefinitionCollectingTraverser::visitBinary(glslang::TVisit /* visit */,
glslang::TIntermBinary* node)
{
// Traverses the left node to build the access chain info for the object.
current_object_.clear();
node->getLeft()->traverse(this);
if (isAssignOperation(node->getOp())) {
// We should always be able to get an access chain for the left node.
assert(!current_object_.empty());
// If the left node object is 'precise', it is an initial precise object
// specified in the shader source. Adds it to the initial work list to
// process later.
if (isPreciseObjectNode(node->getLeft())) {
// The left node is a 'precise' object node; add its access chain to
// the set of 'precise' objects. This is to collect the initial set
// of 'precise' objects.
precise_objects_.insert(current_object_);
}
// Gets the symbol ID from the object access chain, which should be the
// first element recorded in the access chain.
ObjectAccessChain id_symbol = getFrontElement(current_object_);
// Adds a mapping from the symbol ID to this assignment operation node.
symbol_definition_mapping_.insert(std::make_pair(id_symbol, node));
// Traverses the right node, since there may be other assignment
// operations on the right.
current_object_.clear();
node->getRight()->traverse(this);
} else if (isDereferenceOperation(node->getOp())) {
// This is a dereference node. If it indexes into a struct, extend the
// access chain with the member index, then record the resulting access
// chain as this node's object ID.
if (node->getOp() == glslang::EOpIndexDirectStruct) {
unsigned struct_dereference_index = getStructIndexFromConstantUnion(node->getRight());
current_object_.push_back(ObjectAccesschainDelimiter);
current_object_.append(std::to_string(struct_dereference_index));
}
accesschain_mapping_[node] = current_object_;
// For a dereference node, there is no need to traverse the right child
// node as the right node should always be an integer type object.
} else {
// For other binary nodes, still traverse the right node.
current_object_.clear();
node->getRight()->traverse(this);
}
return false;
}
// Traverses the AST and returns a tuple of four members:
// 1) a mapping from symbol IDs to the definition nodes (aka. assignment nodes) of these symbols.
// 2) a mapping from object nodes in the AST to the access chains of these objects.
// 3) a set of access chains of precise objects.
// 4) a set of return nodes with precise expressions.
std::tuple<NodeMapping, AccessChainMapping, ObjectAccesschainSet, ReturnBranchNodeSet>
getSymbolToDefinitionMappingAndPreciseSymbolIDs(const glslang::TIntermediate& intermediate)
{
auto result_tuple = std::make_tuple(NodeMapping(), AccessChainMapping(), ObjectAccesschainSet(),
ReturnBranchNodeSet());
TIntermNode* root = intermediate.getTreeRoot();
if (root == 0)
return result_tuple;
NodeMapping& symbol_definition_mapping = std::get<0>(result_tuple);
AccessChainMapping& accesschain_mapping = std::get<1>(result_tuple);
ObjectAccesschainSet& precise_objects = std::get<2>(result_tuple);
ReturnBranchNodeSet& precise_return_nodes = std::get<3>(result_tuple);
// Traverses the AST and populates the results.
TSymbolDefinitionCollectingTraverser collector(&symbol_definition_mapping, &accesschain_mapping,
&precise_objects, &precise_return_nodes);
root->traverse(&collector);
return result_tuple;
}
//
// A traverser that determines whether the left node (or the operand node of a
// unary node) of an assignment node is 'precise', contains 'precise' objects,
// or is not 'precise', according to the access chain of a given precise object
// that shares the same symbol as the left node.
//
// Post-order traverses the left-node subtree of a binary assignment node and:
//
// 1) Propagates the 'precise' from the left object nodes to this object node.
//
// 2) Builds the object access chain along the traversal, and compares it with
// the access chain of the given 'precise' object to tell whether the node
// being defined is 'precise' or not.
//
class TNoContractionAssigneeCheckingTraverser : public glslang::TIntermTraverser {
enum DecisionStatus {
// The object node to be assigned to may contain 'precise' objects and also not 'precise' objects.
Mixed = 0,
// The object node to be assigned to is either a 'precise' object or a struct object whose members are all 'precise'.
Precise = 1,
// The object node to be assigned to is not a 'precise' object.
NotPrecise = 2,
};
public:
TNoContractionAssigneeCheckingTraverser(const AccessChainMapping& accesschain_mapping)
: TIntermTraverser(true, false, false), accesschain_mapping_(accesschain_mapping),
precise_object_(nullptr) {}
// Checks the preciseness of a given assignment node with a precise object
// represented as access chain. The precise object shares the same symbol
// with the assignee of the given assignment node. Returns a tuple of two:
//
// 1) The preciseness of the assignee node of this assignment node. True
// if the assignee contains 'precise' objects or is 'precise', false if
// the assignee is not 'precise' according to the access chain of the given
// precise object.
//
// 2) The incremental access chain from the assignee node to its nested
// 'precise' object, according to the access chain of the given precise
// object. This incremental access chain can be empty, which means the
// assignee is 'precise'. Otherwise it shows the path to the nested
// precise object.
std::tuple<bool, ObjectAccessChain>
getPrecisenessAndRemainedAccessChain(glslang::TIntermOperator* node,
const ObjectAccessChain& precise_object)
{
assert(isAssignOperation(node->getOp()));
precise_object_ = &precise_object;
ObjectAccessChain assignee_object;
if (glslang::TIntermBinary* BN = node->getAsBinaryNode()) {
// This is a binary assignment node, we need to check the
// preciseness of the left node.
assert(accesschain_mapping_.count(BN->getLeft()));
// The left node (assignee node) is an object node; traverse it so
// that the 'precise' of enclosing objects is transferred to the
// nested objects.
BN->getLeft()->traverse(this);
// After traversing the left node, if the left node is 'precise',
// we can conclude this assignment should propagate 'precise'.
if (isPreciseObjectNode(BN->getLeft())) {
return make_tuple(true, ObjectAccessChain());
}
// If the preciseness of the left node (assignee node) can not
// be determined by now, we need to compare the access chain string
// of the assignee object with the given precise object.
assignee_object = accesschain_mapping_.at(BN->getLeft());
} else if (glslang::TIntermUnary* UN = node->getAsUnaryNode()) {
// This is a unary assignment node, we need to check the
// preciseness of the operand node. For unary assignment node, the
// operand node should always be an object node.
assert(accesschain_mapping_.count(UN->getOperand()));
// Traverse the operand node so that 'precise' is propagated
// from lower nodes to upper nodes.
UN->getOperand()->traverse(this);
// After traversing the operand node, if the operand node is
// 'precise', this assignment should propagate 'precise'.
if (isPreciseObjectNode(UN->getOperand())) {
return make_tuple(true, ObjectAccessChain());
}
// If the preciseness of the operand node (assignee node) can not
// be determined by now, we need to compare the access chain string
// of the assignee object with the given precise object.
assignee_object = accesschain_mapping_.at(UN->getOperand());
} else {
// Not a binary or unary node, should not happen.
assert(false);
}
// Compare the access chain string of the assignee node with the given
// precise object to determine if this assignment should propagate
// 'precise'.
if (assignee_object.find(precise_object) == 0) {
// The access chain string of the given precise object is a prefix
// of assignee's access chain string. The assignee should be
// 'precise'.
return make_tuple(true, ObjectAccessChain());
} else if (precise_object.find(assignee_object) == 0) {
// The assignee's access chain string is a prefix of the given
// precise object, so the assignee object contains a 'precise' object,
// and we need to pass the remaining access chain to the object nodes
// on the right.
return make_tuple(true, getSubAccessChainAfterPrefix(precise_object, assignee_object));
} else {
// The access chain strings do not match, the assignee object can
// not be labeled as 'precise' according to the given precise
// object.
return make_tuple(false, ObjectAccessChain());
}
}
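// Illustrative outcomes (not from the original source), with assignee access
// chain "21(color)/1":
//   precise object "21(color)"     -> (true, "")   the assignee lies inside a 'precise' object
//   precise object "21(color)/1/0" -> (true, "0")  the assignee contains a nested 'precise' object
//   precise object "21(color)/2"   -> (false, "")  unrelated; nothing to propagate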
protected:
TNoContractionAssigneeCheckingTraverser& operator=(const TNoContractionAssigneeCheckingTraverser&);
bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override;
void visitSymbol(glslang::TIntermSymbol* node) override;
// A map from object nodes to their access chain string (used as object ID).
const AccessChainMapping& accesschain_mapping_;
// A given precise object, represented by its access chain string. This
// precise object is compared with the assignee node to tell whether the
// assignee node is 'precise', contains 'precise' objects, or is not
// 'precise'.
const ObjectAccessChain* precise_object_;
};
// Visits a binary node. If the node is an object node, it must be a dereference
// node. In such cases, if the left node is 'precise', this node should also be
// 'precise'.
bool TNoContractionAssigneeCheckingTraverser::visitBinary(glslang::TVisit,
glslang::TIntermBinary* node)
{
// Traverses the left node so that the 'precise' of the enclosing object is
// transferred to its nested objects.
node->getLeft()->traverse(this);
// If this binary node is an object node, we should have it in the
// accesschain_mapping_.
if (accesschain_mapping_.count(node)) {
// A binary object node must be a dereference node.
assert(isDereferenceOperation(node->getOp()));
// If the left node is 'precise', this node should also be precise,
// otherwise, compare with the given precise_object_. If the
// access chain of this node matches with the given precise_object_,
// this node should be marked as 'precise'.
if (isPreciseObjectNode(node->getLeft())) {
node->getWritableType().getQualifier().noContraction = true;
} else if (accesschain_mapping_.at(node) == *precise_object_) {
node->getWritableType().getQualifier().noContraction = true;
}
}
return false;
}
// Visits a symbol node. If the symbol node's ID (its access chain string)
// matches the given precise object, this node should be 'precise'.
void TNoContractionAssigneeCheckingTraverser::visitSymbol(glslang::TIntermSymbol* node)
{
// A symbol node should always be an object node, and should have been added
// to the map from object nodes to their access chain strings.
assert(accesschain_mapping_.count(node));
if (accesschain_mapping_.at(node) == *precise_object_) {
node->getWritableType().getQualifier().noContraction = true;
}
}
//
// A traverser that only traverses the right side of binary assignment nodes
// and the operand node of unary assignment nodes.
//
// 1) Marks arithmetic operations as 'NoContraction'.
//
// 2) Finds the objects which should be marked as 'precise' on the right and
// updates the 'precise' object work list.
//
class TNoContractionPropagator : public glslang::TIntermTraverser {
public:
TNoContractionPropagator(ObjectAccesschainSet* precise_objects,
const AccessChainMapping& accesschain_mapping)
: TIntermTraverser(true, false, false),
precise_objects_(*precise_objects), added_precise_object_ids_(),
remained_accesschain_(), accesschain_mapping_(accesschain_mapping) {}
// Propagates 'precise' through the right-hand nodes of a given assignment
// node, using the access chain recorded from the assignee node to a 'precise'
// object it contains.
void
propagateNoContractionInOneExpression(glslang::TIntermTyped* defining_node,
const ObjectAccessChain& assignee_remained_accesschain)
{
remained_accesschain_ = assignee_remained_accesschain;
if (glslang::TIntermBinary* BN = defining_node->getAsBinaryNode()) {
assert(isAssignOperation(BN->getOp()));
BN->getRight()->traverse(this);
if (isArithmeticOperation(BN->getOp())) {
BN->getWritableType().getQualifier().noContraction = true;
}
} else if (glslang::TIntermUnary* UN = defining_node->getAsUnaryNode()) {
assert(isAssignOperation(UN->getOp()));
UN->getOperand()->traverse(this);
if (isArithmeticOperation(UN->getOp())) {
UN->getWritableType().getQualifier().noContraction = true;
}
}
}
// Propagates 'precise' in a given precise return node.
void propagateNoContractionInReturnNode(glslang::TIntermBranch* return_node)
{
remained_accesschain_ = "";
assert(return_node->getFlowOp() == glslang::EOpReturn && return_node->getExpression());
return_node->getExpression()->traverse(this);
}
protected:
TNoContractionPropagator& operator=(const TNoContractionPropagator&);
// Visits an aggregate node. The node can be an initializer list, in which
// case we need to find the 'precise' or 'precise'-containing object node
// using the recorded access chain. In other cases, we just need to traverse
// all the children nodes.
bool visitAggregate(glslang::TVisit, glslang::TIntermAggregate* node) override
{
if (!remained_accesschain_.empty() && node->getOp() == glslang::EOpConstructStruct) {
// This is a struct initializer node and the remaining access chain is not
// empty; we need to use remained_accesschain_ to find the nested 'precise'
// object, and we don't need to visit other nodes in this aggregate node.
// Gets the struct dereference index that leads to 'precise' object.
ObjectAccessChain precise_accesschain_index_str =
getFrontElement(remained_accesschain_);
unsigned precise_accesschain_index = (unsigned)strtoul(precise_accesschain_index_str.c_str(), nullptr, 10);
// Gets the node pointed by the access chain index extracted before.
glslang::TIntermTyped* potential_precise_node =
node->getSequence()[precise_accesschain_index]->getAsTyped();
assert(potential_precise_node);
// Pop the front access chain index from the path, and visit the nested node.
{
ObjectAccessChain next_level_accesschain =
subAccessChainFromSecondElement(remained_accesschain_);
StateSettingGuard<ObjectAccessChain> setup_remained_accesschain_for_next_level(
&remained_accesschain_, next_level_accesschain);
potential_precise_node->traverse(this);
}
return false;
}
return true;
}
// Visits a binary node. A binary node can be an object node, e.g. a dereference node.
// As only the top object nodes on the right side of an assignment need to be visited
// and added to the 'precise' work list, this traverser won't visit the children nodes of
// an object node. If the binary node does not represent an object node, it should
// go on to traverse its children nodes, and if it is an arithmetic operation node, the
// operation should be marked as 'noContraction'.
bool visitBinary(glslang::TVisit, glslang::TIntermBinary* node) override
{
if (isDereferenceOperation(node->getOp())) {
// This binary node is an object node. We need to update the precise
// object set with the access chain of this node plus the remaining
// access chain.
ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
if (remained_accesschain_.empty()) {
node->getWritableType().getQualifier().noContraction = true;
} else {
new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
}
// Cache the access chain as added precise object, so we won't add the
// same object to the work list again.
if (!added_precise_object_ids_.count(new_precise_accesschain)) {
precise_objects_.insert(new_precise_accesschain);
added_precise_object_ids_.insert(new_precise_accesschain);
}
// Only the upper-most object nodes should be visited, so do not
// visit children of this object node.
return false;
}
// If this is an arithmetic operation, marks this node as 'noContraction'.
if (isArithmeticOperation(node->getOp()) && node->getBasicType() != glslang::EbtInt) {
node->getWritableType().getQualifier().noContraction = true;
}
// As this node is not an object node, need to traverse the children nodes.
return true;
}
// Visits a unary node. A unary node can not be an object node. If the operation
// is an arithmetic operation, need to mark this node as 'noContraction'.
bool visitUnary(glslang::TVisit /* visit */, glslang::TIntermUnary* node) override
{
// If this is an arithmetic operation, marks this with 'noContraction'
if (isArithmeticOperation(node->getOp())) {
node->getWritableType().getQualifier().noContraction = true;
}
return true;
}
// Visits a symbol node. A symbol node is always an object node, so we
// should always be able to find it in our collected mapping from object
// nodes to access chains. As an object node, a symbol node is either
// 'precise' itself or contains a nested 'precise' object, depending on the
// remaining access chain information we have when we visit this node.
void visitSymbol(glslang::TIntermSymbol* node) override
{
// Symbol nodes are object nodes and should always have an
// access chain collected for them in accesschain_mapping_.
assert(accesschain_mapping_.count(node));
ObjectAccessChain new_precise_accesschain = accesschain_mapping_.at(node);
// If the remaining access chain is empty, this symbol node should be
// marked as 'precise'. Otherwise, the remaining access chain should be
// appended to the symbol ID to build a new access chain which points to
// the nested 'precise' object inside this symbol object.
if (remained_accesschain_.empty()) {
node->getWritableType().getQualifier().noContraction = true;
} else {
new_precise_accesschain += ObjectAccesschainDelimiter + remained_accesschain_;
}
// Add the new 'precise' access chain to the work list and make sure we
// don't visit it again.
if (!added_precise_object_ids_.count(new_precise_accesschain)) {
precise_objects_.insert(new_precise_accesschain);
added_precise_object_ids_.insert(new_precise_accesschain);
}
}
// A set of precise objects, represented as access chains.
ObjectAccesschainSet& precise_objects_;
// Access chains of 'precise' objects already added to the work list, so the
// same object is not added again.
ObjectAccesschainSet added_precise_object_ids_;
// The left node of an assignment operation might be a parent of 'precise' objects.
// This means the left node might not itself be a 'precise' object node, but it may
// contain nested 'precise' objects whose qualifier should be propagated to the
// corresponding child nodes on the right. So we need the path from the left node to
// its nested 'precise' node to tell us how to find the corresponding 'precise' node
// on the right.
ObjectAccessChain remained_accesschain_;
// A map from node pointers to their access chains.
const AccessChainMapping& accesschain_mapping_;
};
}
namespace glslang {
void PropagateNoContraction(const glslang::TIntermediate& intermediate)
{
// First, traverses the AST, records symbols with their defining operations,
// and collects the initial set of precise symbols (symbol nodes marked as
// 'noContraction') and precise return nodes.
auto mappings_and_precise_objects =
getSymbolToDefinitionMappingAndPreciseSymbolIDs(intermediate);
// The mapping of symbol node IDs to their defining nodes. This enables us
// to get the defining node directly from a given symbol ID without
// traversing the tree again.
NodeMapping& symbol_definition_mapping = std::get<0>(mappings_and_precise_objects);
// The mapping of object nodes to their recorded access chains.
AccessChainMapping& accesschain_mapping = std::get<1>(mappings_and_precise_objects);
// The initial set of 'precise' objects which are represented as the
// access chain toward them.
ObjectAccesschainSet& precise_object_accesschains = std::get<2>(mappings_and_precise_objects);
// The set of 'precise' return nodes.
ReturnBranchNodeSet& precise_return_nodes = std::get<3>(mappings_and_precise_objects);
// Second, uses the initial set of precise objects as a work list: pops an
// access chain and extracts the symbol ID from it. Then:
// 1) Check the assignee object, see if it is 'precise' object node or
// contains 'precise' object. Obtain the incremental access chain from the
// assignee node to its nested 'precise' node (if any).
// 2) If the assignee object node is 'precise' or it contains 'precise'
// objects, traverses the right side of the assignment operation
// expression to mark arithmetic operations as 'noContraction' and update
// 'precise' access chain work list with new found object nodes.
// Repeat above steps until the work list is empty.
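// Illustrative walk-through (not from the original comments): for a shader with
//     precise float p;
//     float t = a * b;
//     p = t + c;
// the initial work list holds the access chain of 'p'. Processing the defining
// assignment 'p = t + c' marks the addition as 'noContraction' and pushes the
// access chains of 't' and 'c' onto the work list; processing 't' then reaches
// its defining assignment 't = a * b' and marks the multiplication as well.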
TNoContractionAssigneeCheckingTraverser checker(accesschain_mapping);
TNoContractionPropagator propagator(&precise_object_accesschains, accesschain_mapping);
// We have two initial precise work lists to handle:
// 1) precise return nodes
// 2) precise object access chains
// We should process the precise return nodes first and the involved
// objects in the return expression should be added to the precise object
// access chain set.
while (!precise_return_nodes.empty()) {
glslang::TIntermBranch* precise_return_node = *precise_return_nodes.begin();
propagator.propagateNoContractionInReturnNode(precise_return_node);
precise_return_nodes.erase(precise_return_node);
}
while (!precise_object_accesschains.empty()) {
// Get the access chain of a precise object from the work list.
ObjectAccessChain precise_object_accesschain = *precise_object_accesschains.begin();
// Get the symbol id from the access chain.
ObjectAccessChain symbol_id = getFrontElement(precise_object_accesschain);
// Get all the defining nodes of that symbol ID.
std::pair<NodeMapping::iterator, NodeMapping::iterator> range =
symbol_definition_mapping.equal_range(symbol_id);
// Visits all the assignment nodes of that symbol ID and
// 1) Check if the assignee node is 'precise' or contains 'precise'
// objects.
// 2) Propagate the 'precise' to the top layer object nodes
// in the right side of the assignment operation, update the 'precise'
// work list with new access chains representing the new 'precise'
// objects, and mark arithmetic operations as 'noContraction'.
for (NodeMapping::iterator defining_node_iter = range.first;
defining_node_iter != range.second; defining_node_iter++) {
TIntermOperator* defining_node = defining_node_iter->second;
// Check the assignee node.
auto checker_result = checker.getPrecisenessAndRemainedAccessChain(
defining_node, precise_object_accesschain);
bool& contain_precise = std::get<0>(checker_result);
ObjectAccessChain& remained_accesschain = std::get<1>(checker_result);
// If the assignee node is 'precise' or contains 'precise', propagate the
// 'precise' to the right. Otherwise just skip this assignment node.
if (contain_precise) {
propagator.propagateNoContractionInOneExpression(defining_node,
remained_accesschain);
}
}
// Remove the last processed 'precise' object from the work list.
precise_object_accesschains.erase(precise_object_accesschain);
}
}
};

View File

@ -0,0 +1,55 @@
//
// Copyright (C) 2015-2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Visit the nodes in the glslang intermediate tree representation to
// propagate the 'noContraction' qualifier.
//
#pragma once
#include "../Include/intermediate.h"
namespace glslang {
// Propagates the 'precise' qualifier for objects (objects marked with the
// 'noContraction' qualifier) from the 'precise' variables specified in the
// shader source to all the involved objects, and adds the 'noContraction'
// qualifier to the involved arithmetic operations.
// Note that the same qualifier, 'noContraction', is used in both object nodes
// and arithmetic operation nodes, but with different meanings. For object
// nodes, 'noContraction' means the object is 'precise'; for arithmetic
// operation nodes, it means the operation should not be contracted.
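// Illustrative example (not part of the original comments): for
//     precise float r;
//     r = a * b + c;
// the assignment to the 'precise' object 'r' causes the multiplication and the
// addition on the right-hand side to be marked 'noContraction', so they will
// not be contracted into, for example, a fused multiply-add.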
void PropagateNoContraction(const glslang::TIntermediate& intermediate);
};

View File

@ -0,0 +1,852 @@
//
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
#include "localintermediate.h"
#include "gl_types.h"
//
// Grow the reflection database through a friend traverser class of TReflection and a
// collection of functions to do a liveness traversal that notes what uniforms are used
// in semantically non-dead code.
//
// Can be used multiple times, once per stage, to grow a program reflection.
//
// High-level algorithm for one stage:
//
// 1. Put the entry point on the list of live functions.
//
// 2. Traverse any live function, while skipping if-tests with a compile-time constant
// condition of false, and while adding any encountered function calls to the live
// function list.
//
// Repeat until the live function list is empty.
//
// 3. Add any encountered uniform variables and blocks to the reflection database.
//
// Can be attempted with a failed link, but will return false if recursion had been detected, or
// there wasn't exactly one entry point.
//
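// A minimal usage sketch (illustrative, assuming the public glslang TProgram
// API from ShaderLang.h; not part of this file):
//
//     glslang::TProgram program;
//     // ... addShader() per stage, then program.link(messages) ...
//     if (program.buildReflection())      // drives TReflection::addStage() below
//         program.dumpReflection();       // prints the reflection tables
//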
namespace glslang {
//
// The traverser: mostly pass through, except
// - processing binary nodes to see if they are dereferences of aggregates to track
// - processing symbol nodes to see if they are non-aggregate objects to track
//
// This ignores semantically dead code by using TLiveTraverser.
//
// This is in the glslang namespace directly so it can be a friend of TReflection.
//
class TReflectionTraverser : public TLiveTraverser {
public:
TReflectionTraverser(const TIntermediate& i, TReflection& r) :
TLiveTraverser(i), reflection(r) { }
virtual bool visitBinary(TVisit, TIntermBinary* node);
virtual void visitSymbol(TIntermSymbol* base);
// Add a simple reference to a uniform variable to the uniform database, no dereference involved.
// However, no dereference doesn't mean simple... it could be a complex aggregate.
void addUniform(const TIntermSymbol& base)
{
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
// Use a degenerate (empty) set of dereferences to immediately put us at the end of
// the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
}
}
void addAttribute(const TIntermSymbol& base)
{
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
const TString &name = base.getName();
const TType &type = base.getType();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name] = (int)reflection.indexToAttribute.size();
reflection.indexToAttribute.push_back(TObjectReflection(name, type, 0, mapToGlType(type), 0, 0));
}
}
}
// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int getOffset(const TType& type, int index)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate the offset if one is present; it could be user supplied
// and different from what would be calculated. That is, this is faster,
// but it is not just an optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize;
int dummyStride;
int offset = 0;
for (int m = 0; m <= index; ++m) {
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberList[m].type->getQualifier().layoutMatrix;
int memberAlignment = intermediate.getBaseAlignment(*memberList[m].type, memberSize, dummyStride,
type.getQualifier().layoutPacking == ElpStd140,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: type.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
if (m < index)
offset += memberSize;
}
return offset;
}
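// Illustrative example (not from the original comments), std140 with no
// explicit offsets:
//     layout(std140) uniform B { float a; vec3 b; float c; };
// 'a' is placed at offset 0; 'b' has a base alignment of 16, so its offset is
// rounded up to 16; 'c' follows the 12 bytes of 'b' at offset 28.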
// Calculate the block data size.
// Block arrayness is not taken into account, each element is backed by a separate buffer.
int getBlockSize(const TType& blockType)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
int lastMemberSize;
int dummyStride;
intermediate.getBaseAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking == ElpStd140,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
return lastOffset + lastMemberSize;
}
// Traverse the provided deref chain, including the base, and
// - build a full reflection-granularity name, array size, etc. entry out of it, if it goes down to that granularity
// - recursively expand any variable array index in the middle of that traversal
// - recursively expand what's left at the end if the deref chain did not reach down to reflection granularity
//
// arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
// A value of 0 for arraySize will mean to use the full array's size.
void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
{
// process the part of the dereference chain that was explicit in the shader
TString name = baseName;
const TType* terminalType = &baseType;
for (; deref != derefs.end(); ++deref) {
TIntermBinary* visitNode = *deref;
terminalType = &visitNode->getType();
int index;
switch (visitNode->getOp()) {
case EOpIndexIndirect:
// Visit all the indices of this array, and for each one add on the remaining dereferencing
for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
if (baseType.getBasicType() != EbtBlock)
newBaseName.append(TString("[") + String(i) + "]");
TList<TIntermBinary*>::const_iterator nextDeref = deref;
++nextDeref;
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
}
// it was all completed in the recursive calls above
return;
case EOpIndexDirect:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (baseType.getBasicType() != EbtBlock)
name.append(TString("[") + String(index) + "]");
break;
case EOpIndexDirectStruct:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (offset >= 0)
offset += getOffset(visitNode->getLeft()->getType(), index);
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
break;
default:
break;
}
}
// if the terminalType is still too coarse a granularity, this is still an aggregate to expand, expand it...
if (! isReflectionGranularity(*terminalType)) {
if (terminalType->isArray()) {
// Visit all the indices of this array, and for each one,
// fully explode the remaining aggregate to dereference
for (int i = 0; i < std::max(terminalType->getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
}
} else {
// Visit all members of this aggregate, and for each one,
// fully explode the remaining aggregate to dereference
const TTypeList& typeList = *terminalType->getStruct();
for (int i = 0; i < (int)typeList.size(); ++i) {
TString newBaseName = name;
newBaseName.append(TString(".") + typeList[i].type->getFieldName());
TType derefType(*terminalType, i);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
}
}
// it was all completed in the recursive calls above
return;
}
// Finally, add a full string to the reflection database, and update the array size if necessary.
// If the dereferenced entity to record is an array, compute the size and update the maximum size.
// There might not be a final array dereference; it could have been copied as an array object.
if (arraySize == 0)
arraySize = mapToGlArraySize(*terminalType);
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name] = (int)reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(name, *terminalType, offset,
mapToGlType(*terminalType),
arraySize, blockIndex));
} else if (arraySize > 1) {
int& reflectedArraySize = reflection.indexToUniform[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
}
}
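// Illustrative expansion (not from the original comments): for
//     struct Light { vec4 pos; vec4 color; };
//     uniform Light lights[2];
// this produces the reflection entries "lights[0].pos", "lights[0].color",
// "lights[1].pos" and "lights[1].color".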
// Add a uniform dereference where blocks/struct/arrays are involved in the access.
// Handles the situation where the left node is at the correct or too coarse a
// granularity for reflection. (That is, further dereferences up the tree will be
// skipped.) Earlier dereferences, down the tree, will be handled
// at the same time, and logged to prevent reprocessing as the tree is traversed.
//
// Note: Other things like the following must be caught elsewhere:
// - a simple non-array, non-struct variable (no dereference even conceivable)
// - an aggregate consumed en masse, without a dereference
//
// So, this code is for cases like
// - a struct/block dereferencing a member (whether the member is array or not)
// - an array of struct
// - structs/arrays containing the above
//
void addDereferencedUniform(TIntermBinary* topNode)
{
// See if too fine-grained to process (wait to get further down the tree)
const TType& leftType = topNode->getLeft()->getType();
if ((leftType.isVector() || leftType.isMatrix()) && ! leftType.isArray())
return;
// We have an array or structure or block dereference, see if it's a uniform
// based dereference (if not, skip it).
TIntermSymbol* base = findBase(topNode);
if (! base || ! base->getQualifier().isUniformOrBuffer())
return;
// See if we've already processed this (e.g., in the middle of something
// we did earlier), and if so skip it
if (processedDerefs.find(topNode) != processedDerefs.end())
return;
// Process this uniform dereference
int offset = -1;
int blockIndex = -1;
bool anonymous = false;
// See if we need to record the block itself
bool block = base->getBasicType() == EbtBlock;
if (block) {
offset = 0;
anonymous = IsAnonymous(base->getName());
const TString& blockName = base->getType().getTypeName();
if (base->getType().isArray()) {
TType derefType(base->getType(), 0);
assert(! anonymous);
for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
getBlockSize(base->getType()));
} else
blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
}
// Process the dereference chain, backward, accumulating the pieces for later forward traversal.
// If the topNode is a reflection-granularity-array dereference, don't include that last dereference.
TList<TIntermBinary*> derefs;
for (TIntermBinary* visitNode = topNode; visitNode; visitNode = visitNode->getLeft()->getAsBinaryNode()) {
if (isReflectionGranularity(visitNode->getLeft()->getType()))
continue;
derefs.push_front(visitNode);
processedDerefs.insert(visitNode);
}
processedDerefs.insert(base);
// See if we have a specific array size to stick to while enumerating the explosion of the aggregate
int arraySize = 0;
if (isReflectionGranularity(topNode->getLeft()->getType()) && topNode->getLeft()->isArray()) {
if (topNode->getOp() == EOpIndexDirect)
arraySize = topNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst() + 1;
}
// Put the dereference chain together, forward
TString baseName;
if (! anonymous) {
if (block)
baseName = base->getType().getTypeName();
else
baseName = base->getName();
}
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
}
int addBlockName(const TString& name, const TType& type, int size)
{
int blockIndex;
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name);
if (it == reflection.nameToIndex.end()) {
blockIndex = (int)reflection.indexToUniformBlock.size();
reflection.nameToIndex[name] = blockIndex;
reflection.indexToUniformBlock.push_back(TObjectReflection(name, type, -1, -1, size, -1));
} else
blockIndex = it->second;
return blockIndex;
}
// Are we at a level in a dereference chain at which individual active uniform queries are made?
bool isReflectionGranularity(const TType& type)
{
return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
}
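// For example (illustrative): float, vec4, mat3 and sampler2D are at
// reflection granularity; a struct or a block is not and is expanded further.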
// For a binary operation indexing into an aggregate, chase down the base of the aggregate.
// Return 0 if the topology does not fit this situation.
TIntermSymbol* findBase(const TIntermBinary* node)
{
TIntermSymbol *base = node->getLeft()->getAsSymbolNode();
if (base)
return base;
TIntermBinary* left = node->getLeft()->getAsBinaryNode();
if (! left)
return nullptr;
return findBase(left);
}
//
// Translate a glslang sampler type into the GL API #define number.
//
int mapSamplerToGlType(TSampler sampler)
{
if (! sampler.image) {
// a sampler...
switch (sampler.type) {
case EbtFloat:
switch ((int)sampler.dim) {
case Esd1D:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY : GL_SAMPLER_1D;
case true: return sampler.arrayed ? GL_SAMPLER_1D_ARRAY_SHADOW : GL_SAMPLER_1D_SHADOW;
}
case Esd2D:
switch ((int)sampler.ms) {
case false:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY : GL_SAMPLER_2D;
case true: return sampler.arrayed ? GL_SAMPLER_2D_ARRAY_SHADOW : GL_SAMPLER_2D_SHADOW;
}
case true: return sampler.arrayed ? GL_SAMPLER_2D_MULTISAMPLE_ARRAY : GL_SAMPLER_2D_MULTISAMPLE;
}
case Esd3D:
return GL_SAMPLER_3D;
case EsdCube:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY : GL_SAMPLER_CUBE;
case true: return sampler.arrayed ? GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW : GL_SAMPLER_CUBE_SHADOW;
}
case EsdRect:
return sampler.shadow ? GL_SAMPLER_2D_RECT_SHADOW : GL_SAMPLER_2D_RECT;
case EsdBuffer:
return GL_SAMPLER_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_AMD : GL_FLOAT16_SAMPLER_1D_AMD;
case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_1D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_1D_SHADOW_AMD;
}
case Esd2D:
switch ((int)sampler.ms) {
case false:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_AMD;
case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_SHADOW_AMD;
}
case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_SAMPLER_2D_MULTISAMPLE_AMD;
}
case Esd3D:
return GL_FLOAT16_SAMPLER_3D_AMD;
case EsdCube:
switch ((int)sampler.shadow) {
case false: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_SAMPLER_CUBE_AMD;
case true: return sampler.arrayed ? GL_FLOAT16_SAMPLER_CUBE_MAP_ARRAY_SHADOW_AMD : GL_FLOAT16_SAMPLER_CUBE_SHADOW_AMD;
}
case EsdRect:
return sampler.shadow ? GL_FLOAT16_SAMPLER_2D_RECT_SHADOW_AMD : GL_FLOAT16_SAMPLER_2D_RECT_AMD;
case EsdBuffer:
return GL_FLOAT16_SAMPLER_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_INT_SAMPLER_1D_ARRAY : GL_INT_SAMPLER_1D;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_INT_SAMPLER_2D_ARRAY : GL_INT_SAMPLER_2D;
case true: return sampler.arrayed ? GL_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
: GL_INT_SAMPLER_2D_MULTISAMPLE;
}
case Esd3D:
return GL_INT_SAMPLER_3D;
case EsdCube:
return sampler.arrayed ? GL_INT_SAMPLER_CUBE_MAP_ARRAY : GL_INT_SAMPLER_CUBE;
case EsdRect:
return GL_INT_SAMPLER_2D_RECT;
case EsdBuffer:
return GL_INT_SAMPLER_BUFFER;
}
case EbtUint:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_1D_ARRAY : GL_UNSIGNED_INT_SAMPLER_1D;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_ARRAY : GL_UNSIGNED_INT_SAMPLER_2D;
case true: return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE_ARRAY
: GL_UNSIGNED_INT_SAMPLER_2D_MULTISAMPLE;
}
case Esd3D:
return GL_UNSIGNED_INT_SAMPLER_3D;
case EsdCube:
return sampler.arrayed ? GL_UNSIGNED_INT_SAMPLER_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_SAMPLER_CUBE;
case EsdRect:
return GL_UNSIGNED_INT_SAMPLER_2D_RECT;
case EsdBuffer:
return GL_UNSIGNED_INT_SAMPLER_BUFFER;
}
default:
return 0;
}
} else {
// an image...
switch (sampler.type) {
case EbtFloat:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_IMAGE_1D_ARRAY : GL_IMAGE_1D;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_IMAGE_2D_ARRAY : GL_IMAGE_2D;
case true: return sampler.arrayed ? GL_IMAGE_2D_MULTISAMPLE_ARRAY : GL_IMAGE_2D_MULTISAMPLE;
}
case Esd3D:
return GL_IMAGE_3D;
case EsdCube:
return sampler.arrayed ? GL_IMAGE_CUBE_MAP_ARRAY : GL_IMAGE_CUBE;
case EsdRect:
return GL_IMAGE_2D_RECT;
case EsdBuffer:
return GL_IMAGE_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_FLOAT16_IMAGE_1D_ARRAY_AMD : GL_FLOAT16_IMAGE_1D_AMD;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_AMD;
case true: return sampler.arrayed ? GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD : GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD;
}
case Esd3D:
return GL_FLOAT16_IMAGE_3D_AMD;
case EsdCube:
return sampler.arrayed ? GL_FLOAT16_IMAGE_CUBE_MAP_ARRAY_AMD : GL_FLOAT16_IMAGE_CUBE_AMD;
case EsdRect:
return GL_FLOAT16_IMAGE_2D_RECT_AMD;
case EsdBuffer:
return GL_FLOAT16_IMAGE_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_INT_IMAGE_1D_ARRAY : GL_INT_IMAGE_1D;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_INT_IMAGE_2D_ARRAY : GL_INT_IMAGE_2D;
case true: return sampler.arrayed ? GL_INT_IMAGE_2D_MULTISAMPLE_ARRAY : GL_INT_IMAGE_2D_MULTISAMPLE;
}
case Esd3D:
return GL_INT_IMAGE_3D;
case EsdCube:
return sampler.arrayed ? GL_INT_IMAGE_CUBE_MAP_ARRAY : GL_INT_IMAGE_CUBE;
case EsdRect:
return GL_INT_IMAGE_2D_RECT;
case EsdBuffer:
return GL_INT_IMAGE_BUFFER;
}
case EbtUint:
switch ((int)sampler.dim) {
case Esd1D:
return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_1D_ARRAY : GL_UNSIGNED_INT_IMAGE_1D;
case Esd2D:
switch ((int)sampler.ms) {
case false: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_ARRAY : GL_UNSIGNED_INT_IMAGE_2D;
case true: return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE_ARRAY
: GL_UNSIGNED_INT_IMAGE_2D_MULTISAMPLE;
}
case Esd3D:
return GL_UNSIGNED_INT_IMAGE_3D;
case EsdCube:
return sampler.arrayed ? GL_UNSIGNED_INT_IMAGE_CUBE_MAP_ARRAY : GL_UNSIGNED_INT_IMAGE_CUBE;
case EsdRect:
return GL_UNSIGNED_INT_IMAGE_2D_RECT;
case EsdBuffer:
return GL_UNSIGNED_INT_IMAGE_BUFFER;
}
default:
return 0;
}
}
}
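// For example (illustrative): 'sampler2D' maps to GL_SAMPLER_2D,
// 'sampler2DShadow' to GL_SAMPLER_2D_SHADOW, and 'image2D' to GL_IMAGE_2D.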
//
// Translate a glslang type into the GL API #define number.
// Ignores arrayness.
//
int mapToGlType(const TType& type)
{
switch (type.getBasicType()) {
case EbtSampler:
return mapSamplerToGlType(type.getSampler());
case EbtStruct:
case EbtBlock:
case EbtVoid:
return 0;
default:
break;
}
if (type.isVector()) {
int offset = type.getVectorSize() - 2;
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT_VEC2 + offset;
case EbtDouble: return GL_DOUBLE_VEC2 + offset;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
#endif
case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
case EbtInt64: return GL_INT64_ARB + offset;
case EbtUint64: return GL_UNSIGNED_INT64_ARB + offset;
case EbtBool: return GL_BOOL_VEC2 + offset;
case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER + offset;
default: return 0;
}
}
if (type.isMatrix()) {
switch (type.getBasicType()) {
case EbtFloat:
switch (type.getMatrixCols()) {
case 2:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT_MAT2;
case 3: return GL_FLOAT_MAT2x3;
case 4: return GL_FLOAT_MAT2x4;
default: return 0;
}
case 3:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT_MAT3x2;
case 3: return GL_FLOAT_MAT3;
case 4: return GL_FLOAT_MAT3x4;
default: return 0;
}
case 4:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT_MAT4x2;
case 3: return GL_FLOAT_MAT4x3;
case 4: return GL_FLOAT_MAT4;
default: return 0;
}
}
case EbtDouble:
switch (type.getMatrixCols()) {
case 2:
switch (type.getMatrixRows()) {
case 2: return GL_DOUBLE_MAT2;
case 3: return GL_DOUBLE_MAT2x3;
case 4: return GL_DOUBLE_MAT2x4;
default: return 0;
}
case 3:
switch (type.getMatrixRows()) {
case 2: return GL_DOUBLE_MAT3x2;
case 3: return GL_DOUBLE_MAT3;
case 4: return GL_DOUBLE_MAT3x4;
default: return 0;
}
case 4:
switch (type.getMatrixRows()) {
case 2: return GL_DOUBLE_MAT4x2;
case 3: return GL_DOUBLE_MAT4x3;
case 4: return GL_DOUBLE_MAT4;
default: return 0;
}
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch (type.getMatrixCols()) {
case 2:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT2_AMD;
case 3: return GL_FLOAT16_MAT2x3_AMD;
case 4: return GL_FLOAT16_MAT2x4_AMD;
default: return 0;
}
case 3:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT3x2_AMD;
case 3: return GL_FLOAT16_MAT3_AMD;
case 4: return GL_FLOAT16_MAT3x4_AMD;
default: return 0;
}
case 4:
switch (type.getMatrixRows()) {
case 2: return GL_FLOAT16_MAT4x2_AMD;
case 3: return GL_FLOAT16_MAT4x3_AMD;
case 4: return GL_FLOAT16_MAT4_AMD;
default: return 0;
}
}
#endif
default:
return 0;
}
}
if (type.getVectorSize() == 1) {
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT;
case EbtDouble: return GL_DOUBLE;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_NV;
#endif
case EbtInt: return GL_INT;
case EbtUint: return GL_UNSIGNED_INT;
case EbtInt64: return GL_INT64_ARB;
case EbtUint64: return GL_UNSIGNED_INT64_ARB;
case EbtBool: return GL_BOOL;
case EbtAtomicUint: return GL_UNSIGNED_INT_ATOMIC_COUNTER;
default: return 0;
}
}
return 0;
}
int mapToGlArraySize(const TType& type)
{
return type.isArray() ? type.getOuterArraySize() : 1;
}
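// For example (illustrative): a uniform declared 'vec4 v[8]' yields 8, while a
// non-array uniform yields 1.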
TReflection& reflection;
std::set<const TIntermNode*> processedDerefs;
protected:
TReflectionTraverser(TReflectionTraverser&);
TReflectionTraverser& operator=(TReflectionTraverser&);
};
//
// Implement the traversal functions of interest.
//
// To catch dereferenced aggregates that must be reflected.
// This catches them at the highest level possible in the tree.
bool TReflectionTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
{
switch (node->getOp()) {
case EOpIndexDirect:
case EOpIndexIndirect:
case EOpIndexDirectStruct:
addDereferencedUniform(node);
break;
default:
break;
}
// still need to visit everything below, which could contain sub-expressions
// containing different uniforms
return true;
}
// To reflect non-dereferenced objects.
void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
{
if (base->getQualifier().storage == EvqUniform)
addUniform(*base);
if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
addAttribute(*base);
}
//
// Implement TReflection methods.
//
// Track any required attribute reflection, such as compute shader numthreads.
//
void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediate& intermediate)
{
if (stage == EShLangCompute) {
// Remember thread dimensions
for (int dim=0; dim<3; ++dim)
localSize[dim] = intermediate.getLocalSize(dim);
}
}
// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
// search for ones that have counters
for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name));
const int index = getIndex(counterName);
if (index >= 0)
indexToUniformBlock[i].counterIndex = index;
}
}
// build Shader Stages mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
for (int i = 0; i < int(indexToUniform.size()); ++i) {
indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
}
}
// Merge live symbols from 'intermediate' into the existing reflection database.
//
// Returns false if the input is too malformed to do this.
bool TReflection::addStage(EShLanguage stage, const TIntermediate& intermediate)
{
if (intermediate.getTreeRoot() == nullptr ||
intermediate.getNumEntryPoints() != 1 ||
intermediate.isRecursive())
return false;
buildAttributeReflection(stage, intermediate);
TReflectionTraverser it(intermediate, *this);
// put the entry point on the list of functions to process
it.pushFunction(intermediate.getEntryPointMangledName().c_str());
// process all the functions
while (! it.functions.empty()) {
TIntermNode* function = it.functions.back();
it.functions.pop_back();
function->traverse(&it);
}
buildCounterIndices(intermediate);
buildUniformStageMask(intermediate);
return true;
}
void TReflection::dump()
{
printf("Uniform reflection:\n");
for (size_t i = 0; i < indexToUniform.size(); ++i)
indexToUniform[i].dump();
printf("\n");
printf("Uniform block reflection:\n");
for (size_t i = 0; i < indexToUniformBlock.size(); ++i)
indexToUniformBlock[i].dump();
printf("\n");
printf("Vertex attribute reflection:\n");
for (size_t i = 0; i < indexToAttribute.size(); ++i)
indexToAttribute[i].dump();
printf("\n");
if (getLocalSize(0) > 1) {
static const char* axis[] = { "X", "Y", "Z" };
for (int dim=0; dim<3; ++dim)
if (getLocalSize(dim) > 1)
printf("Local size %s: %d\n", axis[dim], getLocalSize(dim));
printf("\n");
}
// printf("Live names\n");
// for (TNameToIndex::const_iterator it = nameToIndex.begin(); it != nameToIndex.end(); ++it)
// printf("%s: %d\n", it->first.c_str(), it->second);
// printf("\n");
}
} // end namespace glslang
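In client code this traversal is normally reached through the C++ program interface rather than called directly. A minimal sketch of reading the resulting reflection data, assuming a glslang::TProgram named program that has already been linked successfully (the printf formatting is illustrative only, and the include path may differ per build layout):

#include <cstdio>
#include "../Public/ShaderLang.h"   // public interface; adjust the path to the build layout

// Hedged sketch: enumerate the live uniforms gathered by the reflection traverser above.
void dumpLiveUniforms(glslang::TProgram& program)
{
    if (!program.buildReflection())                 // runs TReflection::addStage() per linked stage
        return;

    for (int i = 0; i < program.getNumLiveUniformVariables(); ++i) {
        std::printf("%s: offset %d, glType 0x%x, arraySize %d, binding %d\n",
                    program.getUniformName(i),
                    program.getUniformBufferOffset(i),
                    program.getUniformType(i),
                    program.getUniformArraySize(i),
                    program.getUniformBinding(i));
    }
}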

View File

@ -0,0 +1,179 @@
//
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _REFLECTION_INCLUDED
#define _REFLECTION_INCLUDED
#include "../Public/ShaderLang.h"
#include "../Include/Types.h"
#include <list>
#include <set>
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
namespace glslang {
class TIntermediate;
class TIntermAggregate;
class TReflectionTraverser;
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
TObjectReflection(const TString& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex) :
name(pName), offset(pOffset),
glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1), type(pType.clone()), stages(EShLanguageMask(0)) { }
const TType* const getType() const { return type; }
int getBinding() const
{
if (type == nullptr || !type->getQualifier().hasBinding())
return -1;
return type->getQualifier().layoutBinding;
}
void dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d",
name.c_str(), offset, glDefineType, size, index, getBinding(), stages );
if (counterIndex != -1)
printf(", counter %d", counterIndex);
printf("\n");
}
static TObjectReflection badReflection() { return TObjectReflection(); }
TString name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
EShLanguageMask stages;
protected:
TObjectReflection() : offset(-1), glDefineType(-1), size(-1), index(-1), type(nullptr) { }
const TType* type;
};
// The full reflection database
class TReflection {
public:
TReflection() : badReflection(TObjectReflection::badReflection())
{
for (int dim=0; dim<3; ++dim)
localSize[dim] = 0;
}
virtual ~TReflection() {}
// grow the reflection stage by stage
bool addStage(EShLanguage, const TIntermediate&);
// for mapping a uniform index to a uniform object's description
int getNumUniforms() { return (int)indexToUniform.size(); }
const TObjectReflection& getUniform(int i) const
{
if (i >= 0 && i < (int)indexToUniform.size())
return indexToUniform[i];
else
return badReflection;
}
// for mapping a block index to the block's description
int getNumUniformBlocks() const { return (int)indexToUniformBlock.size(); }
const TObjectReflection& getUniformBlock(int i) const
{
if (i >= 0 && i < (int)indexToUniformBlock.size())
return indexToUniformBlock[i];
else
return badReflection;
}
// for mapping an attribute index to the attribute's description
int getNumAttributes() { return (int)indexToAttribute.size(); }
const TObjectReflection& getAttribute(int i) const
{
if (i >= 0 && i < (int)indexToAttribute.size())
return indexToAttribute[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and attribute names)
int getIndex(const char* name) const
{
TNameToIndex::const_iterator it = nameToIndex.find(name);
if (it == nameToIndex.end())
return -1;
else
return it->second;
}
// see getIndex(const char*)
int getIndex(const TString& name) const { return getIndex(name.c_str()); }
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
void dump();
protected:
friend class glslang::TReflectionTraverser;
void buildCounterIndices(const TIntermediate&);
void buildUniformStageMask(const TIntermediate& intermediate);
void buildAttributeReflection(EShLanguage, const TIntermediate&);
// Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
typedef std::map<TString, int> TNameToIndex;
typedef std::vector<TObjectReflection> TMapIndexToReflection;
TObjectReflection badReflection; // returned for queries of -1 or generally out-of-range indexes; carries the expected default descriptions for that case
TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
TMapIndexToReflection indexToUniform;
TMapIndexToReflection indexToUniformBlock;
TMapIndexToReflection indexToAttribute;
unsigned int localSize[3];
};
} // end namespace glslang
#endif // _REFLECTION_INCLUDED
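For reference, the by-name lookup path this interface exposes combines with the per-object accessors above; a small sketch assuming a populated TReflection named reflection (client code normally reaches this data through TProgram rather than through TReflection directly):

// Hedged sketch: map a block name to its index, then read its binding.
// Unknown names yield -1 from getIndex(); out-of-range indexes yield badReflection.
int lookupBlockBinding(const glslang::TReflection& reflection, const char* blockName)
{
    const int index = reflection.getIndex(blockName);
    if (index < 0)
        return -1;
    return reflection.getUniformBlock(index).getBinding();   // -1 when no binding is laid out
}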

View File

@ -0,0 +1,8 @@
add_library(OSDependent STATIC ossource.cpp ../osinclude.h)
set_property(TARGET OSDependent PROPERTY FOLDER glslang)
set_property(TARGET OSDependent PROPERTY POSITION_INDEPENDENT_CODE ON)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif(ENABLE_GLSLANG_INSTALL)

View File

@ -0,0 +1,204 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
//
// This file contains the Linux-specific functions
//
#include "../osinclude.h"
#include "../../../OGLCompilersDLL/InitializeDll.h"
#include <pthread.h>
#include <semaphore.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <cstdio>
#include <sys/time.h>
#include <sys/resource.h>
namespace glslang {
//
// Thread cleanup
//
//
// Wrapper for Linux call to DetachThread. This is required as pthread_cleanup_push() expects
// the cleanup routine to return void.
//
static void DetachThreadLinux(void *)
{
DetachThread();
}
//
// Registers the cleanup handler, sets the cancel type and state, and executes the
// thread-specific cleanup handler. This function is called from Standalone.cpp for
// regression testing. When OpenGL applications are run with the driver code, the
// Linux OS does the thread cleanup.
//
void OS_CleanupThreadData(void)
{
#ifdef __ANDROID__
DetachThreadLinux(NULL);
#else
int old_cancel_state, old_cancel_type;
void *cleanupArg = NULL;
//
// Set thread cancel state and push cleanup handler.
//
pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_cancel_state);
pthread_cleanup_push(DetachThreadLinux, (void *) cleanupArg);
//
// Put the thread in deferred cancellation mode.
//
pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &old_cancel_type);
//
// Pop cleanup handler and execute it prior to unregistering the cleanup handler.
//
pthread_cleanup_pop(1);
//
// Restore the thread's previous cancellation mode.
//
pthread_setcanceltype(old_cancel_type, NULL);
#endif
}
//
// Thread Local Storage Operations
//
inline OS_TLSIndex PthreadKeyToTLSIndex(pthread_key_t key)
{
return (OS_TLSIndex)((uintptr_t)key + 1);
}
inline pthread_key_t TLSIndexToPthreadKey(OS_TLSIndex nIndex)
{
return (pthread_key_t)((uintptr_t)nIndex - 1);
}
OS_TLSIndex OS_AllocTLSIndex()
{
pthread_key_t pPoolIndex;
//
// Create global pool key.
//
if ((pthread_key_create(&pPoolIndex, NULL)) != 0) {
assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
return OS_INVALID_TLS_INDEX;
}
else
return PthreadKeyToTLSIndex(pPoolIndex);
}
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (pthread_setspecific(TLSIndexToPthreadKey(nIndex), lpvValue) == 0)
return true;
else
return false;
}
void* OS_GetTLSValue(OS_TLSIndex nIndex)
{
//
// This function should return 0 if nIndex is invalid.
//
assert(nIndex != OS_INVALID_TLS_INDEX);
return pthread_getspecific(TLSIndexToPthreadKey(nIndex));
}
bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
{
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
//
// Delete the global pool key.
//
if (pthread_key_delete(TLSIndexToPthreadKey(nIndex)) == 0)
return true;
else
return false;
}
namespace {
pthread_mutex_t gMutex;
}
void InitGlobalLock()
{
pthread_mutexattr_t mutexattr;
pthread_mutexattr_init(&mutexattr);
pthread_mutexattr_settype(&mutexattr, PTHREAD_MUTEX_RECURSIVE);
pthread_mutex_init(&gMutex, &mutexattr);
}
void GetGlobalLock()
{
pthread_mutex_lock(&gMutex);
}
void ReleaseGlobalLock()
{
pthread_mutex_unlock(&gMutex);
}
// #define DUMP_COUNTERS
void OS_DumpMemoryCounters()
{
#ifdef DUMP_COUNTERS
struct rusage usage;
if (getrusage(RUSAGE_SELF, &usage) == 0)
printf("Working set size: %ld\n", usage.ru_maxrss * 1024);
#else
printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
#endif
}
} // end namespace glslang
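The functions above implement the small platform-neutral TLS contract declared in osinclude.h. A usage sketch, assuming the caller keeps the stored object alive for as long as the slot holds a pointer to it (PerThreadState is a hypothetical payload type, not part of glslang):

// Hedged sketch of the OS_*TLS* contract implemented above (osinclude.h is already included).
struct PerThreadState { int poolIndex; };            // hypothetical per-thread payload

static void TlsUsageSketch()
{
    glslang::OS_TLSIndex index = glslang::OS_AllocTLSIndex();
    if (index == OS_INVALID_TLS_INDEX)
        return;                                      // allocation failed

    PerThreadState state = { 0 };
    glslang::OS_SetTLSValue(index, &state);          // store a per-thread pointer

    PerThreadState* fetched =
        static_cast<PerThreadState*>(glslang::OS_GetTLSValue(index));
    (void)fetched;                                   // same pointer, on this thread only

    glslang::OS_FreeTLSIndex(index);                 // release the key when done
}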

View File

@ -0,0 +1,20 @@
set(SOURCES ossource.cpp ../osinclude.h)
add_library(OSDependent STATIC ${SOURCES})
set_property(TARGET OSDependent PROPERTY FOLDER glslang)
set_property(TARGET OSDependent PROPERTY POSITION_INDEPENDENT_CODE ON)
# MinGW GCC complains about function pointer casts to void*.
# Turn that off with -fpermissive.
if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
target_compile_options(OSDependent PRIVATE -fpermissive)
endif()
if(WIN32)
source_group("Source" FILES ${SOURCES})
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif(ENABLE_GLSLANG_INSTALL)

View File

@ -0,0 +1,74 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "InitializeDll.h"
#define STRICT
#define VC_EXTRALEAN 1
#include <windows.h>
#include <assert.h>
BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{
switch (fdwReason)
{
case DLL_PROCESS_ATTACH:
if (! glslang::InitProcess())
return FALSE;
break;
case DLL_THREAD_ATTACH:
if (! glslang::InitThread())
return FALSE;
break;
case DLL_THREAD_DETACH:
if (! glslang::DetachThread())
return FALSE;
break;
case DLL_PROCESS_DETACH:
glslang::DetachProcess();
break;
default:
assert(0 && "DllMain(): Reason for calling DLL Main is unknown");
return FALSE;
}
return TRUE;
}

View File

@ -0,0 +1,147 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include "../osinclude.h"
#define STRICT
#define VC_EXTRALEAN 1
#include <windows.h>
#include <cassert>
#include <process.h>
#include <psapi.h>
#include <cstdio>
#include <cstdint>
//
// This file contains the Windows-OS-specific functions
//
#if !(defined(_WIN32) || defined(_WIN64))
#error Trying to build a Windows-specific file in a non-Windows build.
#endif
namespace glslang {
inline OS_TLSIndex ToGenericTLSIndex (DWORD handle)
{
return (OS_TLSIndex)((uintptr_t)handle + 1);
}
inline DWORD ToNativeTLSIndex (OS_TLSIndex nIndex)
{
return (DWORD)((uintptr_t)nIndex - 1);
}
//
// Thread Local Storage Operations
//
OS_TLSIndex OS_AllocTLSIndex()
{
DWORD dwIndex = TlsAlloc();
if (dwIndex == TLS_OUT_OF_INDEXES) {
assert(0 && "OS_AllocTLSIndex(): Unable to allocate Thread Local Storage");
return OS_INVALID_TLS_INDEX;
}
return ToGenericTLSIndex(dwIndex);
}
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue)
{
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (TlsSetValue(ToNativeTLSIndex(nIndex), lpvValue))
return true;
else
return false;
}
void* OS_GetTLSValue(OS_TLSIndex nIndex)
{
assert(nIndex != OS_INVALID_TLS_INDEX);
return TlsGetValue(ToNativeTLSIndex(nIndex));
}
bool OS_FreeTLSIndex(OS_TLSIndex nIndex)
{
if (nIndex == OS_INVALID_TLS_INDEX) {
assert(0 && "OS_SetTLSValue(): Invalid TLS Index");
return false;
}
if (TlsFree(ToNativeTLSIndex(nIndex)))
return true;
else
return false;
}
HANDLE GlobalLock;
void InitGlobalLock()
{
GlobalLock = CreateMutex(0, false, 0);
}
void GetGlobalLock()
{
WaitForSingleObject(GlobalLock, INFINITE);
}
void ReleaseGlobalLock()
{
ReleaseMutex(GlobalLock);
}
unsigned int __stdcall EnterGenericThread (void* entry)
{
return ((TThreadEntrypoint)entry)(0);
}
//#define DUMP_COUNTERS
void OS_DumpMemoryCounters()
{
#ifdef DUMP_COUNTERS
PROCESS_MEMORY_COUNTERS counters;
GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters));
printf("Working set size: %d\n", counters.WorkingSetSize);
#else
printf("Recompile with DUMP_COUNTERS defined to see counters.\n");
#endif
}
} // namespace glslang

View File

@ -0,0 +1,63 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef __OSINCLUDE_H
#define __OSINCLUDE_H
namespace glslang {
//
// Thread Local Storage Operations
//
typedef void* OS_TLSIndex;
#define OS_INVALID_TLS_INDEX ((void*)0)
OS_TLSIndex OS_AllocTLSIndex();
bool OS_SetTLSValue(OS_TLSIndex nIndex, void *lpvValue);
bool OS_FreeTLSIndex(OS_TLSIndex nIndex);
void* OS_GetTLSValue(OS_TLSIndex nIndex);
void InitGlobalLock();
void GetGlobalLock();
void ReleaseGlobalLock();
typedef unsigned int (*TThreadEntrypoint)(void*);
void OS_CleanupThreadData(void);
void OS_DumpMemoryCounters();
} // end namespace glslang
#endif // __OSINCLUDE_H
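The global lock above is taken and released through explicitly paired calls; one way a caller could keep those calls balanced is a small RAII guard like the sketch below (editorial illustration only, not part of glslang; it assumes InitGlobalLock() has already run once for the process):

// Hedged sketch: scope-based wrapper over GetGlobalLock()/ReleaseGlobalLock().
class GlobalLockGuard {
public:
    GlobalLockGuard()  { glslang::GetGlobalLock(); }
    ~GlobalLockGuard() { glslang::ReleaseGlobalLock(); }
private:
    GlobalLockGuard(const GlobalLockGuard&);             // non-copyable
    GlobalLockGuard& operator=(const GlobalLockGuard&);
};

inline void ProtectedWorkSketch()
{
    GlobalLockGuard guard;    // lock released automatically on scope exit
    // ... touch process-wide state here ...
}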

View File

@ -0,0 +1,715 @@
//
// Copyright (C) 2002-2005 3Dlabs Inc. Ltd.
// Copyright (C) 2013-2016 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef _COMPILER_INTERFACE_INCLUDED_
#define _COMPILER_INTERFACE_INCLUDED_
#include "../Include/ResourceLimits.h"
#include "../MachineIndependent/Versions.h"
#include <cstring>
#include <vector>
#ifdef _WIN32
#define C_DECL __cdecl
//#ifdef SH_EXPORTING
// #define SH_IMPORT_EXPORT __declspec(dllexport)
//#else
// #define SH_IMPORT_EXPORT __declspec(dllimport)
//#endif
#define SH_IMPORT_EXPORT
#else
#define SH_IMPORT_EXPORT
#ifndef __fastcall
#define __fastcall
#endif
#define C_DECL
#endif
//
// This is the platform independent interface between an OGL driver
// and the shading language compiler/linker.
//
#ifdef __cplusplus
extern "C" {
#endif
// This should always increase, as some paths do not consume
// a more major number.
// It should increment by one when new functionality is added.
#define GLSLANG_MINOR_VERSION 8
//
// Call before doing any other compiler/linker operations.
//
// (Call once per process, not once per thread.)
//
SH_IMPORT_EXPORT int ShInitialize();
//
// Call this at process shutdown to clean up memory.
//
SH_IMPORT_EXPORT int __fastcall ShFinalize();
//
// Types of languages the compiler can consume.
//
typedef enum {
EShLangVertex,
EShLangTessControl,
EShLangTessEvaluation,
EShLangGeometry,
EShLangFragment,
EShLangCompute,
EShLangCount,
} EShLanguage; // would be better as stage, but this is ancient now
typedef enum {
EShLangVertexMask = (1 << EShLangVertex),
EShLangTessControlMask = (1 << EShLangTessControl),
EShLangTessEvaluationMask = (1 << EShLangTessEvaluation),
EShLangGeometryMask = (1 << EShLangGeometry),
EShLangFragmentMask = (1 << EShLangFragment),
EShLangComputeMask = (1 << EShLangCompute),
} EShLanguageMask;
namespace glslang {
class TType;
typedef enum {
EShSourceNone,
EShSourceGlsl,
EShSourceHlsl,
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
typedef enum {
EShClientNone,
EShClientVulkan,
EShClientOpenGL,
} EShClient;
typedef enum {
EShTargetNone,
EShTargetSpv, // preferred spelling
EshTargetSpv = EShTargetSpv, // legacy spelling
} EShTargetLanguage;
typedef enum {
EShTargetVulkan_1_0 = (1 << 22),
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),
EShTargetOpenGL_450 = 450,
} EShTargetClientVersion;
typedef EShTargetClientVersion EshTargetClientVersion;
typedef enum {
EShTargetSpv_1_0 = (1 << 16),
EShTargetSpv_1_3 = (1 << 16) | (3 << 8),
} EShTargetLanguageVersion;
struct TInputLanguage {
EShSource languageFamily; // redundant information with other input, this one overrides when not EShSourceNone
EShLanguage stage; // redundant information with other input, this one overrides when not EShSourceNone
EShClient dialect;
int dialectVersion; // version of client's language definition, not the client (when not EShClientNone)
};
struct TClient {
EShClient client;
EShTargetClientVersion version; // version of client itself (not the client's input dialect)
};
struct TTarget {
EShTargetLanguage language;
EShTargetLanguageVersion version; // version to target, if SPIR-V, defined by "word 1" of the SPIR-V header
bool hlslFunctionality1; // can target hlsl_functionality1 extension(s)
};
// All source/client/target versions and settings.
// Can override previous methods of setting, when items are set here.
// Expected to grow, as more are added, rather than growing parameter lists.
struct TEnvironment {
TInputLanguage input; // definition of the input language
TClient client; // what client is the overall compilation being done for?
TTarget target; // what to generate
};
const char* StageName(EShLanguage);
} // end namespace glslang
//
// Types of output the linker will create.
//
typedef enum {
EShExVertexFragment,
EShExFragment
} EShExecutable;
//
// Optimization level for the compiler.
//
typedef enum {
EShOptNoGeneration,
EShOptNone,
EShOptSimple, // Optimizations that can be done quickly
EShOptFull, // Optimizations that will take more time
} EShOptimizationLevel;
//
// Texture and Sampler transformation mode.
//
typedef enum {
EShTexSampTransKeep, // keep textures and samplers as is (default)
EShTexSampTransUpgradeTextureRemoveSampler, // change texture w/o embedded sampler into sampled texture and throw away all samplers
} EShTextureSamplerTransformMode;
//
// Message choices for what errors and warnings are given.
//
enum EShMessages {
EShMsgDefault = 0, // default is to give all required errors and extra warnings
EShMsgRelaxedErrors = (1 << 0), // be liberal in accepting input
EShMsgSuppressWarnings = (1 << 1), // suppress all warnings, except those required by the specification
EShMsgAST = (1 << 2), // print the AST intermediate representation
EShMsgSpvRules = (1 << 3), // issue messages for SPIR-V generation
EShMsgVulkanRules = (1 << 4), // issue messages for Vulkan-requirements of GLSL for SPIR-V
EShMsgOnlyPreprocessor = (1 << 5), // only print out errors produced by the preprocessor
EShMsgReadHlsl = (1 << 6), // use HLSL parsing rules and semantics
EShMsgCascadingErrors = (1 << 7), // get cascading errors; risks error-recovery issues, instead of an early exit
EShMsgKeepUncalled = (1 << 8), // for testing, don't eliminate uncalled functions
EShMsgHlslOffsets = (1 << 9), // allow block offsets to follow HLSL rules instead of GLSL rules
EShMsgDebugInfo = (1 << 10), // save debug information
EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
};
//
// Build a table for bindings. This can be used for locating
// attributes, uniforms, globals, etc., as needed.
//
typedef struct {
const char* name;
int binding;
} ShBinding;
typedef struct {
int numBindings;
ShBinding* bindings; // array of bindings
} ShBindingTable;
//
// ShHandle held by but opaque to the driver. It is allocated,
// managed, and de-allocated by the compiler/linker. Its contents
// are defined by and used by the compiler and linker. For example,
// symbol table information and object code passed from the compiler
// to the linker can be stored where ShHandle points.
//
// If handle creation fails, 0 will be returned.
//
typedef void* ShHandle;
//
// Driver calls these to create and destroy compiler/linker
// objects.
//
SH_IMPORT_EXPORT ShHandle ShConstructCompiler(const EShLanguage, int debugOptions); // one per shader
SH_IMPORT_EXPORT ShHandle ShConstructLinker(const EShExecutable, int debugOptions); // one per shader pair
SH_IMPORT_EXPORT ShHandle ShConstructUniformMap(); // one per uniform namespace (currently entire program object)
SH_IMPORT_EXPORT void ShDestruct(ShHandle);
//
// The return value of ShCompile is boolean, non-zero indicating
// success.
//
// The info-log should be written by ShCompile into
// ShHandle, so it can answer future queries.
//
SH_IMPORT_EXPORT int ShCompile(
const ShHandle,
const char* const shaderStrings[],
const int numStrings,
const int* lengths,
const EShOptimizationLevel,
const TBuiltInResource *resources,
int debugOptions,
int defaultVersion = 110, // use 100 for ES environment, overridden by #version in shader
bool forwardCompatible = false, // give errors for use of deprecated features
EShMessages messages = EShMsgDefault // warnings and errors
);
SH_IMPORT_EXPORT int ShLinkExt(
const ShHandle, // linker object
const ShHandle h[], // compiler objects to link together
const int numHandles);
//
// ShSetEncryptionMethod is a placeholder for specifying
// how source code is encrypted.
//
SH_IMPORT_EXPORT void ShSetEncryptionMethod(ShHandle);
//
// All the following return 0 if the information is not
// available in the object passed down, or the object is bad.
//
SH_IMPORT_EXPORT const char* ShGetInfoLog(const ShHandle);
SH_IMPORT_EXPORT const void* ShGetExecutable(const ShHandle);
SH_IMPORT_EXPORT int ShSetVirtualAttributeBindings(const ShHandle, const ShBindingTable*); // to detect user aliasing
SH_IMPORT_EXPORT int ShSetFixedAttributeBindings(const ShHandle, const ShBindingTable*); // to force any physical mappings
//
// Tell the linker to never assign a vertex attribute to this list of physical attributes
//
SH_IMPORT_EXPORT int ShExcludeAttributes(const ShHandle, int *attributes, int count);
//
// Returns the location ID of the named uniform.
// Returns -1 if error.
//
SH_IMPORT_EXPORT int ShGetUniformLocation(const ShHandle uniformMap, const char* name);
#ifdef __cplusplus
} // end extern "C"
#endif
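A hedged sketch of driving the handle-based C interface above; the fragment source and the TBuiltInResource limits are assumed to be supplied by the caller, and the newer C++ interface below is generally preferred:

// Hedged sketch only: compile a single fragment shader through the C-style API.
inline int CompileWithHandlesSketch(const char* fragmentSource, const TBuiltInResource* resources)
{
    ShInitialize();                                            // once per process

    ShHandle compiler = ShConstructCompiler(EShLangFragment, /*debugOptions*/ 0);
    if (compiler == 0)
        return 0;

    const char* strings[] = { fragmentSource };
    int ok = ShCompile(compiler, strings, 1, /*lengths*/ 0,
                       EShOptNone, resources, /*debugOptions*/ 0);
    const char* log = ShGetInfoLog(compiler);                  // diagnostics on failure
    (void)log;

    ShDestruct(compiler);
    ShFinalize();                                              // once per process, at shutdown
    return ok;
}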
////////////////////////////////////////////////////////////////////////////////////////////
//
// Deferred-Lowering C++ Interface
// -----------------------------------
//
// Below is a new alternate C++ interface, which deprecates the above
// opaque handle-based interface.
//
// The below is further designed to handle multiple compilation units per stage, where
// the intermediate results, including the parse tree, are preserved until link time,
// rather than the above interface which is designed to have each compilation unit
// lowered at compile time. In the above model, linking occurs on the lowered results,
// whereas in this model intra-stage linking can occur at the parse tree
// (treeRoot in TIntermediate) level, and then a full stage can be lowered.
//
#include <list>
#include <string>
#include <utility>
class TCompiler;
class TInfoSink;
namespace glslang {
const char* GetEsslVersionString();
const char* GetGlslVersionString();
int GetKhronosToolId();
class TIntermediate;
class TProgram;
class TPoolAllocator;
// Call this exactly once per process before using anything else
bool InitializeProcess();
// Call once per process to tear down everything
void FinalizeProcess();
// Resource type for IO resolver
enum TResourceType {
EResSampler,
EResTexture,
EResImage,
EResUbo,
EResSsbo,
EResUav,
EResCount
};
// Make one TShader per shader that you will link into a program. Then
// - provide the shader through setStrings() or setStringsWithLengths()
// - optionally call setEnv*(), see below for more detail
// - optionally use setPreamble() to set a special shader string that will be
// processed before all others but won't affect the validity of #version
// - call parse(): source language and target environment must be selected
// either by correct setting of EShMessages sent to parse(), or by
// explicitly calling setEnv*()
// - query the info logs
//
// N.B.: Does not yet support having the same TShader instance being linked into
// multiple programs.
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
class TShader {
public:
explicit TShader(EShLanguage);
virtual ~TShader();
void setStrings(const char* const* s, int n);
void setStringsWithLengths(const char* const* s, const int* l, int n);
void setStringsWithLengthsAndNames(
const char* const* s, const int* l, const char* const* names, int n);
void setPreamble(const char* s) { preamble = s; }
void setEntryPoint(const char* entryPoint);
void setSourceEntryPoint(const char* sourceEntryPointName);
void addProcesses(const std::vector<std::string>&);
// IO resolver binding data: see comments in ShaderLang.cpp
void setShiftBinding(TResourceType res, unsigned int base);
void setShiftSamplerBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftTextureBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftImageBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftUboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftUavBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftCbufferBinding(unsigned int base); // synonym for setShiftUboBinding
void setShiftSsboBinding(unsigned int base); // DEPRECATED: use setShiftBinding
void setShiftBindingForSet(TResourceType res, unsigned int base, unsigned int set);
void setResourceSetBinding(const std::vector<std::string>& base);
void setAutoMapBindings(bool map);
void setAutoMapLocations(bool map);
void setInvertY(bool invert);
void setHlslIoMapping(bool hlslIoMap);
void setFlattenUniformArrays(bool flatten);
void setNoStorageFormat(bool useUnknownFormat);
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
// For setting up the environment (cleared to nothingness in the constructor).
// These must be called so that parsing is done for the right source language and
// target environment, either indirectly through TranslateEnvironment() based on
// EShMessages et al., or directly by the user.
void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
{
environment.input.languageFamily = lang;
environment.input.stage = envStage;
environment.input.dialect = client;
environment.input.dialectVersion = version;
}
void setEnvClient(EShClient client, EShTargetClientVersion version)
{
environment.client.client = client;
environment.client.version = version;
}
void setEnvTarget(EShTargetLanguage lang, EShTargetLanguageVersion version)
{
environment.target.language = lang;
environment.target.version = version;
}
void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
// Interface to #include handlers.
//
// To support #include, a client of Glslang does the following:
// 1. Call setStringsWithLengthsAndNames to set the source strings and associated
// names. For example, the names could be the names of the files
// containing the shader sources.
// 2. Call parse with an Includer.
//
// When the Glslang parser encounters an #include directive, it calls
// the Includer's include method with the requested include name
// together with the current string name. The returned IncludeResult
// contains the fully resolved name of the included source, together
// with the source text that should replace the #include directive
// in the source stream. After parsing that source, Glslang will
// release the IncludeResult object.
class Includer {
public:
// An IncludeResult contains the resolved name and content of a source
// inclusion.
struct IncludeResult {
IncludeResult(const std::string& headerName, const char* const headerData, const size_t headerLength, void* userData) :
headerName(headerName), headerData(headerData), headerLength(headerLength), userData(userData) { }
// For a successful inclusion, the fully resolved name of the requested
// include. For example, in a file system-based includer, full resolution
// should convert a relative path name into an absolute path name.
// For a failed inclusion, this is an empty string.
const std::string headerName;
// The content and byte length of the requested inclusion. The
// Includer producing this IncludeResult retains ownership of the
// storage.
// For a failed inclusion, the headerData
// field points to a string containing error details.
const char* const headerData;
const size_t headerLength;
// Include resolver's context.
void* userData;
protected:
IncludeResult& operator=(const IncludeResult&);
IncludeResult();
};
// For both include methods below:
//
// Resolves an inclusion request by name, current source name,
// and include depth.
// On success, returns an IncludeResult containing the resolved name
// and content of the include.
// On failure, returns a nullptr, or an IncludeResult
// with an empty string for the headerName and error details in the
// headerData field.
// The Includer retains ownership of the contents
// of the returned IncludeResult value, and those contents must
// remain valid until the releaseInclude method is called on that
// IncludeResult object.
//
// Note "local" vs. "system" is not an "either/or": "local" is an
// extra thing to do over "system". Both might get called, as per
// the C++ specification.
// For the "system" or <>-style includes; search the "system" paths.
virtual IncludeResult* includeSystem(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// For the "local"-only aspect of a "" include. Should not search in the
// "system" paths, because on returning a failure, the parser will
// call includeSystem() to look in the "system" locations.
virtual IncludeResult* includeLocal(const char* /*headerName*/,
const char* /*includerName*/,
size_t /*inclusionDepth*/) { return nullptr; }
// Signals that the parser will no longer use the contents of the
// specified IncludeResult.
virtual void releaseInclude(IncludeResult*) = 0;
virtual ~Includer() {}
};
// Fail all Includer searches
class ForbidIncluder : public Includer {
public:
virtual void releaseInclude(IncludeResult*) override { }
};
bool parse(const TBuiltInResource*, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages, Includer&);
bool parse(const TBuiltInResource* res, int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages messages)
{
TShader::ForbidIncluder includer;
return parse(res, defaultVersion, defaultProfile, forceDefaultVersionAndProfile, forwardCompatible, messages, includer);
}
// Equivalent to parse() without a default profile and without forcing defaults.
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages);
}
bool parse(const TBuiltInResource* builtInResources, int defaultVersion, bool forwardCompatible, EShMessages messages,
Includer& includer)
{
return parse(builtInResources, defaultVersion, ENoProfile, false, forwardCompatible, messages, includer);
}
bool preprocess(const TBuiltInResource* builtInResources,
int defaultVersion, EProfile defaultProfile, bool forceDefaultVersionAndProfile,
bool forwardCompatible, EShMessages message, std::string* outputString,
Includer& includer);
const char* getInfoLog();
const char* getInfoDebugLog();
EShLanguage getStage() const { return stage; }
TIntermediate* getIntermediate() const { return intermediate; }
protected:
TPoolAllocator* pool;
EShLanguage stage;
TCompiler* compiler;
TIntermediate* intermediate;
TInfoSink* infoSink;
// strings and lengths follow the standard for glShaderSource:
// strings is an array of numStrings pointers to string data.
// lengths can be null, but if not it is an array of numStrings
// integers containing the length of the associated strings.
// if lengths is null or lengths[n] < 0 the associated strings[n] is
// assumed to be null-terminated.
// stringNames holds the optional names for all the strings. If stringNames
// is null, then none of the strings has a name. If a certain element in
// stringNames is null, then the corresponding string does not have a name.
const char* const* strings;
const int* lengths;
const char* const* stringNames;
const char* preamble;
int numStrings;
// a function in the source string can be renamed FROM this TO the name given in setEntryPoint.
std::string sourceEntryPointName;
TEnvironment environment;
friend class TProgram;
private:
TShader& operator=(TShader&);
};
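The Includer contract documented inside TShader above can be satisfied by something as small as an in-memory table; a hedged sketch follows (PreloadedIncluder and its lookup table are illustrative names, not part of glslang):

// Hedged sketch: serves #include requests from preloaded (name, contents) pairs.
// Returning nullptr from includeLocal() lets the parser fall back to includeSystem().
class PreloadedIncluder : public TShader::Includer {
public:
    void add(const std::string& name, const std::string& contents)
    {
        headers.push_back(std::make_pair(name, contents));
    }

    virtual IncludeResult* includeLocal(const char* headerName, const char* /*includerName*/,
                                        size_t /*inclusionDepth*/) override
    {
        for (size_t i = 0; i < headers.size(); ++i)
            if (headers[i].first == headerName)
                return new IncludeResult(headers[i].first,
                                         headers[i].second.data(),
                                         headers[i].second.size(),
                                         /*userData*/ nullptr);
        return nullptr;
    }

    virtual void releaseInclude(IncludeResult* result) override { delete result; }

private:
    std::vector<std::pair<std::string, std::string> > headers;  // owned for the Includer's lifetime
};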
class TReflection;
class TIoMapper;
// Allows the client to customize the binding layout after linking.
// All used uniform variables will invoke at least validateBinding.
// If validateBinding returns true, then resolveBinding, resolveSet, and
// resolveUniformLocation are invoked to resolve the binding, descriptor set,
// and location respectively.
//
// Invocations happen in a particular order:
// 1) all shader inputs
// 2) all shader outputs
// 3) all uniforms with binding and set already defined
// 4) all uniforms with binding but no set defined
// 5) all uniforms with set but no binding defined
// 6) all uniforms with no binding and no set defined
//
// mapIO will use this resolver in two phases. The first
// phase is a notification phase, calling the corresponding
// notify callbacks; this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets,
// and may result in an error.
class TIoMapResolver
{
public:
virtual ~TIoMapResolver() {}
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of a in or out variable
virtual void notifyInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
};
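As described above, returning -1 from the resolve callbacks keeps whatever binding, set, or location is already present; a hedged sketch of the simplest conforming resolver (it validates everything and overrides nothing, and would be passed to TProgram::mapIO below):

// Hedged sketch: a pass-through resolver for mapIO(&resolver).
class KeepCurrentAssignmentsResolver : public TIoMapResolver {
public:
    virtual bool validateBinding(EShLanguage, const char*, const TType&, bool) override { return true; }
    virtual int  resolveBinding(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual int  resolveSet(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual int  resolveUniformLocation(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual bool validateInOut(EShLanguage, const char*, const TType&, bool) override { return true; }
    virtual int  resolveInOutLocation(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual int  resolveInOutComponent(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual int  resolveInOutIndex(EShLanguage, const char*, const TType&, bool) override { return -1; }
    virtual void notifyBinding(EShLanguage, const char*, const TType&, bool) override { }
    virtual void notifyInOut(EShLanguage, const char*, const TType&, bool) override { }
    virtual void endNotifications(EShLanguage) override { }
    virtual void beginNotifications(EShLanguage) override { }
    virtual void beginResolve(EShLanguage) override { }
    virtual void endResolve(EShLanguage) override { }
};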
// Make one TProgram per set of shaders that will get linked together. Add all
// the shaders that are to be linked together. After calling shader.parse()
// for all shaders, call link().
//
// N.B.: Destruct a linked program *before* destructing the shaders linked into it.
//
class TProgram {
public:
TProgram();
virtual ~TProgram();
void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
// Link Validation interface
bool link(EShMessages);
const char* getInfoLog();
const char* getInfoDebugLog();
TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
// Reflection Interface
bool buildReflection(); // call first, to do liveness analysis, index mapping, etc.; returns false on failure
int getNumLiveUniformVariables() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformBlocks() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
const char* getUniformName(int index) const; // can be used for "name" part of glGetActiveUniform()
const char* getUniformBlockName(int blockIndex) const; // can be used for glGetActiveUniformBlockName()
int getUniformBlockSize(int blockIndex) const; // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformIndex(const char* name) const; // can be used for glGetUniformIndices()
int getUniformBinding(int index) const; // returns the binding number
EShLanguageMask getUniformStages(int index) const; // returns Shaders Stages where a Uniform is present
int getUniformBlockBinding(int index) const; // returns the block binding number
int getUniformBlockIndex(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockCounterIndex(int index) const; // returns block index of associated counter.
int getUniformType(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformBufferOffset(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformArraySize(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getNumLiveAttributes() const; // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
unsigned getLocalSize(int dim) const; // return dim'th local size
const char *getAttributeName(int index) const; // can be used for glGetActiveAttrib()
int getAttributeType(int index) const; // can be used for glGetActiveAttrib()
const TType* getUniformTType(int index) const; // returns a TType*
const TType* getUniformBlockTType(int index) const; // returns a TType*
const TType* getAttributeTType(int index) const; // returns a TType*
void dumpReflection();
// I/O mapping: apply base offsets and map live unbound variables
// If a resolver is not provided, it uses the previous approach
// and respects auto assignment and offsets.
bool mapIO(TIoMapResolver* resolver = NULL);
protected:
bool linkStage(EShLanguage, EShMessages);
TPoolAllocator* pool;
std::list<TShader*> stages[EShLangCount];
TIntermediate* intermediate[EShLangCount];
bool newedIntermediate[EShLangCount]; // track which intermediates were "new" versus reusing a singleton unit in a stage
TInfoSink* infoSink;
TReflection* reflection;
TIoMapper* ioMapper;
bool linked;
private:
TProgram(TProgram&);
TProgram& operator=(TProgram&);
};
} // end namespace glslang
#endif // _COMPILER_INTERFACE_INCLUDED_
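Finally, a hedged end-to-end sketch of the deferred-lowering interface declared above: compile a vertex and a fragment shader for a Vulkan/SPIR-V target and link them. The GLSL strings and the TBuiltInResource limits are assumed to come from the caller, and SPIR-V emission itself lives in the separate SPIRV component (GlslangToSpv), so it is only indicated here:

// Hedged sketch, not part of glslang: shows the intended call sequence only.
bool CompileAndLinkSketch(const char* vertexGlsl, const char* fragmentGlsl,
                          const TBuiltInResource& resources)
{
    glslang::InitializeProcess();                              // once per process
    bool ok = false;
    {
        glslang::TShader vert(EShLangVertex);
        glslang::TShader frag(EShLangFragment);
        vert.setStrings(&vertexGlsl, 1);
        frag.setStrings(&fragmentGlsl, 1);

        // Select GLSL input targeting Vulkan 1.0 / SPIR-V 1.0.
        glslang::TShader* shaders[] = { &vert, &frag };
        for (glslang::TShader* shader : shaders) {
            shader->setEnvInput(glslang::EShSourceGlsl, shader->getStage(),
                                glslang::EShClientVulkan, /*dialectVersion*/ 100);
            shader->setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_0);
            shader->setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_0);
        }

        const EShMessages messages = EShMessages(EShMsgSpvRules | EShMsgVulkanRules);
        ok = vert.parse(&resources, /*defaultVersion*/ 450, false, messages) &&
             frag.parse(&resources, /*defaultVersion*/ 450, false, messages);

        glslang::TProgram program;               // destroyed before the shaders, as required above
        program.addShader(&vert);
        program.addShader(&frag);
        ok = ok && program.link(messages);

        // program.getIntermediate(EShLangVertex) would now feed the SPIR-V back end (GlslangToSpv).
    }
    glslang::FinalizeProcess();                                // once per process, at shutdown
    return ok;
}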

View File

@ -0,0 +1,3 @@
#!/usr/bin/env bash
bison --defines=MachineIndependent/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o MachineIndependent/glslang_tab.cpp

View File

@ -0,0 +1,84 @@
set(SOURCES
GlslangToSpv.cpp
InReadableOrder.cpp
Logger.cpp
SpvBuilder.cpp
doc.cpp
disassemble.cpp)
set(SPVREMAP_SOURCES
SPVRemapper.cpp
doc.cpp)
set(HEADERS
bitutils.h
spirv.hpp
GLSL.std.450.h
GLSL.ext.EXT.h
GLSL.ext.KHR.h
GlslangToSpv.h
hex_float.h
Logger.h
SpvBuilder.h
spvIR.h
doc.h
disassemble.h)
set(SPVREMAP_HEADERS
SPVRemapper.h
doc.h)
if(ENABLE_AMD_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.AMD.h)
endif(ENABLE_AMD_EXTENSIONS)
if(ENABLE_NV_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.NV.h)
endif(ENABLE_NV_EXTENSIONS)
add_library(SPIRV ${LIB_TYPE} ${SOURCES} ${HEADERS})
set_property(TARGET SPIRV PROPERTY FOLDER glslang)
set_property(TARGET SPIRV PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(SPIRV PUBLIC ..)
add_library(SPVRemapper ${LIB_TYPE} ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
set_property(TARGET SPVRemapper PROPERTY FOLDER glslang)
set_property(TARGET SPVRemapper PROPERTY POSITION_INDEPENDENT_CODE ON)
if(WIN32 AND BUILD_SHARED_LIBS)
set_target_properties(SPIRV PROPERTIES PREFIX "")
set_target_properties(SPVRemapper PROPERTIES PREFIX "")
endif()
if(ENABLE_OPT)
target_include_directories(SPIRV
PRIVATE ${spirv-tools_SOURCE_DIR}/include
PRIVATE ${spirv-tools_SOURCE_DIR}/source
)
target_link_libraries(SPIRV glslang SPIRV-Tools-opt)
target_include_directories(SPIRV PUBLIC ../External)
else()
target_link_libraries(SPIRV glslang)
endif(ENABLE_OPT)
if(WIN32)
source_group("Source" FILES ${SOURCES} ${HEADERS})
source_group("Source" FILES ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
if(BUILD_SHARED_LIBS)
install(TARGETS SPIRV SPVRemapper
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
else()
install(TARGETS SPIRV SPVRemapper
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/SPIRV/)
endif(ENABLE_GLSLANG_INSTALL)

View File

@ -0,0 +1,108 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextAMD_H
#define GLSLextAMD_H
static const int GLSLextAMDVersion = 100;
static const int GLSLextAMDRevision = 7;
// SPV_AMD_shader_ballot
static const char* const E_SPV_AMD_shader_ballot = "SPV_AMD_shader_ballot";
enum ShaderBallotAMD {
ShaderBallotBadAMD = 0, // Don't use
SwizzleInvocationsAMD = 1,
SwizzleInvocationsMaskedAMD = 2,
WriteInvocationAMD = 3,
MbcntAMD = 4,
ShaderBallotCountAMD
};
// SPV_AMD_shader_trinary_minmax
static const char* const E_SPV_AMD_shader_trinary_minmax = "SPV_AMD_shader_trinary_minmax";
enum ShaderTrinaryMinMaxAMD {
ShaderTrinaryMinMaxBadAMD = 0, // Don't use
FMin3AMD = 1,
UMin3AMD = 2,
SMin3AMD = 3,
FMax3AMD = 4,
UMax3AMD = 5,
SMax3AMD = 6,
FMid3AMD = 7,
UMid3AMD = 8,
SMid3AMD = 9,
ShaderTrinaryMinMaxCountAMD
};
// SPV_AMD_shader_explicit_vertex_parameter
static const char* const E_SPV_AMD_shader_explicit_vertex_parameter = "SPV_AMD_shader_explicit_vertex_parameter";
enum ShaderExplicitVertexParameterAMD {
ShaderExplicitVertexParameterBadAMD = 0, // Don't use
InterpolateAtVertexAMD = 1,
ShaderExplicitVertexParameterCountAMD
};
// SPV_AMD_gcn_shader
static const char* const E_SPV_AMD_gcn_shader = "SPV_AMD_gcn_shader";
enum GcnShaderAMD {
GcnShaderBadAMD = 0, // Don't use
CubeFaceIndexAMD = 1,
CubeFaceCoordAMD = 2,
TimeAMD = 3,
GcnShaderCountAMD
};
// SPV_AMD_gpu_shader_half_float
static const char* const E_SPV_AMD_gpu_shader_half_float = "SPV_AMD_gpu_shader_half_float";
// SPV_AMD_texture_gather_bias_lod
static const char* const E_SPV_AMD_texture_gather_bias_lod = "SPV_AMD_texture_gather_bias_lod";
// SPV_AMD_gpu_shader_int16
static const char* const E_SPV_AMD_gpu_shader_int16 = "SPV_AMD_gpu_shader_int16";
// SPV_AMD_shader_image_load_store_lod
static const char* const E_SPV_AMD_shader_image_load_store_lod = "SPV_AMD_shader_image_load_store_lod";
// SPV_AMD_shader_fragment_mask
static const char* const E_SPV_AMD_shader_fragment_mask = "SPV_AMD_shader_fragment_mask";
// SPV_AMD_gpu_shader_half_float_fetch
static const char* const E_SPV_AMD_gpu_shader_half_float_fetch = "SPV_AMD_gpu_shader_half_float_fetch";
#endif // #ifndef GLSLextAMD_H

View File

@ -0,0 +1,37 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextEXT_H
#define GLSLextEXT_H
static const int GLSLextEXTVersion = 100;
static const int GLSLextEXTRevision = 1;
static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shader_stencil_export";
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
#endif // #ifndef GLSLextEXT_H

View File

@ -0,0 +1,43 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextKHR_H
#define GLSLextKHR_H
static const int GLSLextKHRVersion = 100;
static const int GLSLextKHRRevision = 2;
static const char* const E_SPV_KHR_shader_ballot = "SPV_KHR_shader_ballot";
static const char* const E_SPV_KHR_subgroup_vote = "SPV_KHR_subgroup_vote";
static const char* const E_SPV_KHR_device_group = "SPV_KHR_device_group";
static const char* const E_SPV_KHR_multiview = "SPV_KHR_multiview";
static const char* const E_SPV_KHR_shader_draw_parameters = "SPV_KHR_shader_draw_parameters";
static const char* const E_SPV_KHR_16bit_storage = "SPV_KHR_16bit_storage";
static const char* const E_SPV_KHR_8bit_storage = "SPV_KHR_8bit_storage";
static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_storage_buffer_storage_class";
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
#endif // #ifndef GLSLextKHR_H
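These strings are only extension names; a module that relies on one typically also declares the matching capability. A minimal sketch, assuming an spv::Builder from SpvBuilder.h below and using SPV_KHR_multiview as the example (the helper name is hypothetical):

#include "spirv.hpp"
#include "SpvBuilder.h"
namespace spv {
    #include "GLSL.ext.KHR.h"   // no namespace of its own; include inside namespace spv
}

// Declare that the module uses multiview rendering.
void declareMultiview(spv::Builder& builder)
{
    builder.addExtension(spv::E_SPV_KHR_multiview);    // OpExtension "SPV_KHR_multiview"
    builder.addCapability(spv::CapabilityMultiView);   // OpCapability MultiView
}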

View File

@ -0,0 +1,57 @@
/*
** Copyright (c) 2014-2017 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLextNV_H
#define GLSLextNV_H
enum BuiltIn;
enum Decoration;
enum Op;
enum Capability;
static const int GLSLextNVVersion = 100;
static const int GLSLextNVRevision = 5;
//SPV_NV_sample_mask_override_coverage
const char* const E_SPV_NV_sample_mask_override_coverage = "SPV_NV_sample_mask_override_coverage";
//SPV_NV_geometry_shader_passthrough
const char* const E_SPV_NV_geometry_shader_passthrough = "SPV_NV_geometry_shader_passthrough";
//SPV_NV_viewport_array2
const char* const E_SPV_NV_viewport_array2 = "SPV_NV_viewport_array2";
const char* const E_ARB_shader_viewport_layer_array = "SPV_ARB_shader_viewport_layer_array";
//SPV_NV_stereo_view_rendering
const char* const E_SPV_NV_stereo_view_rendering = "SPV_NV_stereo_view_rendering";
//SPV_NVX_multiview_per_view_attributes
const char* const E_SPV_NVX_multiview_per_view_attributes = "SPV_NVX_multiview_per_view_attributes";
//SPV_NV_shader_subgroup_partitioned
const char* const E_SPV_NV_shader_subgroup_partitioned = "SPV_NV_shader_subgroup_partitioned";
#endif // #ifndef GLSLextNV_H

View File

@ -0,0 +1,131 @@
/*
** Copyright (c) 2014-2016 The Khronos Group Inc.
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and/or associated documentation files (the "Materials"),
** to deal in the Materials without restriction, including without limitation
** the rights to use, copy, modify, merge, publish, distribute, sublicense,
** and/or sell copies of the Materials, and to permit persons to whom the
** Materials are furnished to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in
** all copies or substantial portions of the Materials.
**
** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS
** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND
** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/
**
** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS
** IN THE MATERIALS.
*/
#ifndef GLSLstd450_H
#define GLSLstd450_H
static const int GLSLstd450Version = 100;
static const int GLSLstd450Revision = 1;
enum GLSLstd450 {
GLSLstd450Bad = 0, // Don't use
GLSLstd450Round = 1,
GLSLstd450RoundEven = 2,
GLSLstd450Trunc = 3,
GLSLstd450FAbs = 4,
GLSLstd450SAbs = 5,
GLSLstd450FSign = 6,
GLSLstd450SSign = 7,
GLSLstd450Floor = 8,
GLSLstd450Ceil = 9,
GLSLstd450Fract = 10,
GLSLstd450Radians = 11,
GLSLstd450Degrees = 12,
GLSLstd450Sin = 13,
GLSLstd450Cos = 14,
GLSLstd450Tan = 15,
GLSLstd450Asin = 16,
GLSLstd450Acos = 17,
GLSLstd450Atan = 18,
GLSLstd450Sinh = 19,
GLSLstd450Cosh = 20,
GLSLstd450Tanh = 21,
GLSLstd450Asinh = 22,
GLSLstd450Acosh = 23,
GLSLstd450Atanh = 24,
GLSLstd450Atan2 = 25,
GLSLstd450Pow = 26,
GLSLstd450Exp = 27,
GLSLstd450Log = 28,
GLSLstd450Exp2 = 29,
GLSLstd450Log2 = 30,
GLSLstd450Sqrt = 31,
GLSLstd450InverseSqrt = 32,
GLSLstd450Determinant = 33,
GLSLstd450MatrixInverse = 34,
GLSLstd450Modf = 35, // second operand needs an OpVariable to write to
GLSLstd450ModfStruct = 36, // no OpVariable operand
GLSLstd450FMin = 37,
GLSLstd450UMin = 38,
GLSLstd450SMin = 39,
GLSLstd450FMax = 40,
GLSLstd450UMax = 41,
GLSLstd450SMax = 42,
GLSLstd450FClamp = 43,
GLSLstd450UClamp = 44,
GLSLstd450SClamp = 45,
GLSLstd450FMix = 46,
GLSLstd450IMix = 47, // Reserved
GLSLstd450Step = 48,
GLSLstd450SmoothStep = 49,
GLSLstd450Fma = 50,
GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to
GLSLstd450FrexpStruct = 52, // no OpVariable operand
GLSLstd450Ldexp = 53,
GLSLstd450PackSnorm4x8 = 54,
GLSLstd450PackUnorm4x8 = 55,
GLSLstd450PackSnorm2x16 = 56,
GLSLstd450PackUnorm2x16 = 57,
GLSLstd450PackHalf2x16 = 58,
GLSLstd450PackDouble2x32 = 59,
GLSLstd450UnpackSnorm2x16 = 60,
GLSLstd450UnpackUnorm2x16 = 61,
GLSLstd450UnpackHalf2x16 = 62,
GLSLstd450UnpackSnorm4x8 = 63,
GLSLstd450UnpackUnorm4x8 = 64,
GLSLstd450UnpackDouble2x32 = 65,
GLSLstd450Length = 66,
GLSLstd450Distance = 67,
GLSLstd450Cross = 68,
GLSLstd450Normalize = 69,
GLSLstd450FaceForward = 70,
GLSLstd450Reflect = 71,
GLSLstd450Refract = 72,
GLSLstd450FindILsb = 73,
GLSLstd450FindSMsb = 74,
GLSLstd450FindUMsb = 75,
GLSLstd450InterpolateAtCentroid = 76,
GLSLstd450InterpolateAtSample = 77,
GLSLstd450InterpolateAtOffset = 78,
GLSLstd450NMin = 79,
GLSLstd450NMax = 80,
GLSLstd450NClamp = 81,
GLSLstd450Count
};
#endif // #ifndef GLSLstd450_H
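The enum above mirrors the GLSL.std.450 extended instruction set: a builder imports that set once by name and then refers to individual instructions by these values. A minimal sketch, assuming an existing spv::Builder plus ids f32 and x (the helper name emitSqrt is illustrative):

#include "spirv.hpp"
#include "SpvBuilder.h"
namespace spv {
    #include "GLSL.std.450.h"   // include inside namespace spv, as GlslangToSpv.cpp does
}

// sqrt(x) via the standard extended instruction set.
spv::Id emitSqrt(spv::Builder& builder, spv::Id f32, spv::Id x)
{
    spv::Id set = builder.import("GLSL.std.450");   // OpExtInstImport of the standard set
    return builder.createBuiltinCall(f32, set, spv::GLSLstd450Sqrt, { x });
}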

File diff suppressed because it is too large

View File

@ -0,0 +1,67 @@
//
// Copyright (C) 2014 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#pragma once
#if defined(_MSC_VER) && _MSC_VER >= 1900
#pragma warning(disable : 4464) // relative include path contains '..'
#endif
#include "../glslang/Include/intermediate.h"
#include <string>
#include <vector>
#include "Logger.h"
namespace glslang {
struct SpvOptions {
SpvOptions() : generateDebugInfo(false), disableOptimizer(true),
optimizeSize(false) { }
bool generateDebugInfo;
bool disableOptimizer;
bool optimizeSize;
};
void GetSpirvVersion(std::string&);
int GetSpirvGeneratorVersion();
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
SpvOptions* options = nullptr);
void GlslangToSpv(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger, SpvOptions* options = nullptr);
void OutputSpvBin(const std::vector<unsigned int>& spirv, const char* baseName);
void OutputSpvHex(const std::vector<unsigned int>& spirv, const char* baseName, const char* varName);
}
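GlslangToSpv() converts a parsed and linked shader's intermediate representation into SPIR-V words. A minimal sketch of just the generation step, assuming 'program' is a glslang::TProgram that has already been given shaders, parsed, and linked via glslang/Public/ShaderLang.h (the helper name and include paths are assumptions):

#include <vector>
#include "GlslangToSpv.h"
#include "../glslang/Public/ShaderLang.h"

std::vector<unsigned int> generateSpirv(glslang::TProgram& program, spv::SpvBuildLogger& logger)
{
    std::vector<unsigned int> spirv;
    glslang::SpvOptions options;
    options.generateDebugInfo = false;   // the defaults, spelled out
    options.disableOptimizer = true;
    options.optimizeSize = false;
    // Fragment stage chosen for illustration; use whichever stage was linked.
    if (const glslang::TIntermediate* intermediate = program.getIntermediate(EShLangFragment))
        glslang::GlslangToSpv(*intermediate, spirv, &logger, &options);
    return spirv;
}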

View File

@ -0,0 +1,113 @@
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// The SPIR-V spec requires code blocks to appear in an order satisfying the
// dominator-tree direction (ie, dominator before the dominated). This is,
// actually, easy to achieve: any pre-order CFG traversal algorithm will do it.
// Because such algorithms visit a block only after traversing some path to it
// from the root, they necessarily visit the block's idom first.
//
// But not every graph-traversal algorithm outputs blocks in an order that
// appears logical to human readers. The problem is that unrelated branches may
// be interspersed with each other, and merge blocks may come before some of the
// branches being merged.
//
// A good, human-readable order of blocks may be achieved by performing
// depth-first search but delaying merge nodes until after all their branches
// have been visited. This is implemented below by the inReadableOrder()
// function.
#include "spvIR.h"
#include <cassert>
#include <unordered_set>
using spv::Block;
using spv::Id;
namespace {
// Traverses CFG in a readable order, invoking a pre-set callback on each block.
// Use by calling visit() on the root block.
class ReadableOrderTraverser {
public:
explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
// Visits the block if it hasn't been visited already and isn't currently
// being delayed. Invokes callback(block), then descends into its
// successors. Delays merge-block and continue-block processing until all
// the branches have been completed.
void visit(Block* block)
{
assert(block);
if (visited_.count(block) || delayed_.count(block))
return;
callback_(block);
visited_.insert(block);
Block* mergeBlock = nullptr;
Block* continueBlock = nullptr;
auto mergeInst = block->getMergeInstruction();
if (mergeInst) {
Id mergeId = mergeInst->getIdOperand(0);
mergeBlock = block->getParent().getParent().getInstruction(mergeId)->getBlock();
delayed_.insert(mergeBlock);
if (mergeInst->getOpCode() == spv::OpLoopMerge) {
Id continueId = mergeInst->getIdOperand(1);
continueBlock =
block->getParent().getParent().getInstruction(continueId)->getBlock();
delayed_.insert(continueBlock);
}
}
const auto successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it);
if (continueBlock) {
delayed_.erase(continueBlock);
visit(continueBlock);
}
if (mergeBlock) {
delayed_.erase(mergeBlock);
visit(mergeBlock);
}
}
private:
std::function<void(Block*)> callback_;
// Whether a block has already been visited or is being delayed.
std::unordered_set<Block *> visited_, delayed_;
};
}
void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
{
ReadableOrderTraverser(callback).visit(root);
}
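A minimal usage sketch for the traversal above: collect a function's blocks in readable order, starting from its entry block. The helper name, and the assumption that 'entry' is the entry spv::Block* of a function built with this library, are illustrative:

#include <functional>
#include <vector>
#include "spvIR.h"

std::vector<spv::Block*> blocksInReadableOrder(spv::Block* entry)
{
    std::vector<spv::Block*> ordered;
    spv::inReadableOrder(entry, [&ordered](spv::Block* b) { ordered.push_back(b); });
    return ordered;
}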

68 glslang/spirv/Logger.cpp Normal file
View File

@ -0,0 +1,68 @@
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#include "Logger.h"
#include <algorithm>
#include <iterator>
#include <sstream>
namespace spv {
void SpvBuildLogger::tbdFunctionality(const std::string& f)
{
if (std::find(std::begin(tbdFeatures), std::end(tbdFeatures), f) == std::end(tbdFeatures))
tbdFeatures.push_back(f);
}
void SpvBuildLogger::missingFunctionality(const std::string& f)
{
if (std::find(std::begin(missingFeatures), std::end(missingFeatures), f) == std::end(missingFeatures))
missingFeatures.push_back(f);
}
std::string SpvBuildLogger::getAllMessages() const {
std::ostringstream messages;
for (auto it = tbdFeatures.cbegin(); it != tbdFeatures.cend(); ++it)
messages << "TBD functionality: " << *it << "\n";
for (auto it = missingFeatures.cbegin(); it != missingFeatures.cend(); ++it)
messages << "Missing functionality: " << *it << "\n";
for (auto it = warnings.cbegin(); it != warnings.cend(); ++it)
messages << "warning: " << *it << "\n";
for (auto it = errors.cbegin(); it != errors.cend(); ++it)
messages << "error: " << *it << "\n";
return messages.str();
}
} // end spv namespace

74 glslang/spirv/Logger.h Normal file
View File

@ -0,0 +1,74 @@
//
// Copyright (C) 2016 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_SPIRV_LOGGER_H
#define GLSLANG_SPIRV_LOGGER_H
#include <string>
#include <vector>
namespace spv {
// A class for holding all SPIR-V build status messages, including
// missing/TBD functionalities, warnings, and errors.
class SpvBuildLogger {
public:
SpvBuildLogger() {}
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
void missingFunctionality(const std::string& f);
// Logs a warning.
void warning(const std::string& w) { warnings.push_back(w); }
// Logs an error.
void error(const std::string& e) { errors.push_back(e); }
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
private:
SpvBuildLogger(const SpvBuildLogger&);
std::vector<std::string> tbdFeatures;
std::vector<std::string> missingFeatures;
std::vector<std::string> warnings;
std::vector<std::string> errors;
};
} // end spv namespace
#endif // GLSLANG_SPIRV_LOGGER_H
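A minimal usage sketch for the logger above, showing the documented ordering of getAllMessages(); the message strings are placeholders:

#include <iostream>
#include "Logger.h"

void reportBuildMessages()
{
    spv::SpvBuildLogger logger;
    logger.tbdFunctionality("some TBD feature");   // registered once, duplicates are ignored
    logger.warning("some warning");
    logger.error("some error");
    // Prints TBD functionality, then missing functionality, then warnings, then errors.
    std::cout << logger.getAllMessages();
}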

File diff suppressed because it is too large

304 glslang/spirv/SPVRemapper.h Normal file
View File

@ -0,0 +1,304 @@
//
// Copyright (C) 2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef SPIRVREMAPPER_H
#define SPIRVREMAPPER_H
#include <string>
#include <vector>
#include <cstdlib>
#include <exception>
namespace spv {
// MSVC defines __cplusplus as an older value, even when it supports almost all of C++11.
// We handle that here by making our own symbol.
#if __cplusplus >= 201103L || _MSC_VER >= 1700
# define use_cpp11 1
#endif
class spirvbin_base_t
{
public:
enum Options {
NONE = 0,
STRIP = (1<<0),
MAP_TYPES = (1<<1),
MAP_NAMES = (1<<2),
MAP_FUNCS = (1<<3),
DCE_FUNCS = (1<<4),
DCE_VARS = (1<<5),
DCE_TYPES = (1<<6),
OPT_LOADSTORE = (1<<7),
OPT_FWD_LS = (1<<8), // EXPERIMENTAL: PRODUCES INVALID SCHEMA-0 SPIRV
MAP_ALL = (MAP_TYPES | MAP_NAMES | MAP_FUNCS),
DCE_ALL = (DCE_FUNCS | DCE_VARS | DCE_TYPES),
OPT_ALL = (OPT_LOADSTORE),
ALL_BUT_STRIP = (MAP_ALL | DCE_ALL | OPT_ALL),
DO_EVERYTHING = (STRIP | ALL_BUT_STRIP)
};
};
} // namespace spv
#if !defined (use_cpp11)
#include <cstdio>
#include <cstdint>
namespace spv {
class spirvbin_t : public spirvbin_base_t
{
public:
spirvbin_t(int /*verbose = 0*/) { }
void remap(std::vector<std::uint32_t>& /*spv*/, unsigned int /*opts = 0*/)
{
printf("Tool not compiled for C++11, which is required for SPIR-V remapping.\n");
exit(5);
}
};
} // namespace spv
#else // defined (use_cpp11)
#include <functional>
#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <map>
#include <set>
#include <cassert>
#include "spirv.hpp"
#include "spvIR.h"
namespace spv {
// class to hold SPIR-V binary data for remapping, DCE, and debug stripping
class spirvbin_t : public spirvbin_base_t
{
public:
spirvbin_t(int verbose = 0) : entryPoint(spv::NoResult), largestNewId(0), verbose(verbose), errorLatch(false)
{ }
virtual ~spirvbin_t() { }
// remap on an existing binary in memory
void remap(std::vector<std::uint32_t>& spv, std::uint32_t opts = DO_EVERYTHING);
// Type for error/log handler functions
typedef std::function<void(const std::string&)> errorfn_t;
typedef std::function<void(const std::string&)> logfn_t;
// Register error/log handling functions (can be lambda fn / functor / etc)
static void registerErrorHandler(errorfn_t handler) { errorHandler = handler; }
static void registerLogHandler(logfn_t handler) { logHandler = handler; }
protected:
// This can be overridden to provide other message behavior if needed
virtual void msg(int minVerbosity, int indent, const std::string& txt) const;
private:
// Local to global, or global to local ID map
typedef std::unordered_map<spv::Id, spv::Id> idmap_t;
typedef std::unordered_set<spv::Id> idset_t;
typedef std::unordered_map<spv::Id, int> blockmap_t;
void remap(std::uint32_t opts = DO_EVERYTHING);
// Map of names to IDs
typedef std::unordered_map<std::string, spv::Id> namemap_t;
typedef std::uint32_t spirword_t;
typedef std::pair<unsigned, unsigned> range_t;
typedef std::function<void(spv::Id&)> idfn_t;
typedef std::function<bool(spv::Op, unsigned start)> instfn_t;
// Special Values for ID map:
static const spv::Id unmapped; // unchanged from default value
static const spv::Id unused; // unused ID
static const int header_size; // SPIR header = 5 words
class id_iterator_t;
// For mapping type entries between different shaders
typedef std::vector<spirword_t> typeentry_t;
typedef std::map<spv::Id, typeentry_t> globaltypes_t;
// A set that preserves position order, and a reverse map
typedef std::set<int> posmap_t;
typedef std::unordered_map<spv::Id, int> posmap_rev_t;
// Maps an ID to the size of its base type, if known.
typedef std::unordered_map<spv::Id, unsigned> typesize_map_t;
// handle error
void error(const std::string& txt) const { errorLatch = true; errorHandler(txt); }
bool isConstOp(spv::Op opCode) const;
bool isTypeOp(spv::Op opCode) const;
bool isStripOp(spv::Op opCode) const;
bool isFlowCtrl(spv::Op opCode) const;
range_t literalRange(spv::Op opCode) const;
range_t typeRange(spv::Op opCode) const;
range_t constRange(spv::Op opCode) const;
unsigned typeSizeInWords(spv::Id id) const;
unsigned idTypeSizeInWords(spv::Id id) const;
spv::Id& asId(unsigned word) { return spv[word]; }
const spv::Id& asId(unsigned word) const { return spv[word]; }
spv::Op asOpCode(unsigned word) const { return opOpCode(spv[word]); }
std::uint32_t asOpCodeHash(unsigned word);
spv::Decoration asDecoration(unsigned word) const { return spv::Decoration(spv[word]); }
unsigned asWordCount(unsigned word) const { return opWordCount(spv[word]); }
spv::Id asTypeConstId(unsigned word) const { return asId(word + (isTypeOp(asOpCode(word)) ? 1 : 2)); }
unsigned idPos(spv::Id id) const;
static unsigned opWordCount(spirword_t data) { return data >> spv::WordCountShift; }
static spv::Op opOpCode(spirword_t data) { return spv::Op(data & spv::OpCodeMask); }
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
spirword_t bound() const { return spv[3]; } // return Id bound from header
spirword_t bound(spirword_t b) { return spv[3] = b; };
spirword_t genmagic() const { return spv[2]; } // generator magic
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
spirword_t schemaNum() const { return spv[4]; } // schema number from header
// Mapping fns: get
spv::Id localId(spv::Id id) const { return idMapL[id]; }
// Mapping fns: set
inline spv::Id localId(spv::Id id, spv::Id newId);
void countIds(spv::Id id);
// Return next unused new local ID.
// NOTE: boost::dynamic_bitset would be more efficient due to find_next(),
// which std::vector<bool> doesn't have.
inline spv::Id nextUnusedId(spv::Id id);
void buildLocalMaps();
std::string literalString(unsigned word) const; // Return literal as a std::string
int literalStringWords(const std::string& str) const { return (int(str.size())+4)/4; }
bool isNewIdMapped(spv::Id newId) const { return isMapped(newId); }
bool isOldIdUnmapped(spv::Id oldId) const { return localId(oldId) == unmapped; }
bool isOldIdUnused(spv::Id oldId) const { return localId(oldId) == unused; }
bool isOldIdMapped(spv::Id oldId) const { return !isOldIdUnused(oldId) && !isOldIdUnmapped(oldId); }
bool isFunction(spv::Id oldId) const { return fnPos.find(oldId) != fnPos.end(); }
// bool matchType(const globaltypes_t& globalTypes, spv::Id lt, spv::Id gt) const;
// spv::Id findType(const globaltypes_t& globalTypes, spv::Id lt) const;
std::uint32_t hashType(unsigned typeStart) const;
spirvbin_t& process(instfn_t, idfn_t, unsigned begin = 0, unsigned end = 0);
int processInstruction(unsigned word, instfn_t, idfn_t);
void validate() const;
void mapTypeConst();
void mapFnBodies();
void optLoadStore();
void dceFuncs();
void dceVars();
void dceTypes();
void mapNames();
void foldIds(); // fold IDs to smallest space
void forwardLoadStores(); // load store forwarding (EXPERIMENTAL)
void offsetIds(); // create relative offset IDs
void applyMap(); // remap per local name map
void mapRemainder(); // map any IDs we haven't touched yet
void stripDebug(); // strip all debug info
void stripDeadRefs(); // strips debug info for now-dead references after DCE
void strip(); // remove debug symbols
std::vector<spirword_t> spv; // SPIR words
namemap_t nameMap; // ID names from OpName
// Since we want to also do binary ops, we can't use std::vector<bool>. we could use
// boost::dynamic_bitset, but we're trying to avoid a boost dependency.
typedef std::uint64_t bits_t;
std::vector<bits_t> mapped; // which new IDs have been mapped
static const int mBits = sizeof(bits_t) * 4;
bool isMapped(spv::Id id) const { return id < maxMappedId() && ((mapped[id/mBits] & (1LL<<(id%mBits))) != 0); }
void setMapped(spv::Id id) { resizeMapped(id); mapped[id/mBits] |= (1LL<<(id%mBits)); }
void resizeMapped(spv::Id id) { if (id >= maxMappedId()) mapped.resize(id/mBits+1, 0); }
size_t maxMappedId() const { return mapped.size() * mBits; }
// Add a strip range for a given instruction starting at 'start'
// Note: avoiding brace initializers to please older versions of MSVC.
void stripInst(unsigned start) { stripRange.push_back(range_t(start, start + asWordCount(start))); }
// Function start and end. use unordered_map because we'll have
// many fewer functions than IDs.
std::unordered_map<spv::Id, range_t> fnPos;
// Which functions are called, anywhere in the module, with a call count
std::unordered_map<spv::Id, int> fnCalls;
posmap_t typeConstPos; // word positions that define types & consts (ordered)
posmap_rev_t idPosR; // reverse map from IDs to positions
typesize_map_t idTypeSizeMap; // maps each ID to its type size, if known.
std::vector<spv::Id> idMapL; // ID {M}ap from {L}ocal to {G}lobal IDs
spv::Id entryPoint; // module entry point
spv::Id largestNewId; // biggest new ID we have mapped anything to
// Sections of the binary to strip, given as [begin,end)
std::vector<range_t> stripRange;
// processing options:
std::uint32_t options;
int verbose; // verbosity level
// Error latch: this is set if the error handler is ever executed. It would be better to
// use a try/catch block and throw, but that's not desired for certain environments, so
// this is the alternative.
mutable bool errorLatch;
static errorfn_t errorHandler;
static logfn_t logHandler;
};
} // namespace spv
#endif // defined (use_cpp11)
#endif // SPIRVREMAPPER_H
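A minimal usage sketch for the remapper interface above: install an error handler and run the full remap/DCE/strip pass in place on a SPIR-V binary. 'spirv' is assumed to already hold a complete module, and the helper name is illustrative:

#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include "SPVRemapper.h"

void remapModule(std::vector<std::uint32_t>& spirv)
{
    spv::spirvbin_t::registerErrorHandler([](const std::string& msg) {
        std::cerr << "remap error: " << msg << std::endl;
    });
    spv::spirvbin_t remapper(0 /* verbose */);
    remapper.remap(spirv, spv::spirvbin_t::DO_EVERYTHING);   // STRIP | MAP_ALL | DCE_ALL | OPT_ALL
}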

2777 glslang/spirv/SpvBuilder.cpp Normal file

File diff suppressed because it is too large

648 glslang/spirv/SpvBuilder.h Normal file
View File

@ -0,0 +1,648 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
// Copyright (C) 2015-2016 Google, Inc.
// Copyright (C) 2017 ARM Limited.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// "Builder" is an interface to fully build SPIR-V IR. Allocate one of
// these to build (a thread safe) internal SPIR-V representation (IR),
// and then dump it as a binary stream according to the SPIR-V specification.
//
// A Builder has a 1:1 relationship with a SPIR-V module.
//
#pragma once
#ifndef SpvBuilder_H
#define SpvBuilder_H
#include "Logger.h"
#include "spirv.hpp"
#include "spvIR.h"
#include <algorithm>
#include <map>
#include <memory>
#include <set>
#include <sstream>
#include <stack>
#include <unordered_map>
namespace spv {
class Builder {
public:
Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
virtual ~Builder();
static const int maxMatrixSize = 4;
unsigned int getSpvVersion() const { return spvVersion; }
void setSource(spv::SourceLanguage lang, int version)
{
source = lang;
sourceVersion = version;
}
void setSourceFile(const std::string& file)
{
Instruction* fileString = new Instruction(getUniqueId(), NoType, OpString);
fileString->addStringOperand(file.c_str());
sourceFileStringId = fileString->getResultId();
strings.push_back(std::unique_ptr<Instruction>(fileString));
}
void setSourceText(const std::string& text) { sourceText = text; }
void addSourceExtension(const char* ext) { sourceExtensions.push_back(ext); }
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
void setEmitOpLines() { emitOpLines = true; }
void addExtension(const char* ext) { extensions.insert(ext); }
Id import(const char*);
void setMemoryModel(spv::AddressingModel addr, spv::MemoryModel mem)
{
addressModel = addr;
memoryModel = mem;
}
void addCapability(spv::Capability cap) { capabilities.insert(cap); }
// To get a new <id> for anything needing a new one.
Id getUniqueId() { return ++uniqueId; }
// To get a set of new <id>s, e.g., for a set of function parameters
Id getUniqueIds(int numIds)
{
Id id = uniqueId + 1;
uniqueId += numIds;
return id;
}
// Log the current line, and if different than the last one,
// issue a new OpLine, using the current file name.
void setLine(int line);
// Low-level OpLine. See setLine() for a layered helper.
void addLine(Id fileName, int line, int column);
// For creating new types (will return old type if the requested one was already made).
Id makeVoidType();
Id makeBoolType();
Id makePointer(StorageClass, Id type);
Id makeIntegerType(int width, bool hasSign); // generic
Id makeIntType(int width) { return makeIntegerType(width, true); }
Id makeUintType(int width) { return makeIntegerType(width, false); }
Id makeFloatType(int width);
Id makeStructType(const std::vector<Id>& members, const char*);
Id makeStructResultType(Id type0, Id type1);
Id makeVectorType(Id component, int size);
Id makeMatrixType(Id component, int cols, int rows);
Id makeArrayType(Id element, Id sizeId, int stride); // 0 stride means no stride decoration
Id makeRuntimeArray(Id element);
Id makeFunctionType(Id returnType, const std::vector<Id>& paramTypes);
Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
Id makeSamplerType();
Id makeSampledImageType(Id imageType);
// For querying about types.
Id getTypeId(Id resultId) const { return module.getTypeId(resultId); }
Id getDerefTypeId(Id resultId) const;
Op getOpCode(Id id) const { return module.getInstruction(id)->getOpCode(); }
Op getTypeClass(Id typeId) const { return getOpCode(typeId); }
Op getMostBasicTypeClass(Id typeId) const;
int getNumComponents(Id resultId) const { return getNumTypeComponents(getTypeId(resultId)); }
int getNumTypeConstituents(Id typeId) const;
int getNumTypeComponents(Id typeId) const { return getNumTypeConstituents(typeId); }
Id getScalarTypeId(Id typeId) const;
Id getContainedTypeId(Id typeId) const;
Id getContainedTypeId(Id typeId, int) const;
StorageClass getTypeStorageClass(Id typeId) const { return module.getStorageClass(typeId); }
ImageFormat getImageTypeFormat(Id typeId) const { return (ImageFormat)module.getInstruction(typeId)->getImmediateOperand(6); }
bool isPointer(Id resultId) const { return isPointerType(getTypeId(resultId)); }
bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); }
bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); }
bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); }
bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); }
bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
bool isBoolType(Id typeId) { return groupedTypes[OpTypeBool].size() > 0 && typeId == groupedTypes[OpTypeBool].back()->getResultId(); }
bool isIntType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) != 0; }
bool isUintType(Id typeId) const { return getTypeClass(typeId) == OpTypeInt && module.getInstruction(typeId)->getImmediateOperand(1) == 0; }
bool isFloatType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat; }
bool isPointerType(Id typeId) const { return getTypeClass(typeId) == OpTypePointer; }
bool isScalarType(Id typeId) const { return getTypeClass(typeId) == OpTypeFloat || getTypeClass(typeId) == OpTypeInt || getTypeClass(typeId) == OpTypeBool; }
bool isVectorType(Id typeId) const { return getTypeClass(typeId) == OpTypeVector; }
bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId); }
bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
bool isConstantOpCode(Op opcode) const;
bool isSpecConstantOpCode(Op opcode) const;
bool isConstant(Id resultId) const { return isConstantOpCode(getOpCode(resultId)); }
bool isConstantScalar(Id resultId) const { return getOpCode(resultId) == OpConstant; }
bool isSpecConstant(Id resultId) const { return isSpecConstantOpCode(getOpCode(resultId)); }
unsigned int getConstantScalar(Id resultId) const { return module.getInstruction(resultId)->getImmediateOperand(0); }
StorageClass getStorageClass(Id resultId) const { return getTypeStorageClass(getTypeId(resultId)); }
int getScalarTypeWidth(Id typeId) const
{
Id scalarTypeId = getScalarTypeId(typeId);
assert(getTypeClass(scalarTypeId) == OpTypeInt || getTypeClass(scalarTypeId) == OpTypeFloat);
return module.getInstruction(scalarTypeId)->getImmediateOperand(0);
}
int getTypeNumColumns(Id typeId) const
{
assert(isMatrixType(typeId));
return getNumTypeConstituents(typeId);
}
int getNumColumns(Id resultId) const { return getTypeNumColumns(getTypeId(resultId)); }
int getTypeNumRows(Id typeId) const
{
assert(isMatrixType(typeId));
return getNumTypeComponents(getContainedTypeId(typeId));
}
int getNumRows(Id resultId) const { return getTypeNumRows(getTypeId(resultId)); }
Dim getTypeDimensionality(Id typeId) const
{
assert(isImageType(typeId));
return (Dim)module.getInstruction(typeId)->getImmediateOperand(1);
}
Id getImageType(Id resultId) const
{
Id typeId = getTypeId(resultId);
assert(isImageType(typeId) || isSampledImageType(typeId));
return isSampledImageType(typeId) ? module.getInstruction(typeId)->getIdOperand(0) : typeId;
}
bool isArrayedImageType(Id typeId) const
{
assert(isImageType(typeId));
return module.getInstruction(typeId)->getImmediateOperand(3) != 0;
}
// For making new constants (will return old constant if the requested one was already made).
Id makeBoolConstant(bool b, bool specConstant = false);
Id makeInt8Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(8), (unsigned)i, specConstant); }
Id makeUint8Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(8), u, specConstant); }
Id makeInt16Constant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(16), (unsigned)i, specConstant); }
Id makeUint16Constant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(16), u, specConstant); }
Id makeIntConstant(int i, bool specConstant = false) { return makeIntConstant(makeIntType(32), (unsigned)i, specConstant); }
Id makeUintConstant(unsigned u, bool specConstant = false) { return makeIntConstant(makeUintType(32), u, specConstant); }
Id makeInt64Constant(long long i, bool specConstant = false) { return makeInt64Constant(makeIntType(64), (unsigned long long)i, specConstant); }
Id makeUint64Constant(unsigned long long u, bool specConstant = false) { return makeInt64Constant(makeUintType(64), u, specConstant); }
Id makeFloatConstant(float f, bool specConstant = false);
Id makeDoubleConstant(double d, bool specConstant = false);
Id makeFloat16Constant(float f16, bool specConstant = false);
Id makeFpConstant(Id type, double d, bool specConstant = false);
// Turn the array of constants into a proper spv constant of the requested type.
Id makeCompositeConstant(Id type, const std::vector<Id>& comps, bool specConst = false);
// Methods for adding information outside the CFG.
Instruction* addEntryPoint(ExecutionModel, Function*, const char* name);
void addExecutionMode(Function*, ExecutionMode mode, int value1 = -1, int value2 = -1, int value3 = -1);
void addName(Id, const char* name);
void addMemberName(Id, int member, const char* name);
void addDecoration(Id, Decoration, int num = -1);
void addDecoration(Id, Decoration, const char*);
void addDecorationId(Id id, Decoration, Id idDecoration);
void addMemberDecoration(Id, unsigned int member, Decoration, int num = -1);
void addMemberDecoration(Id, unsigned int member, Decoration, const char*);
// At the end of what block do the next create*() instructions go?
void setBuildPoint(Block* bp) { buildPoint = bp; }
Block* getBuildPoint() const { return buildPoint; }
// Make the entry-point function. The returned pointer is only valid
// for the lifetime of this builder.
Function* makeEntryPoint(const char*);
// Make a shader-style function, and create its entry block if entry is non-zero.
// Return the function, pass back the entry.
// The returned pointer is only valid for the lifetime of this builder.
Function* makeFunctionEntry(Decoration precision, Id returnType, const char* name, const std::vector<Id>& paramTypes,
const std::vector<std::vector<Decoration>>& precisions, Block **entry = 0);
// Create a return. An 'implicit' return is one not appearing in the source
// code. In the case of an implicit return, no post-return block is inserted.
void makeReturn(bool implicit, Id retVal = 0);
// Generate all the code needed to finish up a function.
void leaveFunction();
// Create a discard.
void makeDiscard();
// Create a global or function local or IO variable.
Id createVariable(StorageClass, Id type, const char* name = 0);
// Create an intermediate with an undefined value.
Id createUndefined(Id type);
// Store into an Id and return the l-value
void createStore(Id rValue, Id lValue);
// Load from an Id and return it
Id createLoad(Id lValue);
// Create an OpAccessChain instruction
Id createAccessChain(StorageClass, Id base, const std::vector<Id>& offsets);
// Create an OpArrayLength instruction
Id createArrayLength(Id base, unsigned int member);
// Create an OpCompositeExtract instruction
Id createCompositeExtract(Id composite, Id typeId, unsigned index);
Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
Id createCompositeInsert(Id object, Id composite, Id typeId, unsigned index);
Id createCompositeInsert(Id object, Id composite, Id typeId, const std::vector<unsigned>& indexes);
Id createVectorExtractDynamic(Id vector, Id typeId, Id componentIndex);
Id createVectorInsertDynamic(Id vector, Id typeId, Id component, Id componentIndex);
void createNoResultOp(Op);
void createNoResultOp(Op, Id operand);
void createNoResultOp(Op, const std::vector<Id>& operands);
void createControlBarrier(Scope execution, Scope memory, MemorySemanticsMask);
void createMemoryBarrier(unsigned executionScope, unsigned memorySemantics);
Id createUnaryOp(Op, Id typeId, Id operand);
Id createBinOp(Op, Id typeId, Id operand1, Id operand2);
Id createTriOp(Op, Id typeId, Id operand1, Id operand2, Id operand3);
Id createOp(Op, Id typeId, const std::vector<Id>& operands);
Id createFunctionCall(spv::Function*, const std::vector<spv::Id>&);
Id createSpecConstantOp(Op, Id typeId, const std::vector<spv::Id>& operands, const std::vector<unsigned>& literals);
// Take an rvalue (source) and a set of channels to extract from it to
// make a new rvalue, which is returned.
Id createRvalueSwizzle(Decoration precision, Id typeId, Id source, const std::vector<unsigned>& channels);
// Take a copy of an lvalue (target) and a source of components, and set the
// source components into the lvalue where the 'channels' say to put them.
// An updated version of the target is returned.
// (No true lvalue or stores are used.)
Id createLvalueSwizzle(Id typeId, Id target, Id source, const std::vector<unsigned>& channels);
// If both the id and precision are valid, the id
// gets tagged with the requested precision.
// The passed in id is always the returned id, to simplify use patterns.
Id setPrecision(Id id, Decoration precision)
{
if (precision != NoPrecision && id != NoResult)
addDecoration(id, precision);
return id;
}
// Can smear a scalar to a vector for the following forms:
// - promoteScalar(scalar, vector) // smear scalar to width of vector
// - promoteScalar(vector, scalar) // smear scalar to width of vector
// - promoteScalar(pointer, scalar) // smear scalar to width of what pointer points to
// - promoteScalar(scalar, scalar) // do nothing
// Other forms are not allowed.
//
// Generally, the type of 'scalar' does not need to be the same type as the components in 'vector'.
// The type of the created vector is a vector of components of the same type as the scalar.
//
// Note: One of the arguments will change, with the result coming back that way rather than
// through the return value.
void promoteScalar(Decoration precision, Id& left, Id& right);
// Make a value by smearing the scalar to fill the type.
// vectorType should be the correct type for making a vector of scalarVal.
// (No conversions are done.)
Id smearScalar(Decoration precision, Id scalarVal, Id vectorType);
// Create a call to a built-in function.
Id createBuiltinCall(Id resultType, Id builtins, int entryPoint, const std::vector<Id>& args);
// List of parameters used to create a texture operation
struct TextureParameters {
Id sampler;
Id coords;
Id bias;
Id lod;
Id Dref;
Id offset;
Id offsets;
Id gradX;
Id gradY;
Id sample;
Id component;
Id texelOut;
Id lodClamp;
};
// Select the correct texture operation based on all inputs, and emit the correct instruction
Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicit, const TextureParameters&);
// Emit the OpTextureQuery* instruction that was passed in.
// Figure out the right return value and type, and return it.
Id createTextureQueryCall(Op, const TextureParameters&, bool isUnsignedResult);
Id createSamplePositionCall(Decoration precision, Id, Id);
Id createBitFieldExtractCall(Decoration precision, Id, Id, Id, bool isSigned);
Id createBitFieldInsertCall(Decoration precision, Id, Id, Id, Id);
// Reduction comparison for composites: For equal and not-equal resulting in a scalar.
Id createCompositeCompare(Decoration precision, Id, Id, bool /* true if for equal, false if for not-equal */);
// OpCompositeConstruct
Id createCompositeConstruct(Id typeId, const std::vector<Id>& constituents);
// vector or scalar constructor
Id createConstructor(Decoration precision, const std::vector<Id>& sources, Id resultTypeId);
// matrix constructor
Id createMatrixConstructor(Decoration precision, const std::vector<Id>& sources, Id constructee);
// Helper to use for building nested control flow with if-then-else.
class If {
public:
If(Id condition, unsigned int ctrl, Builder& builder);
~If() {}
void makeBeginElse();
void makeEndIf();
private:
If(const If&);
If& operator=(If&);
Builder& builder;
Id condition;
unsigned int control;
Function* function;
Block* headerBlock;
Block* thenBlock;
Block* elseBlock;
Block* mergeBlock;
};
// Make a switch statement. A switch has 'numSegments' of pieces of code, not containing
// any case/default labels, all separated by one or more case/default labels. Each possible
// case value v is a jump to the caseValues[v] segment. The defaultSegment is also in this
// number space. How to compute the value is given by 'condition', as in switch(condition).
//
// The SPIR-V Builder will maintain the stack of post-switch merge blocks for nested switches.
//
// Use a defaultSegment < 0 if there is no default segment (to branch to post switch).
//
// Returns the right set of basic blocks to start each code segment with, so that the caller's
// recursion stack can hold the memory for it.
//
void makeSwitch(Id condition, unsigned int control, int numSegments, const std::vector<int>& caseValues,
const std::vector<int>& valueToSegment, int defaultSegment, std::vector<Block*>& segmentBB); // return argument
// Add a branch to the innermost switch's merge block.
void addSwitchBreak();
// Move to the next code segment, passing in the return argument in makeSwitch()
void nextSwitchSegment(std::vector<Block*>& segmentBB, int segment);
// Finish off the innermost switch.
void endSwitch(std::vector<Block*>& segmentBB);
struct LoopBlocks {
LoopBlocks(Block& head, Block& body, Block& merge, Block& continue_target) :
head(head), body(body), merge(merge), continue_target(continue_target) { }
Block &head, &body, &merge, &continue_target;
private:
LoopBlocks();
LoopBlocks& operator=(const LoopBlocks&);
};
// Start a new loop and prepare the builder to generate code for it. Until
// closeLoop() is called for this loop, createLoopContinue() and
// createLoopExit() will target its corresponding blocks.
LoopBlocks& makeNewLoop();
// Create a new block in the function containing the build point. Memory is
// owned by the function object.
Block& makeNewBlock();
// Add a branch to the continue_target of the current (innermost) loop.
void createLoopContinue();
// Add an exit (e.g. "break") from the innermost loop that we're currently
// in.
void createLoopExit();
// Close the innermost loop that you're in
void closeLoop();
//
// Access chain design for an R-Value vs. L-Value:
//
// There is a single access chain the builder is building at
// any particular time. Such a chain can be used for either a load or
// a store, when desired.
//
// Expressions can be r-values, l-values, or both, or only r-values:
// a[b.c].d = .... // l-value
// ... = a[b.c].d; // r-value, that also looks like an l-value
// ++a[b.c].d; // r-value and l-value
// (x + y)[2]; // r-value only, can't possibly be l-value
//
// Computing an r-value means generating code. Hence,
// r-values should only be computed when they are needed, not speculatively.
//
// Computing an l-value means saving away information for later use in the compiler,
// no code is generated until the l-value is later dereferenced. It is okay
// to speculatively generate an l-value, just not okay to speculatively dereference it.
//
// The base of the access chain (the left-most variable or expression
// from which everything is based) can be set either as an l-value
// or as an r-value. Most efficient would be to set an l-value if one
// is available. If an expression was evaluated, the resulting r-value
// can be set as the chain base.
//
// The users of this single access chain can save and restore if they
// want to nest or manage multiple chains.
//
struct AccessChain {
Id base; // for l-values, pointer to the base object, for r-values, the base object
std::vector<Id> indexChain;
Id instr; // cache the instruction that generates this access chain
std::vector<unsigned> swizzle; // each std::vector element selects the next GLSL component number
Id component; // a dynamic component index, can coexist with a swizzle, done after the swizzle, NoResult if not present
Id preSwizzleBaseType; // dereferenced type, before swizzle or component is applied; NoType unless a swizzle or component is present
bool isRValue; // true if 'base' is an r-value, otherwise, base is an l-value
};
//
// the SPIR-V builder maintains a single active chain that
// the following methods operate on
//
// for external save and restore
AccessChain getAccessChain() { return accessChain; }
void setAccessChain(AccessChain newChain) { accessChain = newChain; }
// clear accessChain
void clearAccessChain();
// set new base as an l-value base
void setAccessChainLValue(Id lValue)
{
assert(isPointer(lValue));
accessChain.base = lValue;
}
// set new base value as an r-value
void setAccessChainRValue(Id rValue)
{
accessChain.isRValue = true;
accessChain.base = rValue;
}
// push offset onto the end of the chain
void accessChainPush(Id offset)
{
accessChain.indexChain.push_back(offset);
}
// push new swizzle onto the end of any existing swizzle, merging into a single swizzle
void accessChainPushSwizzle(std::vector<unsigned>& swizzle, Id preSwizzleBaseType);
// push a dynamic component selection onto the access chain, only applicable with a
// non-trivial swizzle or no swizzle
void accessChainPushComponent(Id component, Id preSwizzleBaseType)
{
if (accessChain.swizzle.size() != 1) {
accessChain.component = component;
if (accessChain.preSwizzleBaseType == NoType)
accessChain.preSwizzleBaseType = preSwizzleBaseType;
}
}
// use accessChain and swizzle to store value
void accessChainStore(Id rvalue);
// use accessChain and swizzle to load an r-value
Id accessChainLoad(Decoration precision, Decoration nonUniform, Id ResultType);
// get the direct pointer for an l-value
Id accessChainGetLValue();
// Get the inferred SPIR-V type of the result of the current access chain,
// based on the type of the base and the chain of dereferences.
Id accessChainGetInferredType();
// Remove OpDecorate instructions whose operands are defined in unreachable
// blocks.
void eliminateDeadDecorations();
void dump(std::vector<unsigned int>&) const;
void createBranch(Block* block);
void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, unsigned int dependencyLength);
// Sets to generate opcode for specialization constants.
void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
// Sets to generate opcode for non-specialization constants (normal mode).
void setToNormalCodeGenMode() { generatingOpCodeForSpecConst = false; }
// Check if the builder is generating code for spec constants.
bool isInSpecConstCodeGenMode() { return generatingOpCodeForSpecConst; }
protected:
Id makeIntConstant(Id typeId, unsigned value, bool specConstant);
Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
Id findCompositeConstant(Op typeClass, const std::vector<Id>& comps);
Id findStructConstant(Id typeId, const std::vector<Id>& comps);
Id collapseAccessChain();
void remapDynamicSwizzle();
void transferAccessChainSwizzle(bool dynamic);
void simplifyAccessChainSwizzle();
void createAndSetNoPredecessorBlock(const char*);
void createSelectionMerge(Block* mergeBlock, unsigned int control);
void dumpSourceInstructions(std::vector<unsigned int>&) const;
void dumpInstructions(std::vector<unsigned int>&, const std::vector<std::unique_ptr<Instruction> >&) const;
void dumpModuleProcesses(std::vector<unsigned int>&) const;
unsigned int spvVersion; // the version of SPIR-V to emit in the header
SourceLanguage source;
int sourceVersion;
spv::Id sourceFileStringId;
std::string sourceText;
int currentLine;
bool emitOpLines;
std::set<std::string> extensions;
std::vector<const char*> sourceExtensions;
std::vector<const char*> moduleProcesses;
AddressingModel addressModel;
MemoryModel memoryModel;
std::set<spv::Capability> capabilities;
int builderNumber;
Module module;
Block* buildPoint;
Id uniqueId;
Function* entryPointFunction;
bool generatingOpCodeForSpecConst;
AccessChain accessChain;
// special blocks of instructions for output
std::vector<std::unique_ptr<Instruction> > strings;
std::vector<std::unique_ptr<Instruction> > imports;
std::vector<std::unique_ptr<Instruction> > entryPoints;
std::vector<std::unique_ptr<Instruction> > executionModes;
std::vector<std::unique_ptr<Instruction> > names;
std::vector<std::unique_ptr<Instruction> > decorations;
std::vector<std::unique_ptr<Instruction> > constantsTypesGlobals;
std::vector<std::unique_ptr<Instruction> > externals;
std::vector<std::unique_ptr<Function> > functions;
// not output, internally used for quick & dirty canonical (unique) creation
std::unordered_map<unsigned int, std::vector<Instruction*>> groupedConstants; // map type opcodes to constant inst.
std::unordered_map<unsigned int, std::vector<Instruction*>> groupedStructConstants; // map struct-id to constant instructions
std::unordered_map<unsigned int, std::vector<Instruction*>> groupedTypes; // map type opcodes to type instructions
// stack of switches
std::stack<Block*> switchMerges;
// Our loop stack.
std::stack<LoopBlocks> loops;
// The stream for outputting warnings and errors.
SpvBuildLogger* logger;
}; // end Builder class
}; // end spv namespace
#endif // SpvBuilder_H
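To make the access-chain contract concrete, here is a minimal usage sketch for a store of the form a[i].d = value, followed by a load of the same location. It assumes a spv::Builder instance and already-created ids (aVar, iId, dMemberConst, valueId, memberTypeId); all of these names are illustrative, not part of the commit.

// Illustrative sketch only: emit "a[i].d = value", then read it back.
builder.clearAccessChain();
builder.setAccessChainLValue(aVar);        // base is an l-value (a pointer)
builder.accessChainPush(iId);              // a[i]
builder.accessChainPush(dMemberConst);     // a[i].d
builder.accessChainStore(valueId);         // code for the store is generated here

builder.clearAccessChain();
builder.setAccessChainLValue(aVar);
builder.accessChainPush(iId);
builder.accessChainPush(dMemberConst);
// NoPrecision and DecorationMax mean "no precision" and "not non-uniform" here.
spv::Id loaded = builder.accessChainLoad(spv::NoPrecision, spv::DecorationMax, memberTypeId);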

81
glslang/spirv/bitutils.h Normal file
View File

@ -0,0 +1,81 @@
// Copyright (c) 2015-2016 The Khronos Group Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef LIBSPIRV_UTIL_BITUTILS_H_
#define LIBSPIRV_UTIL_BITUTILS_H_
#include <cstdint>
#include <cstring>
namespace spvutils {
// Performs a bitwise copy of source to the destination type Dest.
template <typename Dest, typename Src>
Dest BitwiseCast(Src source) {
Dest dest;
static_assert(sizeof(source) == sizeof(dest),
"BitwiseCast: Source and destination must have the same size");
std::memcpy(&dest, &source, sizeof(dest));
return dest;
}
// SetBits<T, First, Num> returns an integer of type <T> with bits set
// for position <First> through <First + Num - 1>, counting from the least
// significant bit. In particular when Num == 0, no positions are set to 1.
// A static assert will be triggered if First + Num > sizeof(T) * 8, that is,
// a bit that will not fit in the underlying type is set.
template <typename T, size_t First = 0, size_t Num = 0>
struct SetBits {
static_assert(First < sizeof(T) * 8,
"Tried to set a bit that is shifted too far.");
const static T get = (T(1) << First) | SetBits<T, First + 1, Num - 1>::get;
};
template <typename T, size_t Last>
struct SetBits<T, Last, 0> {
const static T get = T(0);
};
// This is all compile-time so we can put our tests right here.
static_assert(SetBits<uint32_t, 0, 0>::get == uint32_t(0x00000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 1>::get == uint32_t(0x00000001),
"SetBits failed");
static_assert(SetBits<uint32_t, 31, 1>::get == uint32_t(0x80000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 1, 2>::get == uint32_t(0x00000006),
"SetBits failed");
static_assert(SetBits<uint32_t, 30, 2>::get == uint32_t(0xc0000000),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 31>::get == uint32_t(0x7FFFFFFF),
"SetBits failed");
static_assert(SetBits<uint32_t, 0, 32>::get == uint32_t(0xFFFFFFFF),
"SetBits failed");
static_assert(SetBits<uint32_t, 16, 16>::get == uint32_t(0xFFFF0000),
"SetBits failed");
static_assert(SetBits<uint64_t, 0, 1>::get == uint64_t(0x0000000000000001LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 63, 1>::get == uint64_t(0x8000000000000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 62, 2>::get == uint64_t(0xc000000000000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 31, 1>::get == uint64_t(0x0000000080000000LL),
"SetBits failed");
static_assert(SetBits<uint64_t, 16, 16>::get == uint64_t(0x00000000FFFF0000LL),
"SetBits failed");
} // namespace spvutils
#endif // LIBSPIRV_UTIL_BITUTILS_H_
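A short usage sketch for the two helpers above; the include path is an assumption, and the values in the comments can be verified by hand.

#include <cstdint>
#include <cstdio>
#include "bitutils.h" // assumed include path for the header shown above

int main()
{
    // BitwiseCast: reinterpret a float's bit pattern as a 32-bit integer.
    uint32_t bits = spvutils::BitwiseCast<uint32_t>(1.0f);          // 0x3F800000
    // SetBits: compile-time mask covering bits 8..11.
    const uint32_t mask = spvutils::SetBits<uint32_t, 8, 4>::get;   // 0x00000F00
    std::printf("%08x %08x\n", bits, mask);
    return 0;
}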

View File

@ -0,0 +1,740 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Disassembler for SPIR-V.
//
#include <cstdlib>
#include <cstring>
#include <cassert>
#include <iomanip>
#include <stack>
#include <sstream>
#include <cstring>
#include "disassemble.h"
#include "doc.h"
namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
namespace spv {
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
#endif
static void Kill(std::ostream& out, const char* message)
{
out << std::endl << "Disassembly failed: " << message << std::endl;
exit(1);
}
// used to identify the extended instruction library imported when printing
enum ExtInstSet {
GLSL450Inst,
#ifdef AMD_EXTENSIONS
GLSLextAMDInst,
#endif
#ifdef NV_EXTENSIONS
GLSLextNVInst,
#endif
OpenCLExtInst,
};
// Container class for a single instance of a SPIR-V stream, with methods for disassembly.
class SpirvStream {
public:
SpirvStream(std::ostream& out, const std::vector<unsigned int>& stream) : out(out), stream(stream), word(0), nextNestedControl(0) { }
virtual ~SpirvStream() { }
void validate();
void processInstructions();
protected:
SpirvStream(const SpirvStream&);
SpirvStream& operator=(const SpirvStream&);
Op getOpCode(int id) const { return idInstruction[id] ? (Op)(stream[idInstruction[id]] & OpCodeMask) : OpNop; }
// Output methods
void outputIndent();
void formatId(Id id, std::stringstream&);
void outputResultId(Id id);
void outputTypeId(Id id);
void outputId(Id id);
void outputMask(OperandClass operandClass, unsigned mask);
void disassembleImmediates(int numOperands);
void disassembleIds(int numOperands);
int disassembleString();
void disassembleInstruction(Id resultId, Id typeId, Op opCode, int numOperands);
// Data
std::ostream& out; // where to write the disassembly
const std::vector<unsigned int>& stream; // the actual word stream
int size; // the size of the word stream
int word; // the next word of the stream to read
// map each <id> to the instruction that created it
Id bound;
std::vector<unsigned int> idInstruction; // the word offset into the stream where the instruction for result [id] starts; 0 if not yet seen (forward reference or function parameter)
std::vector<std::string> idDescriptor; // the best text string known for explaining the <id>
// schema
unsigned int schema;
// stack of structured-merge points
std::stack<Id> nestedControl;
Id nextNestedControl; // need a slight delay for when we are nested
};
void SpirvStream::validate()
{
size = (int)stream.size();
if (size < 4)
Kill(out, "stream is too short");
// Magic number
if (stream[word++] != MagicNumber) {
out << "Bad magic number";
return;
}
// Version
out << "// Module Version " << std::hex << stream[word++] << std::endl;
// Generator's magic number
out << "// Generated by (magic number): " << std::hex << stream[word++] << std::dec << std::endl;
// Result <id> bound
bound = stream[word++];
idInstruction.resize(bound);
idDescriptor.resize(bound);
out << "// Id's are bound by " << bound << std::endl;
out << std::endl;
// Reserved schema, must be 0 for now
schema = stream[word++];
if (schema != 0)
Kill(out, "bad schema, must be 0");
}
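For reference, the five words consumed by validate() are, in order: the magic number, the version, the generator's magic number, the <id> bound, and the reserved schema. A hypothetical minimal header for a SPIR-V 1.0 module would look like this (values other than the magic number and schema are illustrative):

// Illustrative only: the first five words of a SPIR-V 1.0 module header.
std::vector<unsigned int> header = {
    spv::MagicNumber, // 0x07230203
    0x00010000,       // version 1.0
    0,                // generator magic number (tool specific)
    100,              // <id> bound: all ids in the module are < 100
    0                 // reserved schema, must be 0
};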
// Loop over all the instructions, in order, processing each.
// Boilerplate for each is handled here directly; the rest is dispatched.
void SpirvStream::processInstructions()
{
// Instructions
while (word < size) {
int instructionStart = word;
// Instruction wordCount and opcode
unsigned int firstWord = stream[word];
unsigned wordCount = firstWord >> WordCountShift;
Op opCode = (Op)(firstWord & OpCodeMask);
int nextInst = word + wordCount;
++word;
// Presence of full instruction
if (nextInst > size)
Kill(out, "stream instruction terminated too early");
// Base for computing number of operands; will be updated as more is learned
unsigned numOperands = wordCount - 1;
// Type <id>
Id typeId = 0;
if (InstructionDesc[opCode].hasType()) {
typeId = stream[word++];
--numOperands;
}
// Result <id>
Id resultId = 0;
if (InstructionDesc[opCode].hasResult()) {
resultId = stream[word++];
--numOperands;
// save instruction for future reference
idInstruction[resultId] = instructionStart;
}
outputResultId(resultId);
outputTypeId(typeId);
outputIndent();
// Hand off the Op and all its operands
disassembleInstruction(resultId, typeId, opCode, numOperands);
if (word != nextInst) {
out << " ERROR, incorrect number of operands consumed. At " << word << " instead of " << nextInst << " instruction start was " << instructionStart;
word = nextInst;
}
out << std::endl;
}
}
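The first-word decode at the top of this loop packs the word count into the high half and the opcode into the low half; a worked example with a hypothetical stream word:

// Illustrative only: decoding an instruction's first word.
unsigned int firstWord = 0x0004003B;                      // hypothetical value
unsigned wordCount = firstWord >> spv::WordCountShift;    // 4 words total
spv::Op opCode = (spv::Op)(firstWord & spv::OpCodeMask);  // 0x3B = 59 = OpVariable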
void SpirvStream::outputIndent()
{
for (int i = 0; i < (int)nestedControl.size(); ++i)
out << " ";
}
void SpirvStream::formatId(Id id, std::stringstream& idStream)
{
if (id != 0) {
// On instructions with no IDs, this is called with "0", which does not
// have to be within ID bounds on null shaders.
if (id >= bound)
Kill(out, "Bad <id>");
idStream << id;
if (idDescriptor[id].size() > 0)
idStream << "(" << idDescriptor[id] << ")";
}
}
void SpirvStream::outputResultId(Id id)
{
const int width = 16;
std::stringstream idStream;
formatId(id, idStream);
out << std::setw(width) << std::right << idStream.str();
if (id != 0)
out << ":";
else
out << " ";
if (nestedControl.size() && id == nestedControl.top())
nestedControl.pop();
}
void SpirvStream::outputTypeId(Id id)
{
const int width = 12;
std::stringstream idStream;
formatId(id, idStream);
out << std::setw(width) << std::right << idStream.str() << " ";
}
void SpirvStream::outputId(Id id)
{
if (id >= bound)
Kill(out, "Bad <id>");
out << id;
if (idDescriptor[id].size() > 0)
out << "(" << idDescriptor[id] << ")";
}
void SpirvStream::outputMask(OperandClass operandClass, unsigned mask)
{
if (mask == 0)
out << "None";
else {
for (int m = 0; m < OperandClassParams[operandClass].ceiling; ++m) {
if (mask & (1 << m))
out << OperandClassParams[operandClass].getName(m) << " ";
}
}
}
void SpirvStream::disassembleImmediates(int numOperands)
{
for (int i = 0; i < numOperands; ++i) {
out << stream[word++];
if (i < numOperands - 1)
out << " ";
}
}
void SpirvStream::disassembleIds(int numOperands)
{
for (int i = 0; i < numOperands; ++i) {
outputId(stream[word++]);
if (i < numOperands - 1)
out << " ";
}
}
// return the number of operands consumed by the string
int SpirvStream::disassembleString()
{
int startWord = word;
out << " \"";
const char* wordString;
bool done = false;
do {
unsigned int content = stream[word];
wordString = (const char*)&content;
for (int charCount = 0; charCount < 4; ++charCount) {
if (*wordString == 0) {
done = true;
break;
}
out << *(wordString++);
}
++word;
} while (! done);
out << "\"";
return word - startWord;
}
void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode, int numOperands)
{
// Process the opcode
out << (OpcodeString(opCode) + 2); // leave out the "Op"
if (opCode == OpLoopMerge || opCode == OpSelectionMerge)
nextNestedControl = stream[word];
else if (opCode == OpBranchConditional || opCode == OpSwitch) {
if (nextNestedControl) {
nestedControl.push(nextNestedControl);
nextNestedControl = 0;
}
} else if (opCode == OpExtInstImport) {
idDescriptor[resultId] = (const char*)(&stream[word]);
}
else {
if (resultId != 0 && idDescriptor[resultId].size() == 0) {
switch (opCode) {
case OpTypeInt:
switch (stream[word]) {
case 8: idDescriptor[resultId] = "int8_t"; break;
case 16: idDescriptor[resultId] = "int16_t"; break;
default: assert(0); // fallthrough
case 32: idDescriptor[resultId] = "int"; break;
case 64: idDescriptor[resultId] = "int64_t"; break;
}
break;
case OpTypeFloat:
switch (stream[word]) {
case 16: idDescriptor[resultId] = "float16_t"; break;
default: assert(0); // fallthrough
case 32: idDescriptor[resultId] = "float"; break;
case 64: idDescriptor[resultId] = "float64_t"; break;
}
break;
case OpTypeBool:
idDescriptor[resultId] = "bool";
break;
case OpTypeStruct:
idDescriptor[resultId] = "struct";
break;
case OpTypePointer:
idDescriptor[resultId] = "ptr";
break;
case OpTypeVector:
if (idDescriptor[stream[word]].size() > 0) {
idDescriptor[resultId].append(idDescriptor[stream[word]].begin(), idDescriptor[stream[word]].begin() + 1);
if (strstr(idDescriptor[stream[word]].c_str(), "8")) {
idDescriptor[resultId].append("8");
}
if (strstr(idDescriptor[stream[word]].c_str(), "16")) {
idDescriptor[resultId].append("16");
}
if (strstr(idDescriptor[stream[word]].c_str(), "64")) {
idDescriptor[resultId].append("64");
}
}
idDescriptor[resultId].append("vec");
switch (stream[word + 1]) {
case 2: idDescriptor[resultId].append("2"); break;
case 3: idDescriptor[resultId].append("3"); break;
case 4: idDescriptor[resultId].append("4"); break;
case 8: idDescriptor[resultId].append("8"); break;
case 16: idDescriptor[resultId].append("16"); break;
case 32: idDescriptor[resultId].append("32"); break;
default: break;
}
break;
default:
break;
}
}
}
// Process the operands. Note, a new context-dependent set could be
// swapped in mid-traversal.
// Handle images specially, so can put out helpful strings.
if (opCode == OpTypeImage) {
out << " ";
disassembleIds(1);
out << " " << DimensionString((Dim)stream[word++]);
out << (stream[word++] != 0 ? " depth" : "");
out << (stream[word++] != 0 ? " array" : "");
out << (stream[word++] != 0 ? " multi-sampled" : "");
switch (stream[word++]) {
case 0: out << " runtime"; break;
case 1: out << " sampled"; break;
case 2: out << " nonsampled"; break;
}
out << " format:" << ImageFormatString((ImageFormat)stream[word++]);
if (numOperands == 8) {
out << " " << AccessQualifierString(stream[word++]);
}
return;
}
// Handle all the parameterized operands
for (int op = 0; op < InstructionDesc[opCode].operands.getNum() && numOperands > 0; ++op) {
out << " ";
OperandClass operandClass = InstructionDesc[opCode].operands.getClass(op);
switch (operandClass) {
case OperandId:
case OperandScope:
case OperandMemorySemantics:
disassembleIds(1);
--numOperands;
// Get names for printing "(XXX)" for readability, *after* this id
if (opCode == OpName)
idDescriptor[stream[word - 1]] = (const char*)(&stream[word]);
break;
case OperandVariableIds:
disassembleIds(numOperands);
return;
case OperandImageOperands:
outputMask(OperandImageOperands, stream[word++]);
--numOperands;
disassembleIds(numOperands);
return;
case OperandOptionalLiteral:
case OperandVariableLiterals:
if ((opCode == OpDecorate && stream[word - 1] == DecorationBuiltIn) ||
(opCode == OpMemberDecorate && stream[word - 1] == DecorationBuiltIn)) {
out << BuiltInString(stream[word++]);
--numOperands;
++op;
}
disassembleImmediates(numOperands);
return;
case OperandVariableIdLiteral:
while (numOperands > 0) {
out << std::endl;
outputResultId(0);
outputTypeId(0);
outputIndent();
out << " Type ";
disassembleIds(1);
out << ", member ";
disassembleImmediates(1);
numOperands -= 2;
}
return;
case OperandVariableLiteralId:
while (numOperands > 0) {
out << std::endl;
outputResultId(0);
outputTypeId(0);
outputIndent();
out << " case ";
disassembleImmediates(1);
out << ": ";
disassembleIds(1);
numOperands -= 2;
}
return;
case OperandLiteralNumber:
disassembleImmediates(1);
--numOperands;
if (opCode == OpExtInst) {
ExtInstSet extInstSet = GLSL450Inst;
const char* name = idDescriptor[stream[word - 2]].c_str();
if (0 == memcmp("OpenCL", name, 6)) {
extInstSet = OpenCLExtInst;
#ifdef AMD_EXTENSIONS
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
extInstSet = GLSLextAMDInst;
#endif
#ifdef NV_EXTENSIONS
} else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
extInstSet = GLSLextNVInst;
#endif
}
unsigned entrypoint = stream[word - 1];
if (extInstSet == GLSL450Inst) {
if (entrypoint < GLSLstd450Count) {
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
}
#ifdef AMD_EXTENSIONS
} else if (extInstSet == GLSLextAMDInst) {
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
#endif
#ifdef NV_EXTENSIONS
}
else if (extInstSet == GLSLextNVInst) {
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
#endif
}
}
break;
case OperandOptionalLiteralString:
case OperandLiteralString:
numOperands -= disassembleString();
break;
default:
assert(operandClass >= OperandSource && operandClass < OperandOpcode);
if (OperandClassParams[operandClass].bitmask)
outputMask(operandClass, stream[word++]);
else
out << OperandClassParams[operandClass].getName(stream[word++]);
--numOperands;
break;
}
}
return;
}
static void GLSLstd450GetDebugNames(const char** names)
{
for (int i = 0; i < GLSLstd450Count; ++i)
names[i] = "Unknown";
names[GLSLstd450Round] = "Round";
names[GLSLstd450RoundEven] = "RoundEven";
names[GLSLstd450Trunc] = "Trunc";
names[GLSLstd450FAbs] = "FAbs";
names[GLSLstd450SAbs] = "SAbs";
names[GLSLstd450FSign] = "FSign";
names[GLSLstd450SSign] = "SSign";
names[GLSLstd450Floor] = "Floor";
names[GLSLstd450Ceil] = "Ceil";
names[GLSLstd450Fract] = "Fract";
names[GLSLstd450Radians] = "Radians";
names[GLSLstd450Degrees] = "Degrees";
names[GLSLstd450Sin] = "Sin";
names[GLSLstd450Cos] = "Cos";
names[GLSLstd450Tan] = "Tan";
names[GLSLstd450Asin] = "Asin";
names[GLSLstd450Acos] = "Acos";
names[GLSLstd450Atan] = "Atan";
names[GLSLstd450Sinh] = "Sinh";
names[GLSLstd450Cosh] = "Cosh";
names[GLSLstd450Tanh] = "Tanh";
names[GLSLstd450Asinh] = "Asinh";
names[GLSLstd450Acosh] = "Acosh";
names[GLSLstd450Atanh] = "Atanh";
names[GLSLstd450Atan2] = "Atan2";
names[GLSLstd450Pow] = "Pow";
names[GLSLstd450Exp] = "Exp";
names[GLSLstd450Log] = "Log";
names[GLSLstd450Exp2] = "Exp2";
names[GLSLstd450Log2] = "Log2";
names[GLSLstd450Sqrt] = "Sqrt";
names[GLSLstd450InverseSqrt] = "InverseSqrt";
names[GLSLstd450Determinant] = "Determinant";
names[GLSLstd450MatrixInverse] = "MatrixInverse";
names[GLSLstd450Modf] = "Modf";
names[GLSLstd450ModfStruct] = "ModfStruct";
names[GLSLstd450FMin] = "FMin";
names[GLSLstd450SMin] = "SMin";
names[GLSLstd450UMin] = "UMin";
names[GLSLstd450FMax] = "FMax";
names[GLSLstd450SMax] = "SMax";
names[GLSLstd450UMax] = "UMax";
names[GLSLstd450FClamp] = "FClamp";
names[GLSLstd450SClamp] = "SClamp";
names[GLSLstd450UClamp] = "UClamp";
names[GLSLstd450FMix] = "FMix";
names[GLSLstd450Step] = "Step";
names[GLSLstd450SmoothStep] = "SmoothStep";
names[GLSLstd450Fma] = "Fma";
names[GLSLstd450Frexp] = "Frexp";
names[GLSLstd450FrexpStruct] = "FrexpStruct";
names[GLSLstd450Ldexp] = "Ldexp";
names[GLSLstd450PackSnorm4x8] = "PackSnorm4x8";
names[GLSLstd450PackUnorm4x8] = "PackUnorm4x8";
names[GLSLstd450PackSnorm2x16] = "PackSnorm2x16";
names[GLSLstd450PackUnorm2x16] = "PackUnorm2x16";
names[GLSLstd450PackHalf2x16] = "PackHalf2x16";
names[GLSLstd450PackDouble2x32] = "PackDouble2x32";
names[GLSLstd450UnpackSnorm2x16] = "UnpackSnorm2x16";
names[GLSLstd450UnpackUnorm2x16] = "UnpackUnorm2x16";
names[GLSLstd450UnpackHalf2x16] = "UnpackHalf2x16";
names[GLSLstd450UnpackSnorm4x8] = "UnpackSnorm4x8";
names[GLSLstd450UnpackUnorm4x8] = "UnpackUnorm4x8";
names[GLSLstd450UnpackDouble2x32] = "UnpackDouble2x32";
names[GLSLstd450Length] = "Length";
names[GLSLstd450Distance] = "Distance";
names[GLSLstd450Cross] = "Cross";
names[GLSLstd450Normalize] = "Normalize";
names[GLSLstd450FaceForward] = "FaceForward";
names[GLSLstd450Reflect] = "Reflect";
names[GLSLstd450Refract] = "Refract";
names[GLSLstd450FindILsb] = "FindILsb";
names[GLSLstd450FindSMsb] = "FindSMsb";
names[GLSLstd450FindUMsb] = "FindUMsb";
names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
}
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
switch (entrypoint) {
case SwizzleInvocationsAMD: return "SwizzleInvocationsAMD";
case SwizzleInvocationsMaskedAMD: return "SwizzleInvocationsMaskedAMD";
case WriteInvocationAMD: return "WriteInvocationAMD";
case MbcntAMD: return "MbcntAMD";
default: return "Bad";
}
} else if (strcmp(name, spv::E_SPV_AMD_shader_trinary_minmax) == 0) {
switch (entrypoint) {
case FMin3AMD: return "FMin3AMD";
case UMin3AMD: return "UMin3AMD";
case SMin3AMD: return "SMin3AMD";
case FMax3AMD: return "FMax3AMD";
case UMax3AMD: return "UMax3AMD";
case SMax3AMD: return "SMax3AMD";
case FMid3AMD: return "FMid3AMD";
case UMid3AMD: return "UMid3AMD";
case SMid3AMD: return "SMid3AMD";
default: return "Bad";
}
} else if (strcmp(name, spv::E_SPV_AMD_shader_explicit_vertex_parameter) == 0) {
switch (entrypoint) {
case InterpolateAtVertexAMD: return "InterpolateAtVertexAMD";
default: return "Bad";
}
}
else if (strcmp(name, spv::E_SPV_AMD_gcn_shader) == 0) {
switch (entrypoint) {
case CubeFaceIndexAMD: return "CubeFaceIndexAMD";
case CubeFaceCoordAMD: return "CubeFaceCoordAMD";
case TimeAMD: return "TimeAMD";
default:
break;
}
}
return "Bad";
}
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0) {
switch (entrypoint) {
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
case BuiltInViewportMaskNV: return "ViewportMaskNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
case DecorationSecondaryViewportRelativeNV: return "SecondaryViewportRelativeNV";
case BuiltInSecondaryPositionNV: return "SecondaryPositionNV";
case BuiltInSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
case CapabilityShaderStereoViewNV: return "ShaderStereoViewNV";
case BuiltInPositionPerViewNV: return "PositionPerViewNV";
case BuiltInViewportMaskPerViewNV: return "ViewportMaskPerViewNV";
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
default: return "Bad";
}
}
return "Bad";
}
#endif
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
{
SpirvStream SpirvStream(out, stream);
spv::Parameterize();
GLSLstd450GetDebugNames(GlslStd450DebugNames);
SpirvStream.validate();
SpirvStream.processInstructions();
}
#if ENABLE_OPT
#include "spirv-tools/libspirv.h"
// Use the SPIRV-Tools disassembler to print SPIR-V.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv)
{
spv_context context = spvContextCreate(SPV_ENV_UNIVERSAL_1_3);
spv_text text;
spv_diagnostic diagnostic = nullptr;
spvBinaryToText(context, &spirv.front(), spirv.size(),
SPV_BINARY_TO_TEXT_OPTION_FRIENDLY_NAMES | SPV_BINARY_TO_TEXT_OPTION_INDENT,
&text, &diagnostic);
if (diagnostic == nullptr)
out << text->str;
else
spvDiagnosticPrint(diagnostic);
}
#endif
}; // end namespace spv
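Typical use of the custom disassembler defined above is a single call; a sketch, assuming the SPIR-V binary has already been loaded into a word vector:

#include <iostream>
#include <vector>
#include "disassemble.h"

// 'spirvWords' is assumed to hold a complete SPIR-V module, e.g. read from a .spv file.
void printDisassembly(const std::vector<unsigned int>& spirvWords)
{
    // Disassemble() parameterizes the opcode tables itself and writes text to the stream.
    spv::Disassemble(std::cout, spirvWords);
}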

View File

@ -0,0 +1,56 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Disassembler for SPIR-V.
//
#pragma once
#ifndef disassembler_H
#define disassembler_H
#include <iostream>
#include <vector>
namespace spv {
// disassemble with glslang custom disassembler
void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
// disassemble with SPIRV-Tools disassembler
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& stream);
}; // end namespace spv
#endif // disassembler_H

2572
glslang/spirv/doc.cpp Normal file

File diff suppressed because it is too large.

258
glslang/spirv/doc.h Normal file
View File

@ -0,0 +1,258 @@
//
// Copyright (C) 2014-2015 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Parameterize the SPIR-V enumerants.
//
#pragma once
#include "spirv.hpp"
#include <vector>
namespace spv {
// Fill in all the parameters
void Parameterize();
// Return the English names of all the enums.
const char* SourceString(int);
const char* AddressingString(int);
const char* MemoryString(int);
const char* ExecutionModelString(int);
const char* ExecutionModeString(int);
const char* StorageClassString(int);
const char* DecorationString(int);
const char* BuiltInString(int);
const char* DimensionString(int);
const char* SelectControlString(int);
const char* LoopControlString(int);
const char* FunctionControlString(int);
const char* SamplerAddressingModeString(int);
const char* SamplerFilterModeString(int);
const char* ImageFormatString(int);
const char* ImageChannelOrderString(int);
const char* ImageChannelTypeString(int);
const char* ImageChannelDataTypeString(int type);
const char* ImageOperandsString(int format);
const char* ImageOperands(int);
const char* FPFastMathString(int);
const char* FPRoundingModeString(int);
const char* LinkageTypeString(int);
const char* FuncParamAttrString(int);
const char* AccessQualifierString(int);
const char* MemorySemanticsString(int);
const char* MemoryAccessString(int);
const char* ExecutionScopeString(int);
const char* GroupOperationString(int);
const char* KernelEnqueueFlagsString(int);
const char* KernelProfilingInfoString(int);
const char* CapabilityString(int);
const char* OpcodeString(int);
const char* ScopeString(int mem);
// For grouping opcodes into subsections
enum OpcodeClass {
OpClassMisc,
OpClassDebug,
OpClassAnnotate,
OpClassExtension,
OpClassMode,
OpClassType,
OpClassConstant,
OpClassMemory,
OpClassFunction,
OpClassImage,
OpClassConvert,
OpClassComposite,
OpClassArithmetic,
OpClassBit,
OpClassRelationalLogical,
OpClassDerivative,
OpClassFlowControl,
OpClassAtomic,
OpClassPrimitive,
OpClassBarrier,
OpClassGroup,
OpClassDeviceSideEnqueue,
OpClassPipe,
OpClassCount,
OpClassMissing // all instructions start out as missing
};
// For parameterizing operands.
enum OperandClass {
OperandNone,
OperandId,
OperandVariableIds,
OperandOptionalLiteral,
OperandOptionalLiteralString,
OperandVariableLiterals,
OperandVariableIdLiteral,
OperandVariableLiteralId,
OperandLiteralNumber,
OperandLiteralString,
OperandSource,
OperandExecutionModel,
OperandAddressing,
OperandMemory,
OperandExecutionMode,
OperandStorage,
OperandDimensionality,
OperandSamplerAddressingMode,
OperandSamplerFilterMode,
OperandSamplerImageFormat,
OperandImageChannelOrder,
OperandImageChannelDataType,
OperandImageOperands,
OperandFPFastMath,
OperandFPRoundingMode,
OperandLinkageType,
OperandAccessQualifier,
OperandFuncParamAttr,
OperandDecoration,
OperandBuiltIn,
OperandSelect,
OperandLoop,
OperandFunction,
OperandMemorySemantics,
OperandMemoryAccess,
OperandScope,
OperandGroupOperation,
OperandKernelEnqueueFlags,
OperandKernelProfilingInfo,
OperandCapability,
OperandOpcode,
OperandCount
};
// Any specific enum can have a set of capabilities that allow it:
typedef std::vector<Capability> EnumCaps;
// Parameterize a set of operands with their OperandClass(es) and descriptions.
class OperandParameters {
public:
OperandParameters() { }
void push(OperandClass oc, const char* d, bool opt = false)
{
opClass.push_back(oc);
desc.push_back(d);
optional.push_back(opt);
}
void setOptional();
OperandClass getClass(int op) const { return opClass[op]; }
const char* getDesc(int op) const { return desc[op]; }
bool isOptional(int op) const { return optional[op]; }
int getNum() const { return (int)opClass.size(); }
protected:
std::vector<OperandClass> opClass;
std::vector<const char*> desc;
std::vector<bool> optional;
};
// Parameterize an enumerant
class EnumParameters {
public:
EnumParameters() : desc(0) { }
const char* desc;
};
// Parameterize a set of enumerants that form an enum
class EnumDefinition : public EnumParameters {
public:
EnumDefinition() :
ceiling(0), bitmask(false), getName(0), enumParams(0), operandParams(0) { }
void set(int ceil, const char* (*name)(int), EnumParameters* ep, bool mask = false)
{
ceiling = ceil;
getName = name;
bitmask = mask;
enumParams = ep;
}
void setOperands(OperandParameters* op) { operandParams = op; }
int ceiling; // ceiling of enumerants
bool bitmask; // true if these enumerants combine into a bitmask
const char* (*getName)(int); // a function that returns the name for each enumerant value (or shift)
EnumParameters* enumParams; // parameters for each individual enumerant
OperandParameters* operandParams; // sets of operands
};
// Parameterize an instruction's logical format, including its known set of operands,
// per OperandParameters above.
class InstructionParameters {
public:
InstructionParameters() :
opDesc("TBD"),
opClass(OpClassMissing),
typePresent(true), // most normal, only exceptions have to be spelled out
resultPresent(true) // most normal, only exceptions have to be spelled out
{ }
void setResultAndType(bool r, bool t)
{
resultPresent = r;
typePresent = t;
}
bool hasResult() const { return resultPresent != 0; }
bool hasType() const { return typePresent != 0; }
const char* opDesc;
OpcodeClass opClass;
OperandParameters operands;
protected:
int typePresent : 1;
int resultPresent : 1;
};
// The set of objects that hold all the instruction/operand
// parameterization information.
extern InstructionParameters InstructionDesc[];
// These hold definitions of the enumerants used for operands
extern EnumDefinition OperandClassParams[];
const char* GetOperandDesc(OperandClass operand);
void PrintImmediateRow(int imm, const char* name, const EnumParameters* enumParams, bool caps, bool hex = false);
const char* AccessQualifierString(int attr);
void PrintOperands(const OperandParameters& operands, int reservedOperands);
}; // end namespace spv
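As a sketch of how these tables are consumed (mirroring what the disassembler does), Parameterize() is called once and the per-opcode operand descriptions can then be walked; the function and variable names here are illustrative:

#include <cstdio>
#include "doc.h"

// Illustrative sketch: print the operand descriptions declared for one opcode.
void printOperands(spv::Op opCode)
{
    spv::Parameterize(); // fills in InstructionDesc and OperandClassParams
    const spv::InstructionParameters& inst = spv::InstructionDesc[opCode];
    std::printf("%s: %d declared operand(s)\n", spv::OpcodeString(opCode), inst.operands.getNum());
    for (int op = 0; op < inst.operands.getNum(); ++op)
        std::printf("  %d: %s\n", op, inst.operands.getDesc(op));
}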

1078
glslang/spirv/hex_float.h Normal file

File diff suppressed because it is too large.

1102
glslang/spirv/spirv.hpp Normal file

File diff suppressed because it is too large.

408
glslang/spirv/spvIR.h Normal file
View File

@ -0,0 +1,408 @@
//
// Copyright (C) 2014 LunarG, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
// SPIRV-IR
//
// Simple in-memory representation (IR) of SPIR-V. Just for holding
// each function's CFG of blocks. Has this hierarchy:
// - Module, which is a list of
// - Function, which is a list of
// - Block, which is a list of
// - Instruction
//
#pragma once
#ifndef spvIR_H
#define spvIR_H
#include "spirv.hpp"
#include <algorithm>
#include <cassert>
#include <functional>
#include <iostream>
#include <memory>
#include <vector>
namespace spv {
class Block;
class Function;
class Module;
const Id NoResult = 0;
const Id NoType = 0;
const Decoration NoPrecision = DecorationMax;
#ifdef __GNUC__
# define POTENTIALLY_UNUSED __attribute__((unused))
#else
# define POTENTIALLY_UNUSED
#endif
POTENTIALLY_UNUSED
const MemorySemanticsMask MemorySemanticsAllMemory =
(MemorySemanticsMask)(MemorySemanticsUniformMemoryMask |
MemorySemanticsWorkgroupMemoryMask |
MemorySemanticsAtomicCounterMemoryMask |
MemorySemanticsImageMemoryMask);
//
// SPIR-V IR instruction.
//
class Instruction {
public:
Instruction(Id resultId, Id typeId, Op opCode) : resultId(resultId), typeId(typeId), opCode(opCode), block(nullptr) { }
explicit Instruction(Op opCode) : resultId(NoResult), typeId(NoType), opCode(opCode), block(nullptr) { }
virtual ~Instruction() {}
void addIdOperand(Id id) { operands.push_back(id); }
void addImmediateOperand(unsigned int immediate) { operands.push_back(immediate); }
void addStringOperand(const char* str)
{
unsigned int word;
char* wordString = (char*)&word;
char* wordPtr = wordString;
int charCount = 0;
char c;
do {
c = *(str++);
*(wordPtr++) = c;
++charCount;
if (charCount == 4) {
addImmediateOperand(word);
wordPtr = wordString;
charCount = 0;
}
} while (c != 0);
// deal with partial last word
if (charCount > 0) {
// pad with 0s
for (; charCount < 4; ++charCount)
*(wordPtr++) = 0;
addImmediateOperand(word);
}
}
void setBlock(Block* b) { block = b; }
Block* getBlock() const { return block; }
Op getOpCode() const { return opCode; }
int getNumOperands() const { return (int)operands.size(); }
Id getResultId() const { return resultId; }
Id getTypeId() const { return typeId; }
Id getIdOperand(int op) const { return operands[op]; }
unsigned int getImmediateOperand(int op) const { return operands[op]; }
// Write out the binary form.
void dump(std::vector<unsigned int>& out) const
{
// Compute the wordCount
unsigned int wordCount = 1;
if (typeId)
++wordCount;
if (resultId)
++wordCount;
wordCount += (unsigned int)operands.size();
// Write out the beginning of the instruction
out.push_back(((wordCount) << WordCountShift) | opCode);
if (typeId)
out.push_back(typeId);
if (resultId)
out.push_back(resultId);
// Write out the operands
for (int op = 0; op < (int)operands.size(); ++op)
out.push_back(operands[op]);
}
protected:
Instruction(const Instruction&);
Id resultId;
Id typeId;
Op opCode;
std::vector<Id> operands;
Block* block;
};
//
// SPIR-V IR block.
//
class Block {
public:
Block(Id id, Function& parent);
virtual ~Block()
{
}
Id getId() { return instructions.front()->getResultId(); }
Function& getParent() const { return parent; }
void addInstruction(std::unique_ptr<Instruction> inst);
void addPredecessor(Block* pred) { predecessors.push_back(pred); pred->successors.push_back(this);}
void addLocalVariable(std::unique_ptr<Instruction> inst) { localVariables.push_back(std::move(inst)); }
const std::vector<Block*>& getPredecessors() const { return predecessors; }
const std::vector<Block*>& getSuccessors() const { return successors; }
const std::vector<std::unique_ptr<Instruction> >& getInstructions() const {
return instructions;
}
void setUnreachable() { unreachable = true; }
bool isUnreachable() const { return unreachable; }
// Returns the block's merge instruction, if one exists (otherwise null).
const Instruction* getMergeInstruction() const {
if (instructions.size() < 2) return nullptr;
const Instruction* nextToLast = (instructions.cend() - 2)->get();
switch (nextToLast->getOpCode()) {
case OpSelectionMerge:
case OpLoopMerge:
return nextToLast;
default:
return nullptr;
}
return nullptr;
}
bool isTerminated() const
{
switch (instructions.back()->getOpCode()) {
case OpBranch:
case OpBranchConditional:
case OpSwitch:
case OpKill:
case OpReturn:
case OpReturnValue:
return true;
default:
return false;
}
}
void dump(std::vector<unsigned int>& out) const
{
instructions[0]->dump(out);
for (int i = 0; i < (int)localVariables.size(); ++i)
localVariables[i]->dump(out);
for (int i = 1; i < (int)instructions.size(); ++i)
instructions[i]->dump(out);
}
protected:
Block(const Block&);
Block& operator=(Block&);
// To enforce keeping parent and ownership in sync:
friend Function;
std::vector<std::unique_ptr<Instruction> > instructions;
std::vector<Block*> predecessors, successors;
std::vector<std::unique_ptr<Instruction> > localVariables;
Function& parent;
// track whether this block is known to be unreachable (not necessarily
// true for all unreachable blocks, but should be set at least
// for the extraneous ones introduced by the builder).
bool unreachable;
};
// Traverses the control-flow graph rooted at root in an order suited for
// readable code generation. Invokes callback at every node in the traversal
// order.
void inReadableOrder(Block* root, std::function<void(Block*)> callback);
//
// SPIR-V IR Function.
//
class Function {
public:
Function(Id id, Id resultType, Id functionType, Id firstParam, Module& parent);
virtual ~Function()
{
for (int i = 0; i < (int)parameterInstructions.size(); ++i)
delete parameterInstructions[i];
for (int i = 0; i < (int)blocks.size(); ++i)
delete blocks[i];
}
Id getId() const { return functionInstruction.getResultId(); }
Id getParamId(int p) const { return parameterInstructions[p]->getResultId(); }
Id getParamType(int p) const { return parameterInstructions[p]->getTypeId(); }
void addBlock(Block* block) { blocks.push_back(block); }
void removeBlock(Block* block)
{
auto found = find(blocks.begin(), blocks.end(), block);
assert(found != blocks.end());
blocks.erase(found);
delete block;
}
Module& getParent() const { return parent; }
Block* getEntryBlock() const { return blocks.front(); }
Block* getLastBlock() const { return blocks.back(); }
const std::vector<Block*>& getBlocks() const { return blocks; }
void addLocalVariable(std::unique_ptr<Instruction> inst);
Id getReturnType() const { return functionInstruction.getTypeId(); }
void setImplicitThis() { implicitThis = true; }
bool hasImplicitThis() const { return implicitThis; }
void dump(std::vector<unsigned int>& out) const
{
// OpFunction
functionInstruction.dump(out);
// OpFunctionParameter
for (int p = 0; p < (int)parameterInstructions.size(); ++p)
parameterInstructions[p]->dump(out);
// Blocks
inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
Instruction end(0, 0, OpFunctionEnd);
end.dump(out);
}
protected:
Function(const Function&);
Function& operator=(Function&);
Module& parent;
Instruction functionInstruction;
std::vector<Instruction*> parameterInstructions;
std::vector<Block*> blocks;
bool implicitThis; // true if this is a member function expecting to be passed a 'this' as the first argument
};
//
// SPIR-V IR Module.
//
class Module {
public:
Module() {}
virtual ~Module()
{
// TODO delete things
}
void addFunction(Function *fun) { functions.push_back(fun); }
void mapInstruction(Instruction *instruction)
{
spv::Id resultId = instruction->getResultId();
// map the instruction's result id
if (resultId >= idToInstruction.size())
idToInstruction.resize(resultId + 16);
idToInstruction[resultId] = instruction;
}
Instruction* getInstruction(Id id) const { return idToInstruction[id]; }
const std::vector<Function*>& getFunctions() const { return functions; }
spv::Id getTypeId(Id resultId) const { return idToInstruction[resultId]->getTypeId(); }
StorageClass getStorageClass(Id typeId) const
{
assert(idToInstruction[typeId]->getOpCode() == spv::OpTypePointer);
return (StorageClass)idToInstruction[typeId]->getImmediateOperand(0);
}
void dump(std::vector<unsigned int>& out) const
{
for (int f = 0; f < (int)functions.size(); ++f)
functions[f]->dump(out);
}
protected:
Module(const Module&);
std::vector<Function*> functions;
// map from result id to instruction having that result id
std::vector<Instruction*> idToInstruction;
// map from a result id to its type id
};
//
// Implementation (it's here due to circular type definitions).
//
// Add both
// - the OpFunction instruction
// - all the OpFunctionParameter instructions
__inline Function::Function(Id id, Id resultType, Id functionType, Id firstParamId, Module& parent)
: parent(parent), functionInstruction(id, resultType, OpFunction), implicitThis(false)
{
// OpFunction
functionInstruction.addImmediateOperand(FunctionControlMaskNone);
functionInstruction.addIdOperand(functionType);
parent.mapInstruction(&functionInstruction);
parent.addFunction(this);
// OpFunctionParameter
Instruction* typeInst = parent.getInstruction(functionType);
int numParams = typeInst->getNumOperands() - 1;
for (int p = 0; p < numParams; ++p) {
Instruction* param = new Instruction(firstParamId + p, typeInst->getIdOperand(p + 1), OpFunctionParameter);
parent.mapInstruction(param);
parameterInstructions.push_back(param);
}
}
__inline void Function::addLocalVariable(std::unique_ptr<Instruction> inst)
{
Instruction* raw_instruction = inst.get();
blocks[0]->addLocalVariable(std::move(inst));
parent.mapInstruction(raw_instruction);
}
__inline Block::Block(Id id, Function& parent) : parent(parent), unreachable(false)
{
instructions.push_back(std::unique_ptr<Instruction>(new Instruction(id, NoType, OpLabel)));
instructions.back()->setBlock(this);
parent.getParent().mapInstruction(instructions.back().get());
}
__inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
{
Instruction* raw_instruction = inst.get();
instructions.push_back(std::move(inst));
raw_instruction->setBlock(this);
if (raw_instruction->getResultId())
parent.getParent().mapInstruction(raw_instruction);
}
}; // end spv namespace
#endif // spvIR_H
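A minimal sketch of the Instruction encoding above: an OpName instruction naming id 5 as "main", dumped into a word vector. The helper name is illustrative.

#include <vector>
#include "spvIR.h"

// Illustrative only: encode OpName %5 "main" by hand.
std::vector<unsigned int> encodeName()
{
    spv::Instruction inst(spv::OpName);   // OpName has no result id and no type id
    inst.addIdOperand(5);
    inst.addStringOperand("main");        // packed four chars per word, zero padded
    std::vector<unsigned int> words;
    inst.dump(words);                     // words[0] == (wordCount << WordCountShift) | OpName
    return words;
}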

View File

@ -460,7 +460,7 @@ add_custom_target( revision_check ALL
# Libraries ZDoom needs
message( STATUS "Fluid synth libs: ${FLUIDSYNTH_LIBRARIES}" )
set( ZDOOM_LIBS ${ZDOOM_LIBS} "${ZLIB_LIBRARIES}" "${JPEG_LIBRARIES}" "${BZIP2_LIBRARIES}" "${GME_LIBRARIES}" "${CMAKE_DL_LIBS}" )
set( ZDOOM_LIBS ${ZDOOM_LIBS} "${ZLIB_LIBRARIES}" "${JPEG_LIBRARIES}" "${BZIP2_LIBRARIES}" "${GME_LIBRARIES}" "${CMAKE_DL_LIBS}" "glslang" "SPIRV" "OGLCompiler" )
include_directories( "${ZLIB_INCLUDE_DIR}" "${BZIP2_INCLUDE_DIR}" "${LZMA_INCLUDE_DIR}" "${JPEG_INCLUDE_DIR}" "${GME_INCLUDE_DIR}" )
if( ${HAVE_VM_JIT} )
@ -710,6 +710,9 @@ file( GLOB HEADER_FILES
rendering/hwrenderer/scene/*.h
rendering/hwrenderer/textures/*.h
rendering/hwrenderer/utility/*.h
rendering/vulkan/*.h
rendering/vulkan/system/*.h
rendering/vulkan/textures/*.h
rendering/gl/*.h
rendering/gl/models/*.h
rendering/gl/renderer/*.h
@ -889,6 +892,14 @@ set( FASTMATH_SOURCES
sound/opnmidi/opnmidi_private.cpp
sound/opnmidi/wopn/wopn_file.c
#Vulkan stuff must go into a separate list later because it needs to be disabled for some platforms
rendering/vulkan/system/vk_device.cpp
rendering/vulkan/system/vk_swapchain.cpp
rendering/vulkan/system/vk_framebuffer.cpp
rendering/vulkan/textures/vk_samplers.cpp
rendering/vulkan/textures/vk_hwtexture.cpp
rendering/vulkan/thirdparty/volk/volk.c
rendering/vulkan/thirdparty/vk_mem_alloc/vk_mem_alloc.cpp
)
set (PCH_SOURCES
@ -1348,8 +1359,11 @@ include_directories( .
utility/nodebuilder
scripting
scripting/vm
rendering/vulkan/thirdparty
../gdtoa
../dumb/include
../glslang/glslang/Public
../glslang/spirv
${CMAKE_BINARY_DIR}/gdtoa
${SYSTEM_SOURCES_DIR} )
@ -1477,6 +1491,11 @@ source_group("Rendering\\Hardware Renderer\\Shaders" REGULAR_EXPRESSION "^${CMAK
source_group("Rendering\\Hardware Renderer\\System" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/hwrenderer/system/.+")
source_group("Rendering\\Hardware Renderer\\Textures" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/hwrenderer/textures/.+")
source_group("Rendering\\Hardware Renderer\\Utilities" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/hwrenderer/utility/.+")
source_group("Rendering\\Vulkan Renderer\\System" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/vulkan/system/.+")
source_group("Rendering\\Vulkan Renderer\\Textures" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/vulkan/textures/.+")
source_group("Rendering\\Vulkan Renderer\\Third Party" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/vulkan/thirdparty/.+")
source_group("Rendering\\Vulkan Renderer\\Third Party\\Volk" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/vulkan/thirdparty/volk/.+")
source_group("Rendering\\Vulkan Renderer\\Third Party\\Vk_Mem_Alloc" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/vulkan/thirdparty/vk_mem_alloc.+")
source_group("Rendering\\OpenGL Loader" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/gl_load/.+")
source_group("Rendering\\OpenGL Renderer" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/gl/.+")
source_group("Rendering\\OpenGL Renderer\\Data" REGULAR_EXPRESSION "^${CMAKE_CURRENT_SOURCE_DIR}/rendering/gl/data/.+")

File diff suppressed because it is too large.

View File

@ -0,0 +1,439 @@
//
//---------------------------------------------------------------------------
//
// Copyright(C) 2018 Christoph Oelckers
// Copyright(C) 2019 Magnus Norddahl
// All rights reserved.
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with this program. If not, see http://www.gnu.org/licenses/
//
//--------------------------------------------------------------------------
//
#ifdef _WIN32
#define VK_USE_PLATFORM_WIN32_KHR
#endif
#include "volk/volk.h"
#ifdef _WIN32
#undef max
#undef min
extern HWND Window;
#endif
#include <vector>
#include <array>
#include <set>
#include "vk_device.h"
#include "vk_swapchain.h"
#include "c_cvars.h"
#include "i_system.h"
#include "version.h"
#include "doomerrors.h"
#ifdef NDEBUG
CVAR(Bool, vk_debug, true, 0); // this should be false, once the oversized model can be removed.
#else
CVAR(Bool, vk_debug, true, 0);
#endif
VulkanDevice::VulkanDevice()
{
if (volkInitialize() != VK_SUCCESS)
{
throw std::runtime_error("Unable to find Vulkan");
}
auto iver = volkGetInstanceVersion();
if (iver == 0)
{
throw std::runtime_error("Vulkan not supported");
}
try
{
createInstance();
createSurface();
selectPhysicalDevice();
createDevice();
createAllocator();
RECT clientRect = { 0 };
GetClientRect(Window, &clientRect);
swapChain = std::make_unique<VulkanSwapChain>(this, clientRect.right, clientRect.bottom, true);
createSemaphores();
}
catch (...)
{
releaseResources();
throw;
}
}
VulkanDevice::~VulkanDevice()
{
releaseResources();
}
void VulkanDevice::windowResized()
{
RECT clientRect = { 0 };
GetClientRect(Window, &clientRect);
swapChain.reset();
swapChain = std::make_unique<VulkanSwapChain>(this, clientRect.right, clientRect.bottom, true);
}
void VulkanDevice::waitPresent()
{
vkWaitForFences(device, 1, &renderFinishedFence, VK_TRUE, std::numeric_limits<uint64_t>::max());
vkResetFences(device, 1, &renderFinishedFence);
}
void VulkanDevice::beginFrame()
{
VkResult result = vkAcquireNextImageKHR(device, swapChain->swapChain, std::numeric_limits<uint64_t>::max(), imageAvailableSemaphore, VK_NULL_HANDLE, &presentImageIndex);
if (result != VK_SUCCESS)
throw std::runtime_error("Failed to acquire next image!");
}
void VulkanDevice::presentFrame()
{
VkSemaphore waitSemaphores[] = { renderFinishedSemaphore };
VkSwapchainKHR swapChains[] = { swapChain->swapChain };
VkPresentInfoKHR presentInfo = {};
presentInfo.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR;
presentInfo.waitSemaphoreCount = 1;
presentInfo.pWaitSemaphores = waitSemaphores;
presentInfo.swapchainCount = 1;
presentInfo.pSwapchains = swapChains;
presentInfo.pImageIndices = &presentImageIndex;
presentInfo.pResults = nullptr;
vkQueuePresentKHR(presentQueue, &presentInfo);
}
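Taken together, beginFrame(), presentFrame() and waitPresent() suggest a per-frame call order along the lines below. This is only an illustrative sketch of the intended usage, not code from the commit; the command buffer submission in the middle is elided.

// Hypothetical frame loop built on the methods above (illustrative only).
void renderOneFrame(VulkanDevice* dev)
{
    dev->beginFrame();    // acquires the next swap chain image, signalling imageAvailableSemaphore
    // ... record and submit command buffers that wait on imageAvailableSemaphore
    //     and signal renderFinishedSemaphore / renderFinishedFence ...
    dev->presentFrame();  // queues the image for presentation, waiting on renderFinishedSemaphore
    dev->waitPresent();   // blocks on renderFinishedFence, then resets it
}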
VkBool32 VulkanDevice::debugCallback(VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, VkDebugUtilsMessageTypeFlagsEXT messageType, const VkDebugUtilsMessengerCallbackDataEXT* callbackData, void* userData)
{
VulkanDevice *device = (VulkanDevice*)userData;
const char *prefix = "";
if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT)
{
prefix = "error";
}
else if (messageSeverity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT)
{
prefix = "warning";
}
Printf("Vulkan validation layer %s: %s\n", prefix, callbackData->pMessage);
return VK_FALSE;
}
void VulkanDevice::createInstance()
{
VkResult result;
uint32_t layerCount;
vkEnumerateInstanceLayerProperties(&layerCount, nullptr);
availableLayers.resize(layerCount);
vkEnumerateInstanceLayerProperties(&layerCount, availableLayers.data());
uint32_t extensionCount = 0;
result = vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
extensions.resize(extensionCount);
vkEnumerateInstanceExtensionProperties(nullptr, &extensionCount, extensions.data());
VkApplicationInfo appInfo = {};
appInfo.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
appInfo.pApplicationName = "GZDoom";
appInfo.applicationVersion = VK_MAKE_VERSION(VER_MAJOR, VER_MINOR, VER_REVISION);
appInfo.pEngineName = "GZDoom";
appInfo.engineVersion = VK_MAKE_VERSION(ENG_MAJOR, ENG_MINOR, ENG_REVISION);
appInfo.apiVersion = VK_API_VERSION_1_0;
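// The surface extensions are always required; the debug utils extension is
// only added when the validation layer is actually present.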
std::vector<const char *> enabledExtensions = { VK_KHR_SURFACE_EXTENSION_NAME, VK_KHR_WIN32_SURFACE_EXTENSION_NAME };
std::vector<const char*> validationLayers;
std::string debugLayer = "VK_LAYER_LUNARG_standard_validation";
bool debugLayerFound = false;
for (const VkLayerProperties &layer : availableLayers)
{
if (layer.layerName == debugLayer)
{
validationLayers.push_back(debugLayer.c_str());
//enabledExtensions.push_back(VK_EXT_DEBUG_REPORT_EXTENSION_NAME);
enabledExtensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
debugLayerFound = true;
}
}
VkInstanceCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
createInfo.pApplicationInfo = &appInfo;
createInfo.enabledExtensionCount = (uint32_t)enabledExtensions.size();
createInfo.ppEnabledExtensionNames = enabledExtensions.data();
createInfo.enabledLayerCount = (uint32_t)validationLayers.size();
createInfo.ppEnabledLayerNames = validationLayers.data();
result = vkCreateInstance(&createInfo, nullptr, &instance);
if (result != VK_SUCCESS)
throw std::runtime_error("Could not create vulkan instance");
volkLoadInstance(instance);
if (debugLayerFound)
{
VkDebugUtilsMessengerCreateInfoEXT createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT;
createInfo.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
createInfo.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT;
createInfo.pfnUserCallback = debugCallback;
createInfo.pUserData = this;
result = vkCreateDebugUtilsMessengerEXT(instance, &createInfo, nullptr, &debugMessenger);
if (result != VK_SUCCESS)
throw std::runtime_error("vkCreateDebugUtilsMessengerEXT failed");
/*
VkDebugReportCallbackCreateInfoEXT createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT;
createInfo.flags = VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT;
createInfo.pfnCallback = debugCallback;
result = vkCreateDebugReportCallbackEXT(instance, &createInfo, nullptr, &vkCallback);
if (result != VK_SUCCESS)
throw std::runtime_error("vkCreateDebugReportCallbackEXT failed");
*/
}
}
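// Creates the presentation surface for the native window. Only the Win32 path
// is implemented in this commit; the other platforms are still todo.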
void VulkanDevice::createSurface()
{
#ifdef _WIN32
VkWin32SurfaceCreateInfoKHR windowCreateInfo = {};
windowCreateInfo.sType = VK_STRUCTURE_TYPE_WIN32_SURFACE_CREATE_INFO_KHR;
windowCreateInfo.hwnd = Window;
windowCreateInfo.hinstance = GetModuleHandle(nullptr);
VkResult result = vkCreateWin32SurfaceKHR(instance, &windowCreateInfo, nullptr, &surface);
if (result != VK_SUCCESS)
throw std::runtime_error("Could not create vulkan surface");
#elif defined __APPLE__
// todo
#else
// todo
#endif
}
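// Picks the first discrete GPU that supports geometry shaders, anisotropic
// filtering, all required device extensions, and queue families that can both
// render and present to the surface.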
void VulkanDevice::selectPhysicalDevice()
{
VkResult result;
uint32_t deviceCount = 0;
result = vkEnumeratePhysicalDevices(instance, &deviceCount, nullptr);
if (result != VK_SUCCESS)
throw std::runtime_error("vkEnumeratePhysicalDevices failed");
else if (deviceCount == 0)
throw std::runtime_error("Could not find any vulkan devices");
std::vector<VkPhysicalDevice> devices(deviceCount);
result = vkEnumeratePhysicalDevices(instance, &deviceCount, devices.data());
if (result != VK_SUCCESS)
throw std::runtime_error("vkEnumeratePhysicalDevices failed (2)");
for (const auto &device : devices)
{
VkPhysicalDeviceProperties deviceProperties;
vkGetPhysicalDeviceProperties(device, &deviceProperties);
VkPhysicalDeviceFeatures deviceFeatures;
vkGetPhysicalDeviceFeatures(device, &deviceFeatures);
bool isUsableDevice = deviceProperties.deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU && deviceFeatures.geometryShader && deviceFeatures.samplerAnisotropy;
if (!isUsableDevice)
continue;
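// Find a queue family that covers graphics, compute, transfer and sparse
// binding, and one that can present to the surface (they may be the same).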
uint32_t queueFamilyCount = 0;
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, nullptr);
std::vector<VkQueueFamilyProperties> queueFamilies(queueFamilyCount);
vkGetPhysicalDeviceQueueFamilyProperties(device, &queueFamilyCount, queueFamilies.data());
graphicsFamily = -1;
computeFamily = -1;
transferFamily = -1;
sparseBindingFamily = -1;
presentFamily = -1;
int i = 0;
for (const auto& queueFamily : queueFamilies)
{
// Only accept a decent GPU for now..
VkQueueFlags gpuFlags = (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT | VK_QUEUE_SPARSE_BINDING_BIT);
if (queueFamily.queueCount > 0 && (queueFamily.queueFlags & gpuFlags) == gpuFlags)
{
graphicsFamily = i;
computeFamily = i;
transferFamily = i;
sparseBindingFamily = i;
}
VkBool32 presentSupport = false;
result = vkGetPhysicalDeviceSurfaceSupportKHR(device, i, surface, &presentSupport);
if (result == VK_SUCCESS && queueFamily.queueCount > 0 && presentSupport) presentFamily = i;
i++;
}
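// Reject this device unless every required device extension is available.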
uint32_t deviceExtensionCount;
vkEnumerateDeviceExtensionProperties(device, nullptr, &deviceExtensionCount, nullptr);
availableDeviceExtensions.resize(deviceExtensionCount);
vkEnumerateDeviceExtensionProperties(device, nullptr, &deviceExtensionCount, availableDeviceExtensions.data());
std::set<std::string> requiredExtensionSearch(requiredExtensions.begin(), requiredExtensions.end());
for (const auto &ext : availableDeviceExtensions)
requiredExtensionSearch.erase(ext.extensionName);
if (!requiredExtensionSearch.empty())
continue;
physicalDevice = device;
return;
}
throw std::runtime_error("No Vulkan device supports the minimum requirements of this application");
}
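// Creates the logical device with one queue per distinct required family and
// fetches the graphics and present queues from it.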
void VulkanDevice::createDevice()
{
float queuePriority = 1.0f;
std::vector<VkDeviceQueueCreateInfo> queueCreateInfos;
std::set<int> neededFamilies;
neededFamilies.insert(graphicsFamily);
neededFamilies.insert(presentFamily);
neededFamilies.insert(computeFamily);
for (int index : neededFamilies)
{
VkDeviceQueueCreateInfo queueCreateInfo = {};
queueCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
queueCreateInfo.queueFamilyIndex = index;
queueCreateInfo.queueCount = 1;
queueCreateInfo.pQueuePriorities = &queuePriority;
queueCreateInfos.push_back(queueCreateInfo);
}
VkPhysicalDeviceFeatures usedDeviceFeatures = {};
usedDeviceFeatures.samplerAnisotropy = VK_TRUE;
VkDeviceCreateInfo deviceCreateInfo = {};
deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
deviceCreateInfo.queueCreateInfoCount = (uint32_t)queueCreateInfos.size();
deviceCreateInfo.pQueueCreateInfos = queueCreateInfos.data();
deviceCreateInfo.pEnabledFeatures = &usedDeviceFeatures;
deviceCreateInfo.enabledExtensionCount = (uint32_t)requiredExtensions.size();
deviceCreateInfo.ppEnabledExtensionNames = requiredExtensions.data();
deviceCreateInfo.enabledLayerCount = 0;
VkResult result = vkCreateDevice(physicalDevice, &deviceCreateInfo, nullptr, &device);
if (result != VK_SUCCESS)
throw std::runtime_error("Could not create vulkan device");
volkLoadDevice(device);
vkGetDeviceQueue(device, graphicsFamily, 0, &graphicsQueue);
vkGetDeviceQueue(device, presentFamily, 0, &presentQueue);
}
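// Sets up the Vulkan Memory Allocator used for buffer and image memory,
// preferring 64 MB heap blocks and dedicated allocations where supported.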
void VulkanDevice::createAllocator()
{
VmaAllocatorCreateInfo allocinfo = {};
allocinfo.flags = VMA_ALLOCATOR_CREATE_KHR_DEDICATED_ALLOCATION_BIT;
allocinfo.physicalDevice = physicalDevice;
allocinfo.device = device;
allocinfo.preferredLargeHeapBlockSize = 64 * 1024 * 1024;
if (vmaCreateAllocator(&allocinfo, &allocator) != VK_SUCCESS)
throw std::runtime_error("Unable to create allocator");
}
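// Creates the per-frame synchronization objects: the image-available and
// render-finished semaphores plus the fence waited on in waitPresent().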
void VulkanDevice::createSemaphores()
{
VkSemaphoreCreateInfo semaphoreInfo = {};
semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
VkResult result = vkCreateSemaphore(device, &semaphoreInfo, nullptr, &imageAvailableSemaphore);
if (result != VK_SUCCESS)
throw std::runtime_error("Failed to create semaphore!");
result = vkCreateSemaphore(device, &semaphoreInfo, nullptr, &renderFinishedSemaphore);
if (result != VK_SUCCESS)
throw std::runtime_error("Failed to create semaphore!");
VkFenceCreateInfo fenceInfo = {};
fenceInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
result = vkCreateFence(device, &fenceInfo, nullptr, &renderFinishedFence);
if (result != VK_SUCCESS)
throw std::runtime_error("Failed to create fence!");
}
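// Destroys everything in reverse creation order. Safe to call on a partially
// constructed device, so the constructor can use it for cleanup on failure.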
void VulkanDevice::releaseResources()
{
if (device)
vkDeviceWaitIdle(device);
if (imageAvailableSemaphore)
vkDestroySemaphore(device, imageAvailableSemaphore, nullptr);
imageAvailableSemaphore = 0;
if (renderFinishedSemaphore)
vkDestroySemaphore(device, renderFinishedSemaphore, nullptr);
renderFinishedSemaphore = 0;
if (renderFinishedFence)
vkDestroyFence(device, renderFinishedFence, nullptr);
renderFinishedFence = 0;
swapChain.reset();
if (device)
vkDestroyDevice(device, nullptr);
device = nullptr;
if (surface)
vkDestroySurfaceKHR(instance, surface, nullptr);
surface = 0;
if (instance)
vkDestroyInstance(instance, nullptr);
instance = nullptr;
}
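// Returns the index of a memory type that is allowed by typeFilter and has all
// of the requested property flags, or throws if none exists.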
uint32_t VulkanDevice::findMemoryType(uint32_t typeFilter, VkMemoryPropertyFlags properties)
{
VkPhysicalDeviceMemoryProperties memProperties;
vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProperties);
for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++)
{
if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties)
return i;
}
throw std::runtime_error("failed to find suitable memory type!");
}
