Upgrade glslang to 8.13.3559 (stable release January 6, 2020)

Magnus Norddahl 2020-02-15 08:51:03 +01:00
parent 6486380dd1
commit a056307218
71 changed files with 20302 additions and 12485 deletions

View file

@ -61,11 +61,7 @@ enum TBasicType {
EbtSampler,
EbtStruct,
EbtBlock,
#ifdef NV_EXTENSIONS
EbtAccStructNV,
#endif
EbtReference,
// HLSL types that live only temporarily.
@ -94,13 +90,11 @@ enum TStorageQualifier {
EvqBuffer, // read/write, shared with app
EvqShared, // compute shader's read/write 'shared' qualifier
#ifdef NV_EXTENSIONS
EvqPayloadNV,
EvqPayloadInNV,
EvqHitAttrNV,
EvqCallableDataNV,
EvqCallableDataInNV,
#endif
// parameters
EvqIn, // also, for 'in' in the grammar before we know if it's a pipeline input or an 'in' parameter
@ -221,7 +215,6 @@ enum TBuiltInVariable {
EbvSampleMask,
EbvHelperInvocation,
#ifdef AMD_EXTENSIONS
EbvBaryCoordNoPersp,
EbvBaryCoordNoPerspCentroid,
EbvBaryCoordNoPerspSample,
@ -229,7 +222,6 @@ enum TBuiltInVariable {
EbvBaryCoordSmoothCentroid,
EbvBaryCoordSmoothSample,
EbvBaryCoordPullModel,
#endif
EbvViewIndex,
EbvDeviceIndex,
@ -237,7 +229,6 @@ enum TBuiltInVariable {
EbvFragSizeEXT,
EbvFragInvocationCountEXT,
#ifdef NV_EXTENSIONS
EbvViewportMaskNV,
EbvSecondaryPositionNV,
EbvSecondaryViewportMaskNV,
@ -246,7 +237,7 @@ enum TBuiltInVariable {
EbvFragFullyCoveredNV,
EbvFragmentSizeNV,
EbvInvocationsPerPixelNV,
// raytracing
// ray tracing
EbvLaunchIdNV,
EbvLaunchSizeNV,
EbvInstanceCustomIndexNV,
@ -261,8 +252,10 @@ enum TBuiltInVariable {
EbvObjectToWorldNV,
EbvWorldToObjectNV,
EbvIncomingRayFlagsNV,
// barycentrics
EbvBaryCoordNV,
EbvBaryCoordNoPerspNV,
// mesh shaders
EbvTaskCountNV,
EbvPrimitiveCountNV,
EbvPrimitiveIndicesNV,
@ -271,7 +264,12 @@ enum TBuiltInVariable {
EbvLayerPerViewNV,
EbvMeshViewCountNV,
EbvMeshViewIndicesNV,
#endif
// sm builtins
EbvWarpsPerSM,
EbvSMCount,
EbvWarpID,
EbvSMID,
// HLSL built-ins that live only temporarily, until they get remapped
// to one of the above.
@ -291,6 +289,19 @@ enum TBuiltInVariable {
EbvLast
};
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
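For context, the comment above is load-bearing: because the enumerators are ordered and EpqNone is 0, precision qualifiers can be compared numerically. A minimal sketch of that usage (the helper name is illustrative, not part of glslang):

// Hypothetical helper relying only on the ordering guaranteed above:
// EpqNone < EpqLow < EpqMedium < EpqHigh, so the numerically larger value is
// the higher precision and EpqNone never wins against a real qualifier.
inline TPrecisionQualifier maxPrecision(TPrecisionQualifier a, TPrecisionQualifier b)
{
    return a > b ? a : b;
}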
#ifdef GLSLANG_WEB
__inline const char* GetStorageQualifierString(TStorageQualifier q) { return ""; }
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p) { return ""; }
#else
// These will show up in error messages
__inline const char* GetStorageQualifierString(TStorageQualifier q)
{
@ -317,13 +328,11 @@ __inline const char* GetStorageQualifierString(TStorageQualifier q)
case EvqPointCoord: return "gl_PointCoord"; break;
case EvqFragColor: return "fragColor"; break;
case EvqFragDepth: return "gl_FragDepth"; break;
#ifdef NV_EXTENSIONS
case EvqPayloadNV: return "rayPayloadNV"; break;
case EvqPayloadInNV: return "rayPayloadInNV"; break;
case EvqHitAttrNV: return "hitAttributeNV"; break;
case EvqCallableDataNV: return "callableDataNV"; break;
case EvqCallableDataInNV: return "callableDataInNV"; break;
#endif
default: return "unknown qualifier";
}
}
@ -338,6 +347,8 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLocalInvocationId: return "LocalInvocationID";
case EbvGlobalInvocationId: return "GlobalInvocationID";
case EbvLocalInvocationIndex: return "LocalInvocationIndex";
case EbvNumSubgroups: return "NumSubgroups";
case EbvSubgroupID: return "SubgroupID";
case EbvSubGroupSize: return "SubGroupSize";
case EbvSubGroupInvocation: return "SubGroupInvocation";
case EbvSubGroupEqMask: return "SubGroupEqMask";
@ -345,6 +356,13 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSubGroupGtMask: return "SubGroupGtMask";
case EbvSubGroupLeMask: return "SubGroupLeMask";
case EbvSubGroupLtMask: return "SubGroupLtMask";
case EbvSubgroupSize2: return "SubgroupSize";
case EbvSubgroupInvocation2: return "SubgroupInvocationID";
case EbvSubgroupEqMask2: return "SubgroupEqMask";
case EbvSubgroupGeMask2: return "SubgroupGeMask";
case EbvSubgroupGtMask2: return "SubgroupGtMask";
case EbvSubgroupLeMask2: return "SubgroupLeMask";
case EbvSubgroupLtMask2: return "SubgroupLtMask";
case EbvVertexId: return "VertexId";
case EbvInstanceId: return "InstanceId";
case EbvVertexIndex: return "VertexIndex";
@ -396,7 +414,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvSampleMask: return "SampleMaskIn";
case EbvHelperInvocation: return "HelperInvocation";
#ifdef AMD_EXTENSIONS
case EbvBaryCoordNoPersp: return "BaryCoordNoPersp";
case EbvBaryCoordNoPerspCentroid: return "BaryCoordNoPerspCentroid";
case EbvBaryCoordNoPerspSample: return "BaryCoordNoPerspSample";
@ -404,7 +421,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordSmoothCentroid: return "BaryCoordSmoothCentroid";
case EbvBaryCoordSmoothSample: return "BaryCoordSmoothSample";
case EbvBaryCoordPullModel: return "BaryCoordPullModel";
#endif
case EbvViewIndex: return "ViewIndex";
case EbvDeviceIndex: return "DeviceIndex";
@ -412,7 +428,6 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvFragSizeEXT: return "FragSizeEXT";
case EbvFragInvocationCountEXT: return "FragInvocationCountEXT";
#ifdef NV_EXTENSIONS
case EbvViewportMaskNV: return "ViewportMaskNV";
case EbvSecondaryPositionNV: return "SecondaryPositionNV";
case EbvSecondaryViewportMaskNV: return "SecondaryViewportMaskNV";
@ -438,6 +453,7 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvBaryCoordNV: return "BaryCoordNV";
case EbvBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
case EbvTaskCountNV: return "TaskCountNV";
case EbvPrimitiveCountNV: return "PrimitiveCountNV";
case EbvPrimitiveIndicesNV: return "PrimitiveIndicesNV";
@ -446,20 +462,16 @@ __inline const char* GetBuiltInVariableString(TBuiltInVariable v)
case EbvLayerPerViewNV: return "LayerPerViewNV";
case EbvMeshViewCountNV: return "MeshViewCountNV";
case EbvMeshViewIndicesNV: return "MeshViewIndicesNV";
#endif
case EbvWarpsPerSM: return "WarpsPerSMNV";
case EbvSMCount: return "SMCountNV";
case EbvWarpID: return "WarpIDNV";
case EbvSMID: return "SMIDNV";
default: return "unknown built-in variable";
}
}
// In this enum, order matters; users can assume higher precision is a bigger value
// and EpqNone is 0.
enum TPrecisionQualifier {
EpqNone = 0,
EpqLow,
EpqMedium,
EpqHigh
};
__inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
{
switch (p) {
@ -470,6 +482,7 @@ __inline const char* GetPrecisionQualifierString(TPrecisionQualifier p)
default: return "unknown precision qualifier";
}
}
#endif
__inline bool isTypeSignedInt(TBasicType type)
{
@ -514,7 +527,8 @@ __inline bool isTypeFloat(TBasicType type)
}
}
__inline int getTypeRank(TBasicType type) {
__inline int getTypeRank(TBasicType type)
{
int res = -1;
switch(type) {
case EbtInt8:

View file

@ -38,7 +38,7 @@
#define _COMMON_INCLUDED_
#if defined(__ANDROID__) || _MSC_VER < 1700
#if defined(__ANDROID__) || (defined(_MSC_VER) && _MSC_VER < 1700)
#include <sstream>
namespace std {
template<typename T>
@ -83,6 +83,7 @@ std::string to_string(const T& val) {
#endif
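For context, the guard change near the top of this file matters because an identifier that is not defined as a macro evaluates to 0 inside #if, so the old test _MSC_VER < 1700 was also true on compilers that never define _MSC_VER; adding defined(_MSC_VER) restricts the std::to_string fallback to old MSVC only. A minimal sketch of the corrected pattern:

// Undefined identifiers become 0 in #if, so test defined() before comparing:
#if defined(_MSC_VER) && _MSC_VER < 1700
// only genuinely old MSVC takes the fallback path
#endif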
#if defined(_MSC_VER)
#undef strdup
#define strdup _strdup
#endif
@ -102,6 +103,7 @@ std::string to_string(const T& val) {
#include <algorithm>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cassert>
#include "PoolAlloc.h"

View file

@ -213,6 +213,28 @@ public:
return false;
switch (type) {
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
#ifndef GLSLANG_WEB
case EbtInt16:
if (constant.i16Const == i16Const)
return true;
@ -232,16 +254,6 @@ public:
if (constant.u8Const == u8Const)
return true;
break;
case EbtInt:
if (constant.iConst == iConst)
return true;
break;
case EbtUint:
if (constant.uConst == uConst)
return true;
break;
case EbtInt64:
if (constant.i64Const == i64Const)
@ -253,16 +265,7 @@ public:
return true;
break;
case EbtDouble:
if (constant.dConst == dConst)
return true;
break;
case EbtBool:
if (constant.bConst == bConst)
return true;
break;
#endif
default:
assert(false && "Default missing");
}
@ -329,6 +332,22 @@ public:
{
assert(type == constant.type);
switch (type) {
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const > constant.i8Const)
return true;
@ -348,16 +367,6 @@ public:
if (u16Const > constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst > constant.iConst)
return true;
return false;
case EbtUint:
if (uConst > constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const > constant.i64Const)
@ -369,11 +378,7 @@ public:
return true;
return false;
case EbtDouble:
if (dConst > constant.dConst)
return true;
return false;
#endif
default:
assert(false && "Default missing");
return false;
@ -384,6 +389,7 @@ public:
{
assert(type == constant.type);
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
if (i8Const < constant.i8Const)
return true;
@ -394,7 +400,7 @@ public:
return true;
return false;
case EbtInt16:
case EbtInt16:
if (i16Const < constant.i16Const)
return true;
@ -402,17 +408,6 @@ public:
case EbtUint16:
if (u16Const < constant.u16Const)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
case EbtInt64:
if (i64Const < constant.i64Const)
@ -424,10 +419,21 @@ public:
return true;
return false;
#endif
case EbtDouble:
if (dConst < constant.dConst)
return true;
return false;
case EbtInt:
if (iConst < constant.iConst)
return true;
return false;
case EbtUint:
if (uConst < constant.uConst)
return true;
return false;
default:
assert(false && "Default missing");
@ -440,15 +446,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const + constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const + constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst + constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const + constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const + constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const + constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst + constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const + constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst + constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -460,15 +468,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const - constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const - constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst - constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const - constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const - constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const - constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst - constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const - constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst - constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -480,15 +490,17 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const * constant.i8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const * constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst * constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const * constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const * constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const * constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst * constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const * constant.u64Const); break;
case EbtDouble: returnValue.setDConst(dConst * constant.dConst); break;
#endif
default: assert(false && "Default missing");
}
@ -500,14 +512,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const % constant.i8Const); break;
case EbtInt16: returnValue.setI8Const(i8Const % constant.i16Const); break;
case EbtInt: returnValue.setIConst(iConst % constant.iConst); break;
case EbtInt64: returnValue.setI64Const(i64Const % constant.i64Const); break;
case EbtUint8: returnValue.setU8Const(u8Const % constant.u8Const); break;
case EbtUint16: returnValue.setU16Const(u16Const % constant.u16Const); break;
case EbtUint: returnValue.setUConst(uConst % constant.uConst); break;
case EbtUint64: returnValue.setU64Const(u64Const % constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -518,6 +532,7 @@ public:
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const >> constant.i8Const); break;
@ -570,32 +585,38 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst >> constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst >> constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst >> constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst >> constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst >> constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst >> constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst >> constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst >> constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst >> constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst >> constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst >> constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst >> constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst >> constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst >> constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst >> constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
#ifndef GLSLANG_WEB
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const >> constant.i8Const); break;
@ -622,6 +643,7 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
default: assert(false && "Default missing");
}
@ -632,6 +654,7 @@ public:
{
TConstUnion returnValue;
switch (type) {
#ifndef GLSLANG_WEB
case EbtInt8:
switch (constant.type) {
case EbtInt8: returnValue.setI8Const(i8Const << constant.i8Const); break;
@ -684,32 +707,6 @@ public:
default: assert(false && "Default missing");
}
break;
case EbtInt:
switch (constant.type) {
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
default: assert(false && "Default missing");
}
break;
case EbtInt64:
switch (constant.type) {
case EbtInt8: returnValue.setI64Const(i64Const << constant.i8Const); break;
@ -736,6 +733,37 @@ public:
default: assert(false && "Default missing");
}
break;
#endif
case EbtInt:
switch (constant.type) {
case EbtInt: returnValue.setIConst(iConst << constant.iConst); break;
case EbtUint: returnValue.setIConst(iConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setIConst(iConst << constant.i8Const); break;
case EbtUint8: returnValue.setIConst(iConst << constant.u8Const); break;
case EbtInt16: returnValue.setIConst(iConst << constant.i16Const); break;
case EbtUint16: returnValue.setIConst(iConst << constant.u16Const); break;
case EbtInt64: returnValue.setIConst(iConst << constant.i64Const); break;
case EbtUint64: returnValue.setIConst(iConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
case EbtUint:
switch (constant.type) {
case EbtInt: returnValue.setUConst(uConst << constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst << constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setUConst(uConst << constant.i8Const); break;
case EbtUint8: returnValue.setUConst(uConst << constant.u8Const); break;
case EbtInt16: returnValue.setUConst(uConst << constant.i16Const); break;
case EbtUint16: returnValue.setUConst(uConst << constant.u16Const); break;
case EbtInt64: returnValue.setUConst(uConst << constant.i64Const); break;
case EbtUint64: returnValue.setUConst(uConst << constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
break;
default: assert(false && "Default missing");
}
@ -747,14 +775,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const & constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const & constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const & constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const & constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst & constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst & constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const & constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const & constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -766,14 +796,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const | constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const | constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const | constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const | constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst | constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst | constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const | constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const | constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -785,14 +817,16 @@ public:
TConstUnion returnValue;
assert(type == constant.type);
switch (type) {
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(i8Const ^ constant.i8Const); break;
case EbtUint8: returnValue.setU8Const(u8Const ^ constant.u8Const); break;
case EbtInt16: returnValue.setI16Const(i16Const ^ constant.i16Const); break;
case EbtUint16: returnValue.setU16Const(u16Const ^ constant.u16Const); break;
case EbtInt: returnValue.setIConst(iConst ^ constant.iConst); break;
case EbtUint: returnValue.setUConst(uConst ^ constant.uConst); break;
case EbtInt64: returnValue.setI64Const(i64Const ^ constant.i64Const); break;
case EbtUint64: returnValue.setU64Const(u64Const ^ constant.u64Const); break;
#endif
default: assert(false && "Default missing");
}
@ -803,14 +837,16 @@ public:
{
TConstUnion returnValue;
switch (type) {
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
#ifndef GLSLANG_WEB
case EbtInt8: returnValue.setI8Const(~i8Const); break;
case EbtUint8: returnValue.setU8Const(~u8Const); break;
case EbtInt16: returnValue.setI16Const(~i16Const); break;
case EbtUint16: returnValue.setU16Const(~u16Const); break;
case EbtInt: returnValue.setIConst(~iConst); break;
case EbtUint: returnValue.setUConst(~uConst); break;
case EbtInt64: returnValue.setI64Const(~i64Const); break;
case EbtUint64: returnValue.setU64Const(~u64Const); break;
#endif
default: assert(false && "Default missing");
}

View file

@ -304,7 +304,6 @@ public:
size_type max_size() const { return static_cast<size_type>(-1) / sizeof(T); }
size_type max_size(int size) const { return static_cast<size_type>(-1) / size; }
void setAllocator(TPoolAllocator* a) { allocator = *a; }
TPoolAllocator& getAllocator() const { return allocator; }
protected:

File diff suppressed because it is too large.

View file

@ -254,7 +254,9 @@ struct TArraySizes {
void addInnerSize() { addInnerSize((unsigned)UnsizedArraySize); }
void addInnerSize(int s) { addInnerSize((unsigned)s, nullptr); }
void addInnerSize(int s, TIntermTyped* n) { sizes.push_back((unsigned)s, n); }
void addInnerSize(TArraySize pair) { sizes.push_back(pair.size, pair.node); }
void addInnerSize(TArraySize pair) {
sizes.push_back(pair.size, pair.node);
}
void addInnerSizes(const TArraySizes& s) { sizes.push_back(s.sizes); }
void changeOuterSize(int s) { sizes.changeFront((unsigned)s); }
int getImplicitSize() const { return implicitArraySize; }
@ -318,8 +320,8 @@ struct TArraySizes {
void setVariablyIndexed() { variablyIndexed = true; }
bool isVariablyIndexed() const { return variablyIndexed; }
bool operator==(const TArraySizes& rhs) { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) { return sizes != rhs.sizes; }
bool operator==(const TArraySizes& rhs) const { return sizes == rhs.sizes; }
bool operator!=(const TArraySizes& rhs) const { return sizes != rhs.sizes; }
protected:
TSmallArrayVector sizes;

View file

@ -85,6 +85,8 @@ enum TOperator {
EOpPreIncrement,
EOpPreDecrement,
EOpCopyObject,
// (u)int* -> bool
EOpConvInt8ToBool,
EOpConvUint8ToBool,
@ -273,6 +275,10 @@ enum TOperator {
EOpConvUint64ToPtr,
EOpConvPtrToUint64,
// uvec2 <-> pointer
EOpConvUvec2ToPtr,
EOpConvPtrToUvec2,
//
// binary operations
//
@ -420,11 +426,9 @@ enum TOperator {
EOpReflect,
EOpRefract,
#ifdef AMD_EXTENSIONS
EOpMin3,
EOpMax3,
EOpMid3,
#endif
EOpDPdx, // Fragment only
EOpDPdy, // Fragment only
@ -439,10 +443,7 @@ enum TOperator {
EOpInterpolateAtCentroid, // Fragment only
EOpInterpolateAtSample, // Fragment only
EOpInterpolateAtOffset, // Fragment only
#ifdef AMD_EXTENSIONS
EOpInterpolateAtVertex,
#endif
EOpMatrixTimesMatrix,
EOpOuterProduct,
@ -532,7 +533,6 @@ enum TOperator {
EOpSubgroupQuadSwapVertical,
EOpSubgroupQuadSwapDiagonal,
#ifdef NV_EXTENSIONS
EOpSubgroupPartition,
EOpSubgroupPartitionedAdd,
EOpSubgroupPartitionedMul,
@ -555,11 +555,9 @@ enum TOperator {
EOpSubgroupPartitionedExclusiveAnd,
EOpSubgroupPartitionedExclusiveOr,
EOpSubgroupPartitionedExclusiveXor,
#endif
EOpSubgroupGuardStop,
#ifdef AMD_EXTENSIONS
EOpMinInvocations,
EOpMaxInvocations,
EOpAddInvocations,
@ -586,7 +584,6 @@ enum TOperator {
EOpCubeFaceIndex,
EOpCubeFaceCoord,
EOpTime,
#endif
EOpAtomicAdd,
EOpAtomicMin,
@ -615,6 +612,15 @@ enum TOperator {
EOpAny,
EOpAll,
EOpCooperativeMatrixLoad,
EOpCooperativeMatrixStore,
EOpCooperativeMatrixMulAdd,
EOpBeginInvocationInterlock, // Fragment only
EOpEndInvocationInterlock, // Fragment only
EOpIsHelperInvocation,
//
// Branch
//
@ -625,6 +631,7 @@ enum TOperator {
EOpContinue,
EOpCase,
EOpDefault,
EOpDemote, // Fragment only
//
// Constructors
@ -642,9 +649,21 @@ enum TOperator {
EOpConstructBool,
EOpConstructFloat,
EOpConstructDouble,
// Keep vector and matrix constructors in a consistent relative order for
// TParseContext::constructBuiltIn, which converts between 8/16/32 bit
// vector constructors
EOpConstructVec2,
EOpConstructVec3,
EOpConstructVec4,
EOpConstructMat2x2,
EOpConstructMat2x3,
EOpConstructMat2x4,
EOpConstructMat3x2,
EOpConstructMat3x3,
EOpConstructMat3x4,
EOpConstructMat4x2,
EOpConstructMat4x3,
EOpConstructMat4x4,
EOpConstructDVec2,
EOpConstructDVec3,
EOpConstructDVec4,
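For context, the comment about keeping vector and matrix constructors in a consistent relative order exists because code such as TParseContext::constructBuiltIn can derive one constructor opcode from another by fixed offset arithmetic. A minimal sketch of why contiguity matters (the helper is illustrative, not the actual glslang code):

// Illustrative only: works because EOpConstructVec2, EOpConstructVec3 and
// EOpConstructVec4 are declared consecutively, as in the enum above.
inline int vecConstructorComponentCount(TOperator op)
{
    return 2 + (static_cast<int>(op) - static_cast<int>(EOpConstructVec2));
}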
@ -675,15 +694,6 @@ enum TOperator {
EOpConstructU64Vec2,
EOpConstructU64Vec3,
EOpConstructU64Vec4,
EOpConstructMat2x2,
EOpConstructMat2x3,
EOpConstructMat2x4,
EOpConstructMat3x2,
EOpConstructMat3x3,
EOpConstructMat3x4,
EOpConstructMat4x2,
EOpConstructMat4x3,
EOpConstructMat4x4,
EOpConstructDMat2x2,
EOpConstructDMat2x3,
EOpConstructDMat2x4,
@ -737,6 +747,7 @@ enum TOperator {
EOpConstructTextureSampler,
EOpConstructNonuniform, // expected to be transformed away, not present in final AST
EOpConstructReference,
EOpConstructCooperativeMatrix,
EOpConstructGuardEnd,
//
@ -779,10 +790,8 @@ enum TOperator {
EOpImageQuerySamples,
EOpImageLoad,
EOpImageStore,
#ifdef AMD_EXTENSIONS
EOpImageLoadLod,
EOpImageStoreLod,
#endif
EOpImageAtomicAdd,
EOpImageAtomicMin,
EOpImageAtomicMax,
@ -797,9 +806,7 @@ enum TOperator {
EOpSubpassLoad,
EOpSubpassLoadMS,
EOpSparseImageLoad,
#ifdef AMD_EXTENSIONS
EOpSparseImageLoadLod,
#endif
EOpImageGuardEnd,
@ -837,13 +844,11 @@ enum TOperator {
EOpTextureOffsetClamp,
EOpTextureGradClamp,
EOpTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpTextureGatherLod,
EOpTextureGatherLodOffset,
EOpTextureGatherLodOffsets,
EOpFragmentMaskFetch,
EOpFragmentFetch,
#endif
EOpSparseTextureGuardBegin,
@ -863,15 +868,12 @@ enum TOperator {
EOpSparseTextureOffsetClamp,
EOpSparseTextureGradClamp,
EOpSparseTextureGradOffsetClamp,
#ifdef AMD_EXTENSIONS
EOpSparseTextureGatherLod,
EOpSparseTextureGatherLodOffset,
EOpSparseTextureGatherLodOffsets,
#endif
EOpSparseTextureGuardEnd,
#ifdef NV_EXTENSIONS
EOpImageFootprintGuardBegin,
EOpImageSampleFootprintNV,
EOpImageSampleFootprintClampNV,
@ -879,7 +881,6 @@ enum TOperator {
EOpImageSampleFootprintGradNV,
EOpImageSampleFootprintGradClampNV,
EOpImageFootprintGuardEnd,
#endif
EOpSamplingGuardEnd,
EOpTextureGuardEnd,
@ -898,14 +899,21 @@ enum TOperator {
EOpFindLSB,
EOpFindMSB,
#ifdef NV_EXTENSIONS
EOpCountLeadingZeros,
EOpCountTrailingZeros,
EOpAbsDifference,
EOpAddSaturate,
EOpSubSaturate,
EOpAverage,
EOpAverageRounded,
EOpMul32x16,
EOpTraceNV,
EOpReportIntersectionNV,
EOpIgnoreIntersectionNV,
EOpTerminateRayNV,
EOpExecuteCallableNV,
EOpWritePackedPrimitiveIndices4x8NV,
#endif
//
// HLSL operations
//
@ -989,6 +997,10 @@ enum TOperator {
EOpWaveGetLaneIndex, // Will decompose to gl_SubgroupInvocationID.
EOpWaveActiveCountBits, // Will decompose to subgroupBallotBitCount(subgroupBallot()).
EOpWavePrefixCountBits, // Will decompose to subgroupBallotInclusiveBitCount(subgroupBallot()).
// Shader Clock Ops
EOpReadClockSubgroupKHR,
EOpReadClockDeviceKHR,
};
class TIntermTraverser;
@ -1090,6 +1102,8 @@ public:
virtual bool isStruct() const { return type.isStruct(); }
virtual bool isFloatingDomain() const { return type.isFloatingDomain(); }
virtual bool isIntegerDomain() const { return type.isIntegerDomain(); }
bool isAtomic() const { return type.isAtomic(); }
bool isReference() const { return type.isReference(); }
TString getCompleteString() const { return type.getCompleteString(); }
protected:
@ -1109,7 +1123,12 @@ public:
first(testFirst),
unroll(false),
dontUnroll(false),
dependency(0)
dependency(0),
minIterations(0),
maxIterations(iterationsInfinite),
iterationMultiple(1),
peelCount(0),
partialCount(0)
{ }
virtual TIntermLoop* getAsLoopNode() { return this; }
@ -1121,14 +1140,36 @@ public:
bool testFirst() const { return first; }
void setUnroll() { unroll = true; }
void setDontUnroll() { dontUnroll = true; }
void setDontUnroll() {
dontUnroll = true;
peelCount = 0;
partialCount = 0;
}
bool getUnroll() const { return unroll; }
bool getDontUnroll() const { return dontUnroll; }
static const unsigned int dependencyInfinite = 0xFFFFFFFF;
static const unsigned int iterationsInfinite = 0xFFFFFFFF;
void setLoopDependency(int d) { dependency = d; }
int getLoopDependency() const { return dependency; }
void setMinIterations(unsigned int v) { minIterations = v; }
unsigned int getMinIterations() const { return minIterations; }
void setMaxIterations(unsigned int v) { maxIterations = v; }
unsigned int getMaxIterations() const { return maxIterations; }
void setIterationMultiple(unsigned int v) { iterationMultiple = v; }
unsigned int getIterationMultiple() const { return iterationMultiple; }
void setPeelCount(unsigned int v) {
peelCount = v;
dontUnroll = false;
}
unsigned int getPeelCount() const { return peelCount; }
void setPartialCount(unsigned int v) {
partialCount = v;
dontUnroll = false;
}
unsigned int getPartialCount() const { return partialCount; }
protected:
TIntermNode* body; // code to loop over
TIntermTyped* test; // exit condition associated with loop, could be 0 for 'for' loops
@ -1137,6 +1178,11 @@ protected:
bool unroll; // true if unroll requested
bool dontUnroll; // true if request to not unroll
unsigned int dependency; // loop dependency hint; 0 means not set or unknown
unsigned int minIterations; // as per the SPIR-V specification
unsigned int maxIterations; // as per the SPIR-V specification
unsigned int iterationMultiple; // as per the SPIR-V specification
unsigned int peelCount; // as per the SPIR-V specification
unsigned int partialCount; // as per the SPIR-V specification
};
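For context, the new fields mirror SPIR-V loop-control operands, and the setters keep DontUnroll mutually exclusive with PeelCount/PartialCount: setDontUnroll() zeroes both counts, and setting either count clears dontUnroll. A minimal usage sketch (the loop pointer is hypothetical):

// Hypothetical usage of the hint setters added above; 'loop' comes from elsewhere.
void applyLoopHints(TIntermLoop* loop)
{
    loop->setPartialCount(4);    // partial unrolling by 4; clears any dontUnroll flag
    loop->setMaxIterations(256); // stays at iterationsInfinite (0xFFFFFFFF) if never set
    // a later loop->setDontUnroll() would reset peelCount and partialCount to 0
}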
//
@ -1152,6 +1198,7 @@ public:
virtual void traverse(TIntermTraverser*);
TOperator getFlowOp() const { return flowOp; }
TIntermTyped* getExpression() const { return expression; }
void setExpression(TIntermTyped* pExpression) { expression = pExpression; }
protected:
TOperator flowOp;
TIntermTyped* expression;
@ -1185,7 +1232,7 @@ public:
// it is essential to use "symbol = sym" to assign to symbol
TIntermSymbol(int i, const TString& n, const TType& t)
: TIntermTyped(t), id(i),
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
flattenSubset(-1),
#endif
constSubtree(nullptr)
@ -1200,7 +1247,7 @@ public:
const TConstUnionArray& getConstArray() const { return constArray; }
void setConstSubtree(TIntermTyped* subtree) { constSubtree = subtree; }
TIntermTyped* getConstSubtree() const { return constSubtree; }
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
void setFlattenSubset(int subset) { flattenSubset = subset; }
int getFlattenSubset() const { return flattenSubset; } // -1 means full object
#endif
@ -1211,7 +1258,7 @@ public:
protected:
int id; // the unique id of the symbol this node represents
#ifdef ENABLE_HLSL
#ifndef GLSLANG_WEB
int flattenSubset; // how deeply the flattened object rooted at id has been dereferenced
#endif
TString name; // the name of the symbol this node represents
@ -1251,9 +1298,7 @@ struct TCrackedTextureOp {
bool grad;
bool subpass;
bool lodClamp;
#ifdef AMD_EXTENSIONS
bool fragMask;
#endif
};
//
@ -1269,12 +1314,19 @@ public:
bool isConstructor() const;
bool isTexture() const { return op > EOpTextureGuardBegin && op < EOpTextureGuardEnd; }
bool isSampling() const { return op > EOpSamplingGuardBegin && op < EOpSamplingGuardEnd; }
#ifdef GLSLANG_WEB
bool isImage() const { return false; }
bool isSparseTexture() const { return false; }
bool isImageFootprint() const { return false; }
bool isSparseImage() const { return false; }
bool isSubgroup() const { return false; }
#else
bool isImage() const { return op > EOpImageGuardBegin && op < EOpImageGuardEnd; }
bool isSparseTexture() const { return op > EOpSparseTextureGuardBegin && op < EOpSparseTextureGuardEnd; }
#ifdef NV_EXTENSIONS
bool isImageFootprint() const { return op > EOpImageFootprintGuardBegin && op < EOpImageFootprintGuardEnd; }
#endif
bool isSparseImage() const { return op == EOpSparseImageLoad; }
bool isSubgroup() const { return op > EOpSubgroupGuardStart && op < EOpSubgroupGuardStop; }
#endif
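For context, these predicates work because sentinel "guard" enumerators bracket each family of opcodes, so category membership is a simple range check (and the GLSLANG_WEB build stubs out the families it strips). A minimal sketch of the same pattern with hypothetical names:

// Hypothetical enum showing the bracketing technique used by the guards above.
enum MyOp { OpFooGuardBegin, OpFooA, OpFooB, OpFooGuardEnd, OpOther };
inline bool isFooOp(MyOp op) { return op > OpFooGuardBegin && op < OpFooGuardEnd; }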
void setOperationPrecision(TPrecisionQualifier p) { operationPrecision = p; }
TPrecisionQualifier getOperationPrecision() const { return operationPrecision != EpqNone ?
@ -1304,9 +1356,7 @@ public:
cracked.grad = false;
cracked.subpass = false;
cracked.lodClamp = false;
#ifdef AMD_EXTENSIONS
cracked.fragMask = false;
#endif
switch (op) {
case EOpImageQuerySize:
@ -1321,10 +1371,6 @@ public:
case EOpTexture:
case EOpSparseTexture:
break;
case EOpTextureClamp:
case EOpSparseTextureClamp:
cracked.lodClamp = true;
break;
case EOpTextureProj:
cracked.proj = true;
break;
@ -1336,22 +1382,17 @@ public:
case EOpSparseTextureOffset:
cracked.offset = true;
break;
case EOpTextureOffsetClamp:
case EOpSparseTextureOffsetClamp:
cracked.offset = true;
cracked.lodClamp = true;
break;
case EOpTextureFetch:
case EOpSparseTextureFetch:
cracked.fetch = true;
if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
cracked.lod = true;
break;
case EOpTextureFetchOffset:
case EOpSparseTextureFetchOffset:
cracked.fetch = true;
cracked.offset = true;
if (sampler.dim == Esd1D || (sampler.dim == Esd2D && ! sampler.ms) || sampler.dim == Esd3D)
if (sampler.is1D() || (sampler.dim == Esd2D && ! sampler.isMultiSample()) || sampler.dim == Esd3D)
cracked.lod = true;
break;
case EOpTextureProjOffset:
@ -1376,11 +1417,6 @@ public:
case EOpSparseTextureGrad:
cracked.grad = true;
break;
case EOpTextureGradClamp:
case EOpSparseTextureGradClamp:
cracked.grad = true;
cracked.lodClamp = true;
break;
case EOpTextureGradOffset:
case EOpSparseTextureGradOffset:
cracked.grad = true;
@ -1395,6 +1431,21 @@ public:
cracked.offset = true;
cracked.proj = true;
break;
#ifndef GLSLANG_WEB
case EOpTextureClamp:
case EOpSparseTextureClamp:
cracked.lodClamp = true;
break;
case EOpTextureOffsetClamp:
case EOpSparseTextureOffsetClamp:
cracked.offset = true;
cracked.lodClamp = true;
break;
case EOpTextureGradClamp:
case EOpSparseTextureGradClamp:
cracked.grad = true;
cracked.lodClamp = true;
break;
case EOpTextureGradOffsetClamp:
case EOpSparseTextureGradOffsetClamp:
cracked.grad = true;
@ -1415,7 +1466,6 @@ public:
cracked.gather = true;
cracked.offsets = true;
break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod:
case EOpSparseTextureGatherLod:
cracked.gather = true;
@ -1446,8 +1496,6 @@ public:
cracked.subpass = sampler.dim == EsdSubpass;
cracked.fragMask = true;
break;
#endif
#ifdef NV_EXTENSIONS
case EOpImageSampleFootprintNV:
break;
case EOpImageSampleFootprintClampNV:
@ -1463,11 +1511,11 @@ public:
cracked.lodClamp = true;
cracked.grad = true;
break;
#endif
case EOpSubpassLoad:
case EOpSubpassLoadMS:
cracked.subpass = true;
break;
#endif
default:
break;
}

View file

@ -1,3 +1,3 @@
// This header is generated by the make-revision script.
#define GLSLANG_PATCH_LEVEL 3057
#define GLSLANG_PATCH_LEVEL 3559

View file

@ -189,6 +189,24 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
else
newConstArray[i].setDConst((double)NAN);
break;
case EbtInt:
if (rightUnionArray[i] == 0)
newConstArray[i].setIConst(0x7FFFFFFF);
else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll)
newConstArray[i].setIConst((int)-0x80000000ll);
else
newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst());
break;
case EbtUint:
if (rightUnionArray[i] == 0u)
newConstArray[i].setUConst(0xFFFFFFFFu);
else
newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst());
break;
#ifndef GLSLANG_WEB
case EbtInt8:
if (rightUnionArray[i] == (signed char)0)
newConstArray[i].setI8Const((signed char)0x7F);
@ -221,22 +239,6 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
newConstArray[i].setU16Const(leftUnionArray[i].getU16Const() / rightUnionArray[i].getU16Const());
break;
case EbtInt:
if (rightUnionArray[i] == 0)
newConstArray[i].setIConst(0x7FFFFFFF);
else if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == (int)-0x80000000ll)
newConstArray[i].setIConst((int)-0x80000000ll);
else
newConstArray[i].setIConst(leftUnionArray[i].getIConst() / rightUnionArray[i].getIConst());
break;
case EbtUint:
if (rightUnionArray[i] == 0u)
newConstArray[i].setUConst(0xFFFFFFFFu);
else
newConstArray[i].setUConst(leftUnionArray[i].getUConst() / rightUnionArray[i].getUConst());
break;
case EbtInt64:
if (rightUnionArray[i] == 0ll)
newConstArray[i].setI64Const(0x7FFFFFFFFFFFFFFFll);
@ -254,6 +256,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
break;
default:
return 0;
#endif
}
}
break;
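For context, the integer division cases above keep constant folding free of undefined behavior: division by zero produces a saturated value instead of trapping, and INT_MIN / -1, which would overflow, is pinned to INT_MIN. A minimal standalone sketch of the same guards:

// Illustrative helper mirroring the guards in the folding code above.
#include <climits>
inline int foldSignedDivide(int lhs, int rhs)
{
    if (rhs == 0)
        return INT_MAX;               // matches the 0x7FFFFFFF result above, avoids UB
    if (rhs == -1 && lhs == INT_MIN)
        return INT_MIN;               // -INT_MIN overflows, so keep INT_MIN
    return lhs / rhs;
}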
@ -292,13 +295,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TIntermTyped* right
newConstArray[i].setIConst(0);
break;
} else goto modulo_default;
#ifndef GLSLANG_WEB
case EbtInt64:
if (rightUnionArray[i].getI64Const() == -1 && leftUnionArray[i].getI64Const() == LLONG_MIN) {
newConstArray[i].setI64Const(0);
break;
} else goto modulo_default;
#ifdef AMD_EXTENSIONS
case EbtInt16:
if (rightUnionArray[i].getIConst() == -1 && leftUnionArray[i].getIConst() == SHRT_MIN) {
newConstArray[i].setIConst(0);
@ -415,8 +417,8 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpEmitStreamVertex:
case EOpEndStreamPrimitive:
// These don't actually fold
return 0;
// These don't fold
return nullptr;
case EOpPackSnorm2x16:
case EOpPackUnorm2x16:
@ -491,8 +493,6 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
break;
}
// TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
case EOpPackSnorm2x16:
case EOpPackUnorm2x16:
case EOpPackHalf2x16:
@ -510,7 +510,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpDeterminant:
case EOpMatrixInverse:
case EOpTranspose:
return 0;
return nullptr;
default:
assert(componentWise);
@ -529,16 +529,18 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EbtDouble:
case EbtFloat16:
case EbtFloat: newConstArray[i].setDConst(-unionArray[i].getDConst()); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
#ifndef GLSLANG_WEB
case EbtInt8: newConstArray[i].setI8Const(-unionArray[i].getI8Const()); break;
case EbtUint8: newConstArray[i].setU8Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU8Const()))); break;
case EbtInt16: newConstArray[i].setI16Const(-unionArray[i].getI16Const()); break;
case EbtUint16:newConstArray[i].setU16Const(static_cast<unsigned int>(-static_cast<signed int>(unionArray[i].getU16Const()))); break;
case EbtInt: newConstArray[i].setIConst(-unionArray[i].getIConst()); break;
case EbtUint: newConstArray[i].setUConst(static_cast<unsigned int>(-static_cast<int>(unionArray[i].getUConst()))); break;
case EbtInt64: newConstArray[i].setI64Const(-unionArray[i].getI64Const()); break;
case EbtUint64: newConstArray[i].setU64Const(static_cast<unsigned long long>(-static_cast<long long>(unionArray[i].getU64Const()))); break;
#endif
default:
return 0;
return nullptr;
}
break;
case EOpLogicalNot:
@ -546,7 +548,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
switch (getType().getBasicType()) {
case EbtBool: newConstArray[i].setBConst(!unionArray[i].getBConst()); break;
default:
return 0;
return nullptr;
}
break;
case EOpBitwiseNot:
@ -671,6 +673,48 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
break;
}
case EOpConvIntToBool:
newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
case EOpConvUintToBool:
newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
case EOpConvBoolToInt:
newConstArray[i].setIConst(unionArray[i].getBConst()); break;
case EOpConvBoolToUint:
newConstArray[i].setUConst(unionArray[i].getBConst()); break;
case EOpConvIntToUint:
newConstArray[i].setUConst(unionArray[i].getIConst()); break;
case EOpConvUintToInt:
newConstArray[i].setIConst(unionArray[i].getUConst()); break;
case EOpConvFloatToBool:
case EOpConvDoubleToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvBoolToFloat:
case EOpConvBoolToDouble:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvIntToFloat:
case EOpConvIntToDouble:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvUintToFloat:
case EOpConvUintToDouble:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvDoubleToFloat:
case EOpConvFloatToDouble:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvFloatToUint:
case EOpConvDoubleToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt:
case EOpConvDoubleToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
#ifndef GLSLANG_WEB
case EOpConvInt8ToBool:
newConstArray[i].setBConst(unionArray[i].getI8Const() != 0); break;
case EOpConvUint8ToBool:
@ -679,20 +723,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setBConst(unionArray[i].getI16Const() != 0); break;
case EOpConvUint16ToBool:
newConstArray[i].setBConst(unionArray[i].getU16Const() != 0); break;
case EOpConvIntToBool:
newConstArray[i].setBConst(unionArray[i].getIConst() != 0); break;
case EOpConvUintToBool:
newConstArray[i].setBConst(unionArray[i].getUConst() != 0); break;
case EOpConvInt64ToBool:
newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
case EOpConvUint64ToBool:
newConstArray[i].setBConst(unionArray[i].getI64Const() != 0); break;
case EOpConvFloat16ToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvFloatToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvDoubleToBool:
newConstArray[i].setBConst(unionArray[i].getDConst() != 0); break;
case EOpConvBoolToInt8:
newConstArray[i].setI8Const(unionArray[i].getBConst()); break;
@ -702,20 +738,12 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setI16Const(unionArray[i].getBConst()); break;
case EOpConvBoolToUint16:
newConstArray[i].setU16Const(unionArray[i].getBConst()); break;
case EOpConvBoolToInt:
newConstArray[i].setIConst(unionArray[i].getBConst()); break;
case EOpConvBoolToUint:
newConstArray[i].setUConst(unionArray[i].getBConst()); break;
case EOpConvBoolToInt64:
newConstArray[i].setI64Const(unionArray[i].getBConst()); break;
case EOpConvBoolToUint64:
newConstArray[i].setU64Const(unionArray[i].getBConst()); break;
case EOpConvBoolToFloat16:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvBoolToFloat:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvBoolToDouble:
newConstArray[i].setDConst(unionArray[i].getBConst()); break;
case EOpConvInt8ToInt16:
newConstArray[i].setI16Const(unionArray[i].getI8Const()); break;
@ -810,8 +838,6 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setU8Const((unsigned char)unionArray[i].getIConst()); break;
case EOpConvIntToUint16:
newConstArray[i].setU16Const((unsigned char)unionArray[i].getIConst()); break;
case EOpConvIntToUint:
newConstArray[i].setUConst(unionArray[i].getIConst()); break;
case EOpConvIntToUint64:
newConstArray[i].setU64Const(unionArray[i].getIConst()); break;
@ -819,8 +845,6 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setI8Const((signed char)unionArray[i].getUConst()); break;
case EOpConvUintToInt16:
newConstArray[i].setI16Const((signed short)unionArray[i].getUConst()); break;
case EOpConvUintToInt:
newConstArray[i].setIConst(unionArray[i].getUConst()); break;
case EOpConvUintToInt64:
newConstArray[i].setI64Const(unionArray[i].getUConst()); break;
case EOpConvUintToUint8:
@ -831,16 +855,8 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setU64Const(unionArray[i].getUConst()); break;
case EOpConvIntToFloat16:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvIntToFloat:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvIntToDouble:
newConstArray[i].setDConst(unionArray[i].getIConst()); break;
case EOpConvUintToFloat16:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvUintToFloat:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvUintToDouble:
newConstArray[i].setDConst(unionArray[i].getUConst()); break;
case EOpConvInt64ToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getI64Const())); break;
case EOpConvInt64ToInt16:
@ -905,44 +921,35 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
case EOpConvFloatToInt64:
newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
case EOpConvFloatToUint64:
newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
case EOpConvFloatToFloat16:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvFloatToDouble:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvDoubleToInt8:
newConstArray[i].setI8Const(static_cast<signed char>(unionArray[i].getDConst())); break;
case EOpConvDoubleToInt16:
newConstArray[i].setI16Const(static_cast<signed short>(unionArray[i].getDConst())); break;
case EOpConvDoubleToInt:
newConstArray[i].setIConst(static_cast<int>(unionArray[i].getDConst())); break;
case EOpConvDoubleToInt64:
newConstArray[i].setI64Const(static_cast<long long>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint8:
newConstArray[i].setU8Const(static_cast<unsigned char>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint16:
newConstArray[i].setU16Const(static_cast<unsigned short>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint:
newConstArray[i].setUConst(static_cast<unsigned int>(unionArray[i].getDConst())); break;
case EOpConvDoubleToUint64:
newConstArray[i].setU64Const(static_cast<unsigned long long>(unionArray[i].getDConst())); break;
case EOpConvDoubleToFloat16:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvDoubleToFloat:
newConstArray[i].setDConst(unionArray[i].getDConst()); break;
case EOpConvPtrToUint64:
case EOpConvUint64ToPtr:
case EOpConstructReference:
newConstArray[i].setU64Const(unionArray[i].getU64Const()); break;
#endif
// TODO: 3.0 Functionality: unary constant folding: the rest of the ops have to be fleshed out
@ -966,7 +973,7 @@ TIntermTyped* TIntermConstantUnion::fold(TOperator op, const TType& returnType)
case EOpInt16BitsToFloat16:
case EOpUint16BitsToFloat16:
default:
return 0;
return nullptr;
}
}
@ -1074,6 +1081,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtDouble:
newConstArray[comp].setDConst(std::min(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::min(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
break;
@ -1086,18 +1100,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtUint16:
newConstArray[comp].setU16Const(std::min(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::min(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::min(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
break;
case EbtUint64:
newConstArray[comp].setU64Const(std::min(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -1108,6 +1117,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtDouble:
newConstArray[comp].setDConst(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()));
break;
@ -1120,18 +1136,13 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
case EbtUint16:
newConstArray[comp].setU16Const(std::max(childConstUnions[0][arg0comp].getU16Const(), childConstUnions[1][arg1comp].getU16Const()));
break;
case EbtInt:
newConstArray[comp].setIConst(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()));
break;
case EbtUint64:
newConstArray[comp].setU64Const(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -1143,6 +1154,11 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setDConst(std::min(std::max(childConstUnions[0][arg0comp].getDConst(), childConstUnions[1][arg1comp].getDConst()),
childConstUnions[2][arg2comp].getDConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
childConstUnions[2][arg2comp].getUConst()));
break;
#ifndef GLSLANG_WEB
case EbtInt8:
newConstArray[comp].setI8Const(std::min(std::max(childConstUnions[0][arg0comp].getI8Const(), childConstUnions[1][arg1comp].getI8Const()),
childConstUnions[2][arg2comp].getI8Const()));
@ -1163,10 +1179,6 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setIConst(std::min(std::max(childConstUnions[0][arg0comp].getIConst(), childConstUnions[1][arg1comp].getIConst()),
childConstUnions[2][arg2comp].getIConst()));
break;
case EbtUint:
newConstArray[comp].setUConst(std::min(std::max(childConstUnions[0][arg0comp].getUConst(), childConstUnions[1][arg1comp].getUConst()),
childConstUnions[2][arg2comp].getUConst()));
break;
case EbtInt64:
newConstArray[comp].setI64Const(std::min(std::max(childConstUnions[0][arg0comp].getI64Const(), childConstUnions[1][arg1comp].getI64Const()),
childConstUnions[2][arg2comp].getI64Const()));
@ -1175,6 +1187,7 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setU64Const(std::min(std::max(childConstUnions[0][arg0comp].getU64Const(), childConstUnions[1][arg1comp].getU64Const()),
childConstUnions[2][arg2comp].getU64Const()));
break;
#endif
default: assert(false && "Default missing");
}
break;
@ -1197,12 +1210,17 @@ TIntermTyped* TIntermediate::fold(TIntermAggregate* aggrNode)
newConstArray[comp].setBConst(childConstUnions[0][arg0comp] != childConstUnions[1][arg1comp]);
break;
case EOpMix:
if (children[2]->getAsTyped()->getBasicType() == EbtBool)
newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst() ? childConstUnions[1][arg1comp].getDConst() :
childConstUnions[0][arg0comp].getDConst());
else
newConstArray[comp].setDConst(childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
if (!children[0]->getAsTyped()->isFloatingDomain())
return aggrNode;
if (children[2]->getAsTyped()->getBasicType() == EbtBool) {
newConstArray[comp].setDConst(childConstUnions[2][arg2comp].getBConst()
? childConstUnions[1][arg1comp].getDConst()
: childConstUnions[0][arg0comp].getDConst());
} else {
newConstArray[comp].setDConst(
childConstUnions[0][arg0comp].getDConst() * (1.0 - childConstUnions[2][arg2comp].getDConst()) +
childConstUnions[1][arg1comp].getDConst() * childConstUnions[2][arg2comp].getDConst());
}
break;
case EOpStep:
newConstArray[comp].setDConst(childConstUnions[1][arg1comp].getDConst() < childConstUnions[0][arg0comp].getDConst() ? 0.0 : 1.0);
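For context, the reworked EOpMix folding above first bails out (returning the unfolded node) when the operands are not in the floating-point domain, then evaluates either the boolean-select form or the linear blend x*(1-a) + y*a. A minimal scalar sketch of the two numeric paths (names are illustrative):

// Scalar illustration of the two mix() folding results computed above.
inline double foldMixBlend(double x, double y, double a) { return x * (1.0 - a) + y * a; }
inline double foldMixSelect(double x, double y, bool useY) { return useY ? y : x; }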
@ -1354,7 +1372,9 @@ TIntermTyped* TIntermediate::foldDereference(TIntermTyped* node, int index, cons
// arrays, vectors, matrices, all use simple multiplicative math
// while structures need to add up heterogeneous members
int start;
if (node->isArray() || ! node->isStruct())
if (node->getType().isCoopMat())
start = 0;
else if (node->isArray() || ! node->isStruct())
start = size * index;
else {
// it is a structure

File diff suppressed because it is too large.

View file

@ -91,6 +91,8 @@ public:
void identifyBuiltIns(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage language, TSymbolTable& symbolTable, const TBuiltInResource &resources);
protected:
void addTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion);
void relateTabledBuiltins(int version, EProfile profile, const SpvVersion& spvVersion, EShLanguage, TSymbolTable&);
void add2ndGenerationSamplingImaging(int version, EProfile profile, const SpvVersion& spvVersion);
void addSubpassSampling(TSampler, const TString& typeName, int version, EProfile profile);
void addQueryFunctions(TSampler, const TString& typeName, int version, EProfile profile);

File diff suppressed because it is too large

View file

@ -67,6 +67,8 @@ void TParseContextBase::outputMessage(const TSourceLoc& loc, const char* szReaso
}
}
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
void C_DECL TParseContextBase::error(const TSourceLoc& loc, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...)
{
@ -113,6 +115,8 @@ void C_DECL TParseContextBase::ppWarn(const TSourceLoc& loc, const char* szReaso
va_end(args);
}
#endif
//
// Both test and if necessary, spit out an error, to see if the node is really
// an l-value that can be operated on this way.
@ -149,11 +153,13 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EvqConst: message = "can't modify a const"; break;
case EvqConstReadOnly: message = "can't modify a const"; break;
case EvqUniform: message = "can't modify a uniform"; break;
#ifndef GLSLANG_WEB
case EvqBuffer:
if (node->getQualifier().readonly)
if (node->getQualifier().isReadOnly())
message = "can't modify a readonly buffer";
if (node->getQualifier().isShaderRecordNV())
message = "can't modify a shaderrecordnv qualified buffer";
break;
#ifdef NV_EXTENSIONS
case EvqHitAttrNV:
if (language != EShLangIntersectNV)
message = "cannot modify hitAttributeNV in this stage";
@ -168,13 +174,13 @@ bool TParseContextBase::lValueErrorCheck(const TSourceLoc& loc, const char* op,
case EbtSampler:
message = "can't modify a sampler";
break;
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtVoid:
message = "can't modify void";
break;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtAtomicUint:
message = "can't modify an atomic_uint";
break;
case EbtAccStructNV:
message = "can't modify accelerationStructureNV";
break;
@ -230,7 +236,7 @@ void TParseContextBase::rValueErrorCheck(const TSourceLoc& loc, const char* op,
}
TIntermSymbol* symNode = node->getAsSymbolNode();
if (symNode && symNode->getQualifier().writeonly)
if (symNode && symNode->getQualifier().isWriteOnly())
error(loc, "can't read from writeonly object: ", op, symNode->getName().c_str());
}
@ -250,11 +256,17 @@ void TParseContextBase::trackLinkage(TSymbol& symbol)
// Give an error if not.
void TParseContextBase::checkIndex(const TSourceLoc& loc, const TType& type, int& index)
{
const auto sizeIsSpecializationExpression = [&type]() {
return type.containsSpecializationSize() &&
type.getArraySizes()->getOuterNode() != nullptr &&
type.getArraySizes()->getOuterNode()->getAsSymbolNode() == nullptr; };
if (index < 0) {
error(loc, "", "[", "index out of range '%d'", index);
index = 0;
} else if (type.isArray()) {
if (type.isSizedArray() && index >= type.getOuterArraySize()) {
if (type.isSizedArray() && !sizeIsSpecializationExpression() &&
index >= type.getOuterArraySize()) {
error(loc, "", "[", "array index out of range '%d'", index);
index = type.getOuterArraySize() - 1;
}
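
The added lambda means a constant index is only range-checked when the outer array size is a literal; when the size comes from a specialization-constant expression it is not known at parse time, so the error is skipped. A simplified stand-alone sketch of that policy (invented struct, not the real TType interface):

// Simplified illustration of the bounds-check policy above.
struct SimpleArrayType {
    bool sized;              // has an explicit outer size
    bool specConstSized;     // outer size is a specialization-constant expression
    int  outerSize;          // meaningful only when sized && !specConstSized
};

bool indexOutOfRange(const SimpleArrayType& t, int index)
{
    if (index < 0)
        return true;                      // always an error
    if (t.sized && !t.specConstSized)
        return index >= t.outerSize;      // only literal sizes are checked
    return false;                         // unsized or spec-const sized: defer the check
}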
@ -564,6 +576,7 @@ void TParseContextBase::parseSwizzleSelector(const TSourceLoc& loc, const TStrin
selector.push_back(0);
}
#ifdef ENABLE_HLSL
//
// Make the passed-in variable information become a member of the
// global uniform block. If this doesn't exist yet, make it.
@ -608,6 +621,7 @@ void TParseContextBase::growGlobalUniformBlock(const TSourceLoc& loc, TType& mem
++firstNewMember;
}
#endif
void TParseContextBase::finish()
{

File diff suppressed because it is too large

View file

@ -85,6 +85,7 @@ public:
statementNestingLevel(0), loopNestingLevel(0), structNestingLevel(0), controlFlowNestingLevel(0),
postEntryPointReturn(false),
contextPragma(true, false),
beginInvocationInterlockCount(0), endInvocationInterlockCount(0),
parsingBuiltins(parsingBuiltins), scanContext(nullptr), ppContext(nullptr),
limits(resources.limits),
globalUniformBlock(nullptr),
@ -96,6 +97,7 @@ public:
}
virtual ~TParseContextBase() { }
#if !defined(GLSLANG_WEB) || defined(GLSLANG_WEB_DEVEL)
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
@ -104,6 +106,7 @@ public:
const char* szExtraInfoFormat, ...);
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...);
#endif
virtual void setLimits(const TBuiltInResource&) = 0;
@ -149,8 +152,10 @@ public:
extensionCallback(line, extension, behavior);
}
#ifdef ENABLE_HLSL
// Manage the global uniform block (default uniforms in GLSL, $Global in HLSL)
virtual void growGlobalUniformBlock(const TSourceLoc&, TType&, const TString& memberName, TTypeList* typeList = nullptr);
#endif
// Potentially rename shader entry point function
void renameShaderFunction(TString*& name) const
@ -182,6 +187,8 @@ public:
// the statementNestingLevel the current switch statement is at, which must match the level of its case statements
TList<int> switchLevel;
struct TPragma contextPragma;
int beginInvocationInterlockCount;
int endInvocationInterlockCount;
protected:
TParseContextBase(TParseContextBase&);
@ -276,7 +283,7 @@ public:
const TString* entryPoint = nullptr);
virtual ~TParseContext();
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); };
bool obeyPrecisionQualifiers() const { return precisionManager.respectingPrecisionQualifiers(); }
void setPrecisionDefaults();
void setLimits(const TBuiltInResource&) override;
@ -294,13 +301,15 @@ public:
TIntermTyped* handleBracketDereference(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
void handleIndexLimits(const TSourceLoc&, TIntermTyped* base, TIntermTyped* index);
#ifndef GLSLANG_WEB
void makeEditable(TSymbol*&) override;
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
#endif
bool isIoResizeArray(const TType&) const;
void fixIoArraySize(const TSourceLoc&, TType&);
void ioArrayCheck(const TSourceLoc&, const TType&, const TString& identifier);
void handleIoResizeArrayAccess(const TSourceLoc&, TIntermTyped* base);
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false, bool isPerPrimitive = false);
int getIoArrayImplicitSize(bool isPerPrimitive = false) const;
void checkIoArraysConsistency(const TSourceLoc&, bool tailOnly = false);
int getIoArrayImplicitSize(const TQualifier&, TString* featureString = nullptr) const;
void checkIoArrayConsistency(const TSourceLoc&, int requiredSize, const char* feature, TType&, const TString&);
TIntermTyped* handleBinaryMath(const TSourceLoc&, const char* str, TOperator op, TIntermTyped* left, TIntermTyped* right);
@ -337,7 +346,7 @@ public:
void globalCheck(const TSourceLoc&, const char* token);
bool constructorError(const TSourceLoc&, TIntermNode*, TFunction&, TOperator, TType&);
bool constructorTextureSamplerError(const TSourceLoc&, const TFunction&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&);
void arraySizeCheck(const TSourceLoc&, TIntermTyped* expr, TArraySize&, const char *sizeType);
bool arrayQualifierError(const TSourceLoc&, const TQualifier&);
bool arrayError(const TSourceLoc&, const TType&);
void arraySizeRequiredCheck(const TSourceLoc&, const TArraySizes&);
@ -370,6 +379,7 @@ public:
void nestedStructCheck(const TSourceLoc&);
void arrayObjectCheck(const TSourceLoc&, const TType&, const char* op);
void opaqueCheck(const TSourceLoc&, const TType&, const char* op);
void referenceCheck(const TSourceLoc&, const TType&, const char* op);
void storage16BitAssignmentCheck(const TSourceLoc&, const TType&, const char* op);
void specializationCheck(const TSourceLoc&, const TType&, const char* op);
void structTypeCheck(const TSourceLoc&, TPublicType&);
@ -400,6 +410,7 @@ public:
TIntermTyped* addConstructor(const TSourceLoc&, TIntermNode*, const TType&);
TIntermTyped* constructAggregate(TIntermNode*, const TType&, int, const TSourceLoc&);
TIntermTyped* constructBuiltIn(const TType&, TOperator, TIntermTyped*, const TSourceLoc&, bool subset);
void inheritMemoryQualifiers(const TQualifier& from, TQualifier& to);
void declareBlock(const TSourceLoc&, TTypeList& typeList, const TString* instanceName = 0, TArraySizes* arraySizes = 0);
void blockStageIoCheck(const TSourceLoc&, const TQualifier&);
void blockQualifierCheck(const TSourceLoc&, const TQualifier&, bool instanceName);
@ -413,6 +424,7 @@ public:
void wrapupSwitchSubsequence(TIntermAggregate* statements, TIntermNode* branchNode);
TIntermNode* addSwitch(const TSourceLoc&, TIntermTyped* expression, TIntermAggregate* body);
#ifndef GLSLANG_WEB
TAttributeType attributeFromName(const TString& name) const;
TAttributes* makeAttributes(const TString& identifier) const;
TAttributes* makeAttributes(const TString& identifier, TIntermNode* node) const;
@ -421,11 +433,11 @@ public:
// Determine selection control from attributes
void handleSelectionAttributes(const TAttributes& attributes, TIntermNode*);
void handleSwitchAttributes(const TAttributes& attributes, TIntermNode*);
// Determine loop control from attributes
void handleLoopAttributes(const TAttributes& attributes, TIntermNode*);
#endif
void resizeMeshViewDimension(const TSourceLoc&, TType&);
void checkAndResizeMeshViewDim(const TSourceLoc&, TType&, bool isBlockMember);
protected:
void nonInitConstCheck(const TSourceLoc&, TString& identifier, TType& type);
@ -437,7 +449,9 @@ protected:
bool isRuntimeLength(const TIntermTyped&) const;
TIntermNode* executeInitializer(const TSourceLoc&, TIntermTyped* initializer, TVariable* variable);
TIntermTyped* convertInitializerList(const TSourceLoc&, const TType&, TIntermTyped* initializer);
#ifndef GLSLANG_WEB
void finish() override;
#endif
public:
//
@ -463,10 +477,11 @@ protected:
TQualifier globalUniformDefaults;
TQualifier globalInputDefaults;
TQualifier globalOutputDefaults;
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
TString currentCaller; // name of last function body entered (not valid when at global scope)
TIdSetType inductiveLoopIds;
#ifndef GLSLANG_WEB
int* atomicUintOffsets; // to become an array of the right size to hold an offset per binding point
bool anyIndexLimits;
TIdSetType inductiveLoopIds;
TVector<TIntermTyped*> needsIndexLimitationChecking;
//
@ -502,6 +517,7 @@ protected:
// array-sizing declarations
//
TVector<TSymbol*> ioArraySymbolResizeList;
#endif
};
} // end namespace glslang

File diff suppressed because it is too large

View file

@ -288,6 +288,11 @@ void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int versi
EShLanguage language, EShSource source, TInfoSink& infoSink, TSymbolTable** commonTable,
TSymbolTable** symbolTables)
{
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
(*symbolTables[language]).adoptLevels(*commonTable[CommonIndex(profile, language)]);
InitializeSymbolTable(builtInParseables.getStageString(language), version, profile, spvVersion, language, source,
infoSink, *symbolTables[language]);
@ -304,6 +309,11 @@ void InitializeStageSymbolTable(TBuiltInParseables& builtInParseables, int versi
//
bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TSymbolTable** symbolTables, int version, EProfile profile, const SpvVersion& spvVersion, EShSource source)
{
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
std::unique_ptr<TBuiltInParseables> builtInParseables(CreateBuiltInParseables(infoSink, source));
if (builtInParseables == nullptr)
@ -326,6 +336,7 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangFragment, source,
infoSink, commonTable, symbolTables);
#ifndef GLSLANG_WEB
// check for tessellation
if ((profile != EEsProfile && version >= 150) ||
(profile == EEsProfile && version >= 310)) {
@ -340,6 +351,7 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
(profile == EEsProfile && version >= 310))
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangGeometry, source,
infoSink, commonTable, symbolTables);
#endif
// check for compute
if ((profile != EEsProfile && version >= 420) ||
@ -347,7 +359,6 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangCompute, source,
infoSink, commonTable, symbolTables);
#ifdef NV_EXTENSIONS
// check for ray tracing stages
if (profile != EEsProfile && version >= 450) {
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangRayGenNV, source,
@ -375,7 +386,6 @@ bool InitializeSymbolTables(TInfoSink& infoSink, TSymbolTable** commonTable, TS
(profile == EEsProfile && version >= 320))
InitializeStageSymbolTable(*builtInParseables, version, profile, spvVersion, EShLangTaskNV, source,
infoSink, commonTable, symbolTables);
#endif
return true;
}
@ -474,6 +484,18 @@ void SetupBuiltinSymbolTable(int version, EProfile profile, const SpvVersion& sp
glslang::ReleaseGlobalLock();
}
// Function to Print all builtins
void DumpBuiltinSymbolTable(TInfoSink& infoSink, const TSymbolTable& symbolTable)
{
#ifndef GLSLANG_WEB
infoSink.debug << "BuiltinSymbolTable {\n";
symbolTable.dump(infoSink, true);
infoSink.debug << "}\n";
#endif
}
// Return true if the shader was correctly specified for version/profile/stage.
bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNotFirst, int defaultVersion,
EShSource source, int& version, EProfile& profile, const SpvVersion& spvVersion)
@ -569,6 +591,7 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
break;
}
#ifndef GLSLANG_WEB
// Correct for stage type...
switch (stage) {
case EShLangGeometry:
@ -600,7 +623,6 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
version = profile == EEsProfile ? 310 : 420;
}
break;
#ifdef NV_EXTENSIONS
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
@ -621,7 +643,6 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
infoSink.info.message(EPrefixError, "#version: mesh/task shaders require es profile with version 320 or above, or non-es profile with version 450 or above");
version = profile == EEsProfile ? 320 : 450;
}
#endif
default:
break;
}
@ -634,15 +655,10 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
// Check for SPIR-V compatibility
if (spvVersion.spv != 0) {
switch (profile) {
case EEsProfile:
if (spvVersion.vulkan > 0 && version < 310) {
case EEsProfile:
if (version < 310) {
correct = false;
infoSink.info.message(EPrefixError, "#version: ES shaders for Vulkan SPIR-V require version 310 or higher");
version = 310;
}
if (spvVersion.openGl >= 100) {
correct = false;
infoSink.info.message(EPrefixError, "#version: ES shaders for OpenGL SPIR-V are not supported");
infoSink.info.message(EPrefixError, "#version: ES shaders for SPIR-V require version 310 or higher");
version = 310;
}
break;
@ -663,6 +679,7 @@ bool DeduceVersionProfile(TInfoSink& infoSink, EShLanguage stage, bool versionNo
break;
}
}
#endif
return correct;
}
@ -821,13 +838,17 @@ bool ProcessDeferred(
// Get all the stages, languages, clients, and other environment
// stuff sorted out.
EShSource source = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
EShSource sourceGuess = (messages & EShMsgReadHlsl) != 0 ? EShSourceHlsl : EShSourceGlsl;
SpvVersion spvVersion;
EShLanguage stage = compiler->getLanguage();
TranslateEnvironment(environment, messages, source, stage, spvVersion);
TranslateEnvironment(environment, messages, sourceGuess, stage, spvVersion);
#ifdef ENABLE_HLSL
EShSource source = sourceGuess;
if (environment != nullptr && environment->target.hlslFunctionality1)
intermediate.setHlslFunctionality1();
#else
const EShSource source = EShSourceGlsl;
#endif
// First, without using the preprocessor or parser, find the #version, so we know what
// symbol tables, processing rules, etc. to set up. This does not need the extra strings
// outlined above, just the user shader, after the system and user preambles.
@ -840,6 +861,7 @@ bool ProcessDeferred(
: userInput.scanVersion(version, profile, versionNotFirstToken);
bool versionNotFound = version == 0;
if (forceDefaultVersionAndProfile && source == EShSourceGlsl) {
#ifndef GLSLANG_WEB
if (! (messages & EShMsgSuppressWarnings) && ! versionNotFound &&
(version != defaultVersion || profile != defaultProfile)) {
compiler->infoSink.info << "Warning, (version, profile) forced to be ("
@ -847,7 +869,7 @@ bool ProcessDeferred(
<< "), while in source code it is ("
<< version << ", " << ProfileName(profile) << ")\n";
}
#endif
if (versionNotFound) {
versionNotFirstToken = false;
versionNotFirst = false;
@ -859,7 +881,13 @@ bool ProcessDeferred(
bool goodVersion = DeduceVersionProfile(compiler->infoSink, stage,
versionNotFirst, defaultVersion, source, version, profile, spvVersion);
#ifdef GLSLANG_WEB
profile = EEsProfile;
version = 310;
#endif
bool versionWillBeError = (versionNotFound || (profile == EEsProfile && version >= 300 && versionNotFirst));
#ifndef GLSLANG_WEB
bool warnVersionNotFirst = false;
if (! versionWillBeError && versionNotFirstToken) {
if (messages & EShMsgRelaxedErrors)
@ -867,6 +895,7 @@ bool ProcessDeferred(
else
versionWillBeError = true;
}
#endif
intermediate.setSource(source);
intermediate.setVersion(version);
@ -875,8 +904,10 @@ bool ProcessDeferred(
RecordProcesses(intermediate, messages, sourceEntryPointName);
if (spvVersion.vulkan > 0)
intermediate.setOriginUpperLeft();
#ifdef ENABLE_HLSL
if ((messages & EShMsgHlslOffsets) || source == EShSourceHlsl)
intermediate.setHlslOffsets();
#endif
if (messages & EShMsgDebugInfo) {
intermediate.setSourceFile(names[numPre]);
for (int s = 0; s < numStrings; ++s) {
@ -905,6 +936,9 @@ bool ProcessDeferred(
return false;
}
if (messages & EShMsgBuiltinSymbolTable)
DumpBuiltinSymbolTable(compiler->infoSink, *symbolTable);
//
// Now we can process the full shader under proper symbols and rules.
//
@ -923,11 +957,13 @@ bool ProcessDeferred(
parseContext->setLimits(*resources);
if (! goodVersion)
parseContext->addError();
#ifndef GLSLANG_WEB
if (warnVersionNotFirst) {
TSourceLoc loc;
loc.init();
parseContext->warn(loc, "Illegal to have non-comment, non-whitespace tokens before #version", "#version", "");
}
#endif
parseContext->initializeExtensionBehavior();
@ -958,6 +994,8 @@ bool ProcessDeferred(
return success;
}
#ifndef GLSLANG_WEB
// Responsible for keeping track of the most recent source string and line in
// the preprocessor and outputting newlines appropriately if the source string
// or line changes.
@ -1154,6 +1192,8 @@ struct DoPreprocessing {
std::string* outputString;
};
#endif
// DoFullParse is a valid ProcessingConext template argument for fully
// parsing the shader. It populates the "intermediate" with the AST.
struct DoFullParse{
@ -1184,6 +1224,7 @@ struct DoFullParse{
}
};
#ifndef GLSLANG_WEB
// Take a single compilation unit, and run the preprocessor on it.
// Return: True if there were no issues found in preprocessing,
// False if during preprocessing any unknown version, pragmas or
@ -1216,6 +1257,7 @@ bool PreprocessDeferred(
forwardCompatible, messages, intermediate, parser,
false, includer);
}
#endif
//
// do a partial compile on the given strings for a single compilation unit
@ -1734,6 +1776,11 @@ void TShader::addProcesses(const std::vector<std::string>& p)
intermediate->addProcesses(p);
}
void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
void TShader::setNanMinMaxClamp(bool useNonNan) { intermediate->setNanMinMaxClamp(useNonNan); }
#ifndef GLSLANG_WEB
// Set binding base for given resource type
void TShader::setShiftBinding(TResourceType res, unsigned int base) {
intermediate->setShiftBinding(res, base);
@ -1761,7 +1808,7 @@ void TShader::setShiftSsboBinding(unsigned int base) { setShiftBinding(EResSs
// Enables binding automapping using TIoMapper
void TShader::setAutoMapBindings(bool map) { intermediate->setAutoMapBindings(map); }
// Enables position.Y output negation in vertex shader
void TShader::setInvertY(bool invert) { intermediate->setInvertY(invert); }
// Fragile: currently within one stage: simple auto-assignment of location
void TShader::setAutoMapLocations(bool map) { intermediate->setAutoMapLocations(map); }
void TShader::addUniformLocationOverride(const char* name, int loc)
@ -1772,12 +1819,16 @@ void TShader::setUniformLocationBase(int base)
{
intermediate->setUniformLocationBase(base);
}
// See comment above TDefaultHlslIoMapper in iomapper.cpp:
void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
void TShader::setNoStorageFormat(bool useUnknownFormat) { intermediate->setNoStorageFormat(useUnknownFormat); }
void TShader::setResourceSetBinding(const std::vector<std::string>& base) { intermediate->setResourceSetBinding(base); }
void TShader::setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { intermediate->setTextureSamplerTransformMode(mode); }
#endif
#ifdef ENABLE_HLSL
// See comment above TDefaultHlslIoMapper in iomapper.cpp:
void TShader::setHlslIoMapping(bool hlslIoMap) { intermediate->setHlslIoMapping(hlslIoMap); }
void TShader::setFlattenUniformArrays(bool flatten) { intermediate->setFlattenUniformArrays(flatten); }
#endif
//
// Turn the shader strings into a parse tree in the TIntermediate.
@ -1801,6 +1852,7 @@ bool TShader::parse(const TBuiltInResource* builtInResources, int defaultVersion
&environment);
}
#ifndef GLSLANG_WEB
// Fill in a string with the result of preprocessing ShaderStrings
// Returns true if all extensions, pragmas and version strings were valid.
//
@ -1825,6 +1877,7 @@ bool TShader::preprocess(const TBuiltInResource* builtInResources,
defaultProfile, forceDefaultVersionAndProfile,
forwardCompatible, message, includer, *intermediate, output_string);
}
#endif
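
As a usage sketch of the TShader entry points touched above (a minimal desktop compile; the zeroed resource limits, default version 110, and error handling are placeholders, not prescribed by this commit):

#include <cstdio>
#include <glslang/Public/ShaderLang.h>

// Rough sketch only: compile one vertex shader with the options touched in this commit.
bool compileVertexShader(const char* source)
{
    glslang::InitializeProcess();

    glslang::TShader shader(EShLangVertex);
    shader.setStrings(&source, 1);
    shader.setInvertY(true);          // negate position.Y output (setInvertY above)
    shader.setNanMinMaxClamp(false);  // keep default min/max/clamp NaN behavior (setNanMinMaxClamp above)

    TBuiltInResource resources = {};  // placeholder limits; real callers supply full values
    EShMessages messages = EShMessages(EShMsgDefault | EShMsgBuiltinSymbolTable); // dump builtins to the debug log

    bool ok = shader.parse(&resources, 110, false, messages);
    if (!ok)
        printf("%s\n", shader.getInfoLog());

    glslang::FinalizeProcess();
    return ok;
}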
const char* TShader::getInfoLog()
{
@ -1836,7 +1889,11 @@ const char* TShader::getInfoDebugLog()
return infoSink->debug.c_str();
}
TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
TProgram::TProgram() :
#ifndef GLSLANG_WEB
reflection(0),
#endif
linked(false)
{
pool = new TPoolAllocator;
infoSink = new TInfoSink;
@ -1848,9 +1905,10 @@ TProgram::TProgram() : reflection(0), ioMapper(nullptr), linked(false)
TProgram::~TProgram()
{
delete ioMapper;
delete infoSink;
#ifndef GLSLANG_WEB
delete reflection;
#endif
for (int s = 0; s < EShLangCount; ++s)
if (newedIntermediate[s])
@ -1895,6 +1953,7 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
if (stages[stage].size() == 0)
return true;
#ifndef GLSLANG_WEB
int numEsShaders = 0, numNonEsShaders = 0;
for (auto it = stages[stage].begin(); it != stages[stage].end(); ++it) {
if ((*it)->intermediate->getProfile() == EEsProfile) {
@ -1943,7 +2002,9 @@ bool TProgram::linkStage(EShLanguage stage, EShMessages messages)
for (it = stages[stage].begin(); it != stages[stage].end(); ++it)
intermediate[stage]->merge(*infoSink, *(*it)->intermediate);
}
#else
intermediate[stage] = stages[stage].front()->intermediate;
#endif
intermediate[stage]->finalCheck(*infoSink, (messages & EShMsgKeepUncalled) != 0);
if (messages & EShMsgAST)
@ -1962,16 +2023,33 @@ const char* TProgram::getInfoDebugLog()
return infoSink->debug.c_str();
}
#ifndef GLSLANG_WEB
//
// Reflection implementation.
//
bool TProgram::buildReflection()
bool TProgram::buildReflection(int opts)
{
if (! linked || reflection)
if (! linked || reflection != nullptr)
return false;
reflection = new TReflection;
int firstStage = EShLangVertex, lastStage = EShLangFragment;
if (opts & EShReflectionIntermediateIO) {
// if we're reflecting intermediate I/O, determine the first and last stage linked and use those as the
// boundaries for which stages generate pipeline inputs/outputs
firstStage = EShLangCount;
lastStage = 0;
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
firstStage = std::min(firstStage, s);
lastStage = std::max(lastStage, s);
}
}
}
reflection = new TReflection((EShReflectionOptions)opts, (EShLanguage)firstStage, (EShLanguage)lastStage);
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
@ -1983,48 +2061,50 @@ bool TProgram::buildReflection()
return true;
}
int TProgram::getNumLiveUniformVariables() const { return reflection->getNumUniforms(); }
int TProgram::getNumLiveUniformBlocks() const { return reflection->getNumUniformBlocks(); }
const char* TProgram::getUniformName(int index) const { return reflection->getUniform(index).name.c_str(); }
const char* TProgram::getUniformBlockName(int index) const { return reflection->getUniformBlock(index).name.c_str(); }
int TProgram::getUniformBlockSize(int index) const { return reflection->getUniformBlock(index).size; }
int TProgram::getUniformIndex(const char* name) const { return reflection->getIndex(name); }
int TProgram::getUniformBinding(int index) const { return reflection->getUniform(index).getBinding(); }
EShLanguageMask TProgram::getUniformStages(int index) const { return reflection->getUniform(index).stages; }
int TProgram::getUniformBlockBinding(int index) const { return reflection->getUniformBlock(index).getBinding(); }
int TProgram::getUniformBlockIndex(int index) const { return reflection->getUniform(index).index; }
int TProgram::getUniformBlockCounterIndex(int index) const { return reflection->getUniformBlock(index).counterIndex; }
int TProgram::getUniformType(int index) const { return reflection->getUniform(index).glDefineType; }
int TProgram::getUniformBufferOffset(int index) const { return reflection->getUniform(index).offset; }
int TProgram::getUniformArraySize(int index) const { return reflection->getUniform(index).size; }
int TProgram::getNumLiveAttributes() const { return reflection->getNumAttributes(); }
const char* TProgram::getAttributeName(int index) const { return reflection->getAttribute(index).name.c_str(); }
int TProgram::getAttributeType(int index) const { return reflection->getAttribute(index).glDefineType; }
const TType* TProgram::getAttributeTType(int index) const { return reflection->getAttribute(index).getType(); }
const TType* TProgram::getUniformTType(int index) const { return reflection->getUniform(index).getType(); }
const TType* TProgram::getUniformBlockTType(int index) const { return reflection->getUniformBlock(index).getType(); }
unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
unsigned TProgram::getLocalSize(int dim) const { return reflection->getLocalSize(dim); }
int TProgram::getReflectionIndex(const char* name) const { return reflection->getIndex(name); }
int TProgram::getReflectionPipeIOIndex(const char* name, const bool inOrOut) const
{ return reflection->getPipeIOIndex(name, inOrOut); }
void TProgram::dumpReflection() { reflection->dump(); }
int TProgram::getNumUniformVariables() const { return reflection->getNumUniforms(); }
const TObjectReflection& TProgram::getUniform(int index) const { return reflection->getUniform(index); }
int TProgram::getNumUniformBlocks() const { return reflection->getNumUniformBlocks(); }
const TObjectReflection& TProgram::getUniformBlock(int index) const { return reflection->getUniformBlock(index); }
int TProgram::getNumPipeInputs() const { return reflection->getNumPipeInputs(); }
const TObjectReflection& TProgram::getPipeInput(int index) const { return reflection->getPipeInput(index); }
int TProgram::getNumPipeOutputs() const { return reflection->getNumPipeOutputs(); }
const TObjectReflection& TProgram::getPipeOutput(int index) const { return reflection->getPipeOutput(index); }
int TProgram::getNumBufferVariables() const { return reflection->getNumBufferVariables(); }
const TObjectReflection& TProgram::getBufferVariable(int index) const { return reflection->getBufferVariable(index); }
int TProgram::getNumBufferBlocks() const { return reflection->getNumStorageBuffers(); }
const TObjectReflection& TProgram::getBufferBlock(int index) const { return reflection->getStorageBufferBlock(index); }
int TProgram::getNumAtomicCounters() const { return reflection->getNumAtomicCounters(); }
const TObjectReflection& TProgram::getAtomicCounter(int index) const { return reflection->getAtomicCounter(index); }
void TProgram::dumpReflection() { if (reflection != nullptr) reflection->dump(); }
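
A usage sketch of the reworked reflection interface above (assumes a glslang::TProgram whose shaders were added and linked successfully; output formatting is illustrative):

#include <cstdio>
#include <glslang/Public/ShaderLang.h>

// Sketch: build reflection with cross-stage pipeline I/O enabled and walk the
// live uniforms and pipeline inputs through the object-based query API.
void dumpProgramReflection(glslang::TProgram& program)
{
    if (!program.buildReflection(EShReflectionIntermediateIO))
        return;

    for (int i = 0; i < program.getNumUniformVariables(); ++i) {
        const glslang::TObjectReflection& u = program.getUniform(i);
        printf("uniform %s: offset %d, glDefineType %d\n", u.name.c_str(), u.offset, u.glDefineType);
    }
    for (int i = 0; i < program.getNumPipeInputs(); ++i)
        printf("pipeline input: %s\n", program.getPipeInput(i).name.c_str());

    program.dumpReflection();  // full dump via the reflection object
}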
//
// I/O mapping implementation.
//
bool TProgram::mapIO(TIoMapResolver* resolver)
bool TProgram::mapIO(TIoMapResolver* pResolver, TIoMapper* pIoMapper)
{
if (! linked || ioMapper)
if (! linked)
return false;
ioMapper = new TIoMapper;
TIoMapper* ioMapper = nullptr;
TIoMapper defaultIOMapper;
if (pIoMapper == nullptr)
ioMapper = &defaultIOMapper;
else
ioMapper = pIoMapper;
for (int s = 0; s < EShLangCount; ++s) {
if (intermediate[s]) {
if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, resolver))
if (! ioMapper->addStage((EShLanguage)s, *intermediate[s], *infoSink, pResolver))
return false;
}
}
return true;
return ioMapper->doMap(pResolver, *infoSink);
}
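
The reworked mapIO() above no longer owns the mapper: a null mapper argument selects a stack-allocated default TIoMapper, and a caller-supplied resolver/mapper pair can be passed to customize binding and location assignment. A minimal sketch, assuming the defaulted null arguments declared in the public header:

#include <glslang/Public/ShaderLang.h>

// Sketch: run I/O mapping after a successful link(). Calling with no arguments
// uses the built-in resolver and a default TIoMapper; a custom TIoMapResolver
// and/or TIoMapper can be passed instead.
bool assignDefaultBindings(glslang::TProgram& program)
{
    return program.mapIO();
}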
#endif // GLSLANG_WEB
} // end namespace glslang

View file

@ -61,63 +61,66 @@ void TType::buildMangledName(TString& mangledName) const
switch (basicType) {
case EbtFloat: mangledName += 'f'; break;
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt: mangledName += 'i'; break;
case EbtUint: mangledName += 'u'; break;
case EbtBool: mangledName += 'b'; break;
#ifndef GLSLANG_WEB
case EbtDouble: mangledName += 'd'; break;
case EbtFloat16: mangledName += "f16"; break;
case EbtInt8: mangledName += "i8"; break;
case EbtUint8: mangledName += "u8"; break;
case EbtInt16: mangledName += "i16"; break;
case EbtUint16: mangledName += "u16"; break;
case EbtInt64: mangledName += "i64"; break;
case EbtUint64: mangledName += "u64"; break;
case EbtBool: mangledName += 'b'; break;
case EbtAtomicUint: mangledName += "au"; break;
#ifdef NV_EXTENSIONS
case EbtAccStructNV: mangledName += "asnv"; break;
#endif
case EbtSampler:
switch (sampler.type) {
#ifdef AMD_EXTENSIONS
#ifndef GLSLANG_WEB
case EbtFloat16: mangledName += "f16"; break;
#endif
case EbtInt: mangledName += "i"; break;
case EbtUint: mangledName += "u"; break;
default: break; // some compilers want this
}
if (sampler.image)
mangledName += "I"; // a normal image
else if (sampler.sampler)
if (sampler.isImageClass())
mangledName += "I"; // a normal image or subpass
else if (sampler.isPureSampler())
mangledName += "p"; // a "pure" sampler
else if (!sampler.combined)
else if (!sampler.isCombined())
mangledName += "t"; // a "pure" texture
else
mangledName += "s"; // traditional combined sampler
if (sampler.arrayed)
if (sampler.isArrayed())
mangledName += "A";
if (sampler.shadow)
if (sampler.isShadow())
mangledName += "S";
if (sampler.external)
if (sampler.isExternal())
mangledName += "E";
if (sampler.yuv)
if (sampler.isYuv())
mangledName += "Y";
switch (sampler.dim) {
case Esd1D: mangledName += "1"; break;
case Esd2D: mangledName += "2"; break;
case Esd3D: mangledName += "3"; break;
case EsdCube: mangledName += "C"; break;
#ifndef GLSLANG_WEB
case Esd1D: mangledName += "1"; break;
case EsdRect: mangledName += "R2"; break;
case EsdBuffer: mangledName += "B"; break;
case EsdSubpass: mangledName += "P"; break;
#endif
default: break; // some compilers want this
}
#ifdef ENABLE_HLSL
if (sampler.hasReturnStruct()) {
// Name mangle for sampler return struct uses struct table index.
mangledName += "-tx-struct";
char text[16]; // plenty enough space for the small integers.
snprintf(text, sizeof(text), "%d-", sampler.structReturnIndex);
snprintf(text, sizeof(text), "%d-", sampler.getStructReturnIndex());
mangledName += text;
} else {
switch (sampler.getVectorSize()) {
@ -127,8 +130,9 @@ void TType::buildMangledName(TString& mangledName) const
case 4: break; // default to prior name mangle behavior
}
}
#endif
if (sampler.ms)
if (sampler.isMultiSample())
mangledName += "M";
break;
case EbtStruct:
@ -172,44 +176,88 @@ void TType::buildMangledName(TString& mangledName) const
}
}
#ifndef GLSLANG_WEB
//
// Dump functions.
//
void TVariable::dump(TInfoSink& infoSink) const
void TSymbol::dumpExtensions(TInfoSink& infoSink) const
{
infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " " << type.getBasicTypeString();
if (type.isArray()) {
infoSink.debug << "[0]";
int numExtensions = getNumExtensions();
if (numExtensions) {
infoSink.debug << " <";
for (int i = 0; i < numExtensions; i++)
infoSink.debug << getExtensions()[i] << ",";
infoSink.debug << ">";
}
}
void TVariable::dump(TInfoSink& infoSink, bool complete) const
{
if (complete) {
infoSink.debug << getName().c_str() << ": " << type.getCompleteString();
dumpExtensions(infoSink);
} else {
infoSink.debug << getName().c_str() << ": " << type.getStorageQualifierString() << " "
<< type.getBasicTypeString();
if (type.isArray())
infoSink.debug << "[0]";
}
infoSink.debug << "\n";
}
void TFunction::dump(TInfoSink& infoSink) const
void TFunction::dump(TInfoSink& infoSink, bool complete) const
{
infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " " << getMangledName().c_str() << "\n";
if (complete) {
infoSink.debug << getName().c_str() << ": " << returnType.getCompleteString() << " " << getName().c_str()
<< "(";
int numParams = getParamCount();
for (int i = 0; i < numParams; i++) {
const TParameter &param = parameters[i];
infoSink.debug << param.type->getCompleteString() << " "
<< (param.type->isStruct() ? "of " + param.type->getTypeName() + " " : "")
<< (param.name ? *param.name : "") << (i < numParams - 1 ? "," : "");
}
infoSink.debug << ")";
dumpExtensions(infoSink);
} else {
infoSink.debug << getName().c_str() << ": " << returnType.getBasicTypeString() << " "
<< getMangledName().c_str() << "n";
}
infoSink.debug << "\n";
}
void TAnonMember::dump(TInfoSink& TInfoSink) const
void TAnonMember::dump(TInfoSink& TInfoSink, bool) const
{
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str() << "\n";
TInfoSink.debug << "anonymous member " << getMemberNumber() << " of " << getAnonContainer().getName().c_str()
<< "\n";
}
void TSymbolTableLevel::dump(TInfoSink &infoSink) const
void TSymbolTableLevel::dump(TInfoSink& infoSink, bool complete) const
{
tLevel::const_iterator it;
for (it = level.begin(); it != level.end(); ++it)
(*it).second->dump(infoSink);
(*it).second->dump(infoSink, complete);
}
void TSymbolTable::dump(TInfoSink &infoSink) const
void TSymbolTable::dump(TInfoSink& infoSink, bool complete) const
{
for (int level = currentLevel(); level >= 0; --level) {
infoSink.debug << "LEVEL " << level << "\n";
table[level]->dump(infoSink);
table[level]->dump(infoSink, complete);
}
}
#endif
//
// Functions have buried pointers to delete.
//

View file

@ -116,7 +116,11 @@ public:
}
virtual int getNumExtensions() const { return extensions == nullptr ? 0 : (int)extensions->size(); }
virtual const char** getExtensions() const { return extensions->data(); }
virtual void dump(TInfoSink &infoSink) const = 0;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const = 0;
void dumpExtensions(TInfoSink& infoSink) const;
#endif
virtual bool isReadOnly() const { return ! writable; }
virtual void makeReadOnly() { writable = false; }
@ -192,7 +196,9 @@ public:
}
virtual const char** getMemberExtensions(int member) const { return (*memberExtensions)[member].data(); }
virtual void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
protected:
explicit TVariable(const TVariable&);
@ -313,7 +319,9 @@ public:
virtual TParameter& operator[](int i) { assert(writable); return parameters[i]; }
virtual const TParameter& operator[](int i) const { return parameters[i]; }
virtual void dump(TInfoSink &infoSink) const override;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TFunction(const TFunction&);
@ -373,7 +381,9 @@ public:
virtual const char** getExtensions() const override { return anonContainer.getMemberExtensions(memberNumber); }
virtual int getAnonId() const { return anonId; }
virtual void dump(TInfoSink &infoSink) const override;
#ifndef GLSLANG_WEB
virtual void dump(TInfoSink& infoSink, bool complete = false) const override;
#endif
protected:
explicit TAnonMember(const TAnonMember&);
@ -541,7 +551,9 @@ public:
void relateToOperator(const char* name, TOperator op);
void setFunctionExtensions(const char* name, int num, const char* const extensions[]);
void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
TSymbolTableLevel* clone() const;
void readOnly();
@ -842,7 +854,9 @@ public:
}
int getMaxSymbolId() { return uniqueId; }
void dump(TInfoSink &infoSink) const;
#ifndef GLSLANG_WEB
void dump(TInfoSink& infoSink, bool complete = false) const;
#endif
void copyTable(const TSymbolTable& copyOf);
void setPreviousDefaultPrecisions(TPrecisionQualifier *p) { table[currentLevel()]->setPreviousDefaultPrecisions(p); }

View file

@ -145,6 +145,8 @@
namespace glslang {
#ifndef GLSLANG_WEB
//
// Initialize all extensions, almost always to 'disable', as once their features
// are incorporated into a core version, their features are supported through allowing that
@ -170,8 +172,10 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_ARB_tessellation_shader] = EBhDisable;
extensionBehavior[E_GL_ARB_enhanced_layouts] = EBhDisable;
extensionBehavior[E_GL_ARB_texture_cube_map_array] = EBhDisable;
extensionBehavior[E_GL_ARB_texture_multisample] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_texture_lod] = EBhDisable;
extensionBehavior[E_GL_ARB_explicit_attrib_location] = EBhDisable;
extensionBehavior[E_GL_ARB_explicit_uniform_location] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_image_load_store] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_atomic_counters] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_draw_parameters] = EBhDisable;
@ -187,6 +191,10 @@ void TParseVersions::initializeExtensionBehavior()
// extensionBehavior[E_GL_ARB_cull_distance] = EBhDisable; // present for 4.5, but need extension control over block members
extensionBehavior[E_GL_ARB_post_depth_coverage] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_viewport_layer_array] = EBhDisable;
extensionBehavior[E_GL_ARB_fragment_shader_interlock] = EBhDisable;
extensionBehavior[E_GL_ARB_shader_clock] = EBhDisable;
extensionBehavior[E_GL_ARB_uniform_buffer_object] = EBhDisable;
extensionBehavior[E_GL_ARB_sample_shading] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_basic] = EBhDisable;
extensionBehavior[E_GL_KHR_shader_subgroup_vote] = EBhDisable;
@ -209,6 +217,9 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_scalar_block_layout] = EBhDisable;
extensionBehavior[E_GL_EXT_fragment_invocation_density] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference2] = EBhDisable;
extensionBehavior[E_GL_EXT_buffer_reference_uvec2] = EBhDisable;
extensionBehavior[E_GL_EXT_demote_to_helper_invocation] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_16bit_storage] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_8bit_storage] = EBhDisable;
@ -217,7 +228,6 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_GOOGLE_cpp_style_line_directive] = EBhDisable;
extensionBehavior[E_GL_GOOGLE_include_directive] = EBhDisable;
#ifdef AMD_EXTENSIONS
extensionBehavior[E_GL_AMD_shader_ballot] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_trinary_minmax] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_explicit_vertex_parameter] = EBhDisable;
@ -228,9 +238,9 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_AMD_shader_image_load_store_lod] = EBhDisable;
extensionBehavior[E_GL_AMD_shader_fragment_mask] = EBhDisable;
extensionBehavior[E_GL_AMD_gpu_shader_half_float_fetch] = EBhDisable;
#endif
#ifdef NV_EXTENSIONS
extensionBehavior[E_GL_INTEL_shader_integer_functions2] = EBhDisable;
extensionBehavior[E_GL_NV_sample_mask_override_coverage] = EBhDisable;
extensionBehavior[E_SPV_NV_geometry_shader_passthrough] = EBhDisable;
extensionBehavior[E_GL_NV_viewport_array2] = EBhDisable;
@ -246,7 +256,10 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_NV_compute_shader_derivatives] = EBhDisable;
extensionBehavior[E_GL_NV_shader_texture_footprint] = EBhDisable;
extensionBehavior[E_GL_NV_mesh_shader] = EBhDisable;
#endif
extensionBehavior[E_GL_NV_cooperative_matrix] = EBhDisable;
extensionBehavior[E_GL_NV_shader_sm_builtins] = EBhDisable;
extensionBehavior[E_GL_NV_integer_cooperative_matrix] = EBhDisable;
// AEP
extensionBehavior[E_GL_ANDROID_extension_pack_es31a] = EBhDisable;
@ -279,6 +292,7 @@ void TParseVersions::initializeExtensionBehavior()
// EXT extensions
extensionBehavior[E_GL_EXT_device_group] = EBhDisable;
extensionBehavior[E_GL_EXT_multiview] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_realtime_clock] = EBhDisable;
// OVR extensions
extensionBehavior[E_GL_OVR_multiview] = EBhDisable;
@ -293,16 +307,26 @@ void TParseVersions::initializeExtensionBehavior()
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float16] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float32] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_explicit_arithmetic_types_float64] = EBhDisable;
// subgroup extended types
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int8] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int16] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_int64] = EBhDisable;
extensionBehavior[E_GL_EXT_shader_subgroup_extended_types_float16] = EBhDisable;
}
#endif // GLSLANG_WEB
// Get code that is not part of a shared symbol table, is specific to this shader,
// or needed by the preprocessor (which does not use a shared symbol table).
void TParseVersions::getPreamble(std::string& preamble)
{
if (profile == EEsProfile) {
if (isEsProfile()) {
preamble =
"#define GL_ES 1\n"
"#define GL_FRAGMENT_PRECISION_HIGH 1\n"
#ifdef GLSLANG_WEB
;
#else
"#define GL_OES_texture_3D 1\n"
"#define GL_OES_standard_derivatives 1\n"
"#define GL_EXT_frag_depth 1\n"
@ -342,11 +366,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
;
#ifdef NV_EXTENSIONS
if (profile == EEsProfile && version >= 300) {
if (isEsProfile() && version >= 300) {
preamble += "#define GL_NV_shader_noperspective_interpolation 1\n";
}
#endif
} else {
preamble =
@ -360,8 +382,10 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_ARB_tessellation_shader 1\n"
"#define GL_ARB_enhanced_layouts 1\n"
"#define GL_ARB_texture_cube_map_array 1\n"
"#define GL_ARB_texture_multisample 1\n"
"#define GL_ARB_shader_texture_lod 1\n"
"#define GL_ARB_explicit_attrib_location 1\n"
"#define GL_ARB_explicit_uniform_location 1\n"
"#define GL_ARB_shader_image_load_store 1\n"
"#define GL_ARB_shader_atomic_counters 1\n"
"#define GL_ARB_shader_draw_parameters 1\n"
@ -374,8 +398,11 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_ARB_sparse_texture2 1\n"
"#define GL_ARB_sparse_texture_clamp 1\n"
"#define GL_ARB_shader_stencil_export 1\n"
"#define GL_ARB_sample_shading 1\n"
// "#define GL_ARB_cull_distance 1\n" // present for 4.5, but need extension control over block members
"#define GL_ARB_post_depth_coverage 1\n"
"#define GL_ARB_fragment_shader_interlock 1\n"
"#define GL_ARB_uniform_buffer_object 1\n"
"#define GL_EXT_shader_non_constant_global_initializers 1\n"
"#define GL_EXT_shader_image_load_formatted 1\n"
"#define GL_EXT_post_depth_coverage 1\n"
@ -387,6 +414,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_scalar_block_layout 1\n"
"#define GL_EXT_fragment_invocation_density 1\n"
"#define GL_EXT_buffer_reference 1\n"
"#define GL_EXT_buffer_reference2 1\n"
"#define GL_EXT_buffer_reference_uvec2 1\n"
"#define GL_EXT_demote_to_helper_invocation 1\n"
// GL_KHR_shader_subgroup
"#define GL_KHR_shader_subgroup_basic 1\n"
@ -399,8 +429,8 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_KHR_shader_subgroup_quad 1\n"
"#define E_GL_EXT_shader_atomic_int64 1\n"
"#define E_GL_EXT_shader_realtime_clock 1\n"
#ifdef AMD_EXTENSIONS
"#define GL_AMD_shader_ballot 1\n"
"#define GL_AMD_shader_trinary_minmax 1\n"
"#define GL_AMD_shader_explicit_vertex_parameter 1\n"
@ -411,9 +441,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_AMD_shader_image_load_store_lod 1\n"
"#define GL_AMD_shader_fragment_mask 1\n"
"#define GL_AMD_gpu_shader_half_float_fetch 1\n"
#endif
#ifdef NV_EXTENSIONS
"#define GL_INTEL_shader_integer_functions2 1\n"
"#define GL_NV_sample_mask_override_coverage 1\n"
"#define GL_NV_geometry_shader_passthrough 1\n"
"#define GL_NV_viewport_array2 1\n"
@ -426,7 +456,9 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_NV_compute_shader_derivatives 1\n"
"#define GL_NV_shader_texture_footprint 1\n"
"#define GL_NV_mesh_shader 1\n"
#endif
"#define GL_NV_cooperative_matrix 1\n"
"#define GL_NV_integer_cooperative_matrix 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int8 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_int16 1\n"
@ -435,6 +467,11 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_EXT_shader_explicit_arithmetic_types_float16 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_float32 1\n"
"#define GL_EXT_shader_explicit_arithmetic_types_float64 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int8 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int16 1\n"
"#define GL_EXT_shader_subgroup_extended_types_int64 1\n"
"#define GL_EXT_shader_subgroup_extended_types_float16 1\n"
;
if (version >= 150) {
@ -444,13 +481,16 @@ void TParseVersions::getPreamble(std::string& preamble)
if (profile == ECompatibilityProfile)
preamble += "#define GL_compatibility_profile 1\n";
}
#endif // GLSLANG_WEB
}
if ((profile != EEsProfile && version >= 140) ||
(profile == EEsProfile && version >= 310)) {
#ifndef GLSLANG_WEB
if ((!isEsProfile() && version >= 140) ||
(isEsProfile() && version >= 310)) {
preamble +=
"#define GL_EXT_device_group 1\n"
"#define GL_EXT_multiview 1\n"
"#define GL_NV_shader_sm_builtins 1\n"
;
}
@ -466,6 +506,7 @@ void TParseVersions::getPreamble(std::string& preamble)
"#define GL_GOOGLE_cpp_style_line_directive 1\n"
"#define GL_GOOGLE_include_directive 1\n"
;
#endif
// #define VULKAN XXXX
const int numberBufSize = 12;
@ -476,6 +517,8 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
#ifndef GLSLANG_WEB
// #define GL_SPIRV XXXX
if (spvVersion.openGl > 0) {
preamble += "#define GL_SPIRV ";
@ -483,22 +526,7 @@ void TParseVersions::getPreamble(std::string& preamble)
preamble += numberBuf;
preamble += "\n";
}
}
//
// When to use requireProfile():
//
// Use if only some profiles support a feature. However, if within a profile the feature
// is version or extension specific, follow this call with calls to profileRequires().
//
// Operation: If the current profile is not one of the profileMask,
// give an error message.
//
void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (profile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
#endif
}
//
@ -508,12 +536,12 @@ const char* StageName(EShLanguage stage)
{
switch(stage) {
case EShLangVertex: return "vertex";
case EShLangFragment: return "fragment";
case EShLangCompute: return "compute";
#ifndef GLSLANG_WEB
case EShLangTessControl: return "tessellation control";
case EShLangTessEvaluation: return "tessellation evaluation";
case EShLangGeometry: return "geometry";
case EShLangFragment: return "fragment";
case EShLangCompute: return "compute";
#ifdef NV_EXTENSIONS
case EShLangRayGenNV: return "ray-generation";
case EShLangIntersectNV: return "intersection";
case EShLangAnyHitNV: return "any-hit";
@ -527,53 +555,6 @@ const char* StageName(EShLanguage stage)
}
}
//
// When to use profileRequires():
//
// If a set of profiles have the same requirements for what version or extensions
// are needed to support a feature.
//
// It must be called for each profile that needs protection. Use requireProfile() first
// to reduce that set of profiles.
//
// Operation: Will issue warnings/errors based on the current profile, version, and extension
// behaviors. It only checks extensions when the current profile is one of the profileMask.
//
// A minVersion of 0 means no version of the profileMask support this in core,
// the extension must be present.
//
// entry point that takes multiple extensions
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc)
{
if (profile & profileMask) {
bool okay = false;
if (minVersion > 0 && version >= minVersion)
okay = true;
for (int i = 0; i < numExtensions; ++i) {
switch (getExtensionBehavior(extensions[i])) {
case EBhWarn:
infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
// fall through
case EBhRequire:
case EBhEnable:
okay = true;
break;
default: break; // some compilers want this
}
}
if (! okay)
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
}
// entry point for the above that takes a single extension
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension, const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
//
// When to use requireStage()
//
@ -594,6 +575,75 @@ void TParseVersions::requireStage(const TSourceLoc& loc, EShLanguage stage, cons
requireStage(loc, static_cast<EShLanguageMask>(1 << stage), featureDesc);
}
#ifndef GLSLANG_WEB
//
// When to use requireProfile():
//
// Use if only some profiles support a feature. However, if within a profile the feature
// is version or extension specific, follow this call with calls to profileRequires().
//
// Operation: If the current profile is not one of the profileMask,
// give an error message.
//
void TParseVersions::requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (profile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
}
//
// When to use profileRequires():
//
// If a set of profiles have the same requirements for what version or extensions
// are needed to support a feature.
//
// It must be called for each profile that needs protection. Use requireProfile() first
// to reduce that set of profiles.
//
// Operation: Will issue warnings/errors based on the current profile, version, and extension
// behaviors. It only checks extensions when the current profile is one of the profileMask.
//
// A minVersion of 0 means no version of the profileMask support this in core,
// the extension must be present.
//
// entry point that takes multiple extensions
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc)
{
if (profile & profileMask) {
bool okay = minVersion > 0 && version >= minVersion;
#ifndef GLSLANG_WEB
for (int i = 0; i < numExtensions; ++i) {
switch (getExtensionBehavior(extensions[i])) {
case EBhWarn:
infoSink.info.message(EPrefixWarning, ("extension " + TString(extensions[i]) + " is being used for " + featureDesc).c_str(), loc);
// fall through
case EBhRequire:
case EBhEnable:
okay = true;
break;
default: break; // some compilers want this
}
}
#endif
if (! okay)
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
}
// entry point for the above that takes a single extension
void TParseVersions::profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
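
A hypothetical call pattern for the two guards above, shaped after existing call sites such as the GL_NV_mesh_shader check later in this file (the feature string, versions, and extension choice are illustrative only, not part of this commit):

// Inside a TParseContext check (hypothetical example): allow "example feature"
// in ES and desktop profiles, in core since ES 310 / desktop 450, or on older
// versions when the named extension is enabled.
requireProfile(loc, EEsProfile | ECoreProfile | ECompatibilityProfile, "example feature");
profileRequires(loc, EEsProfile, 310, E_GL_EXT_shader_non_constant_global_initializers, "example feature");
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 450, nullptr, "example feature");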
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
//
// Within a set of profiles, see if a feature is deprecated and give an error or warning based on whether
// a future compatibility context is being use.
@ -627,11 +677,6 @@ void TParseVersions::requireNotRemoved(const TSourceLoc& loc, int profileMask, i
}
}
void TParseVersions::unimplemented(const TSourceLoc& loc, const char* featureDesc)
{
error(loc, "feature not yet implemented", featureDesc, "");
}
// Returns true if at least one of the extensions in the extensions parameter is requested. Otherwise, returns false.
// Warns appropriately if the requested behavior of an extension is "warn".
bool TParseVersions::checkExtensionsRequested(const TSourceLoc& loc, int numExtensions, const char* const extensions[], const char* featureDesc)
@ -800,10 +845,22 @@ void TParseVersions::updateExtensionBehavior(int line, const char* extension, co
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
else if (strcmp(extension, "GL_KHR_shader_subgroup_quad") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#ifdef NV_EXTENSIONS
else if (strcmp(extension, "GL_NV_shader_subgroup_partitioned") == 0)
updateExtensionBehavior(line, "GL_KHR_shader_subgroup_basic", behaviorString);
#endif
else if (strcmp(extension, "GL_EXT_buffer_reference2") == 0 ||
strcmp(extension, "GL_EXT_buffer_reference_uvec2") == 0)
updateExtensionBehavior(line, "GL_EXT_buffer_reference", behaviorString);
else if (strcmp(extension, "GL_NV_integer_cooperative_matrix") == 0)
updateExtensionBehavior(line, "GL_NV_cooperative_matrix", behaviorString);
// subgroup extended types to explicit types
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int8") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int8", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int16") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int16", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_int64") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_int64", behaviorString);
else if (strcmp(extension, "GL_EXT_shader_subgroup_extended_types_float16") == 0)
updateExtensionBehavior(line, "GL_EXT_shader_explicit_arithmetic_types_float16", behaviorString);
}
void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBehavior behavior)
@ -849,7 +906,6 @@ void TParseVersions::updateExtensionBehavior(const char* extension, TExtensionBe
// Check if extension is used with correct shader stage.
void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * const extension)
{
#ifdef NV_EXTENSIONS
// GL_NV_mesh_shader extension is only allowed in task/mesh shaders
if (strcmp(extension, "GL_NV_mesh_shader") == 0) {
requireStage(loc, (EShLanguageMask)(EShLangTaskNVMask | EShLangMeshNVMask | EShLangFragmentMask),
@ -857,7 +913,6 @@ void TParseVersions::checkExtensionStage(const TSourceLoc& loc, const char * con
profileRequires(loc, ECoreProfile, 450, 0, "#extension GL_NV_mesh_shader");
profileRequires(loc, EEsProfile, 320, 0, "#extension GL_NV_mesh_shader");
}
#endif
}
// Call for any operation needing full GLSL integer data-type support.
@ -879,9 +934,7 @@ void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool bu
{
if (!builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
@ -891,9 +944,7 @@ void TParseVersions::float16Check(const TSourceLoc& loc, const char* op, bool bu
bool TParseVersions::float16Arithmetic()
{
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
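These helpers all pass a candidate list together with sizeof(extensions)/sizeof(extensions[0]), and the feature is permitted when any one of the listed extensions is enabled. A small self-contained sketch of that "any of N" check, using an invented predicate in place of the real extension table:

#include <cstring>
#include <iostream>

// Returns true if any name in the list satisfies the predicate (here, a stand-in "is enabled" check).
static bool anyTurnedOn(int numExtensions, const char* const extensions[],
                        bool (*isEnabled)(const char*))
{
    for (int i = 0; i < numExtensions; ++i)
        if (isEnabled(extensions[i]))
            return true;
    return false;
}

static bool fakeEnabled(const char* ext)
{
    // Pretend only the float16 arithmetic extension is on.
    return std::strcmp(ext, "GL_EXT_shader_explicit_arithmetic_types_float16") == 0;
}

int main()
{
    const char* const extensions[] = {
        "GL_AMD_gpu_shader_half_float",
        "GL_EXT_shader_explicit_arithmetic_types",
        "GL_EXT_shader_explicit_arithmetic_types_float16"};
    // sizeof(array)/sizeof(array[0]) yields the element count at compile time.
    std::cout << anyTurnedOn(int(sizeof(extensions)/sizeof(extensions[0])),
                             extensions, fakeEnabled) << "\n";   // prints 1
}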
@ -902,9 +953,7 @@ bool TParseVersions::float16Arithmetic()
bool TParseVersions::int16Arithmetic()
{
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
return extensionsTurnedOn(sizeof(extensions)/sizeof(extensions[0]), extensions);
@ -926,9 +975,7 @@ void TParseVersions::requireFloat16Arithmetic(const TSourceLoc& loc, const char*
combined += featureDesc;
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
@ -942,9 +989,7 @@ void TParseVersions::requireInt16Arithmetic(const TSourceLoc& loc, const char* o
combined += featureDesc;
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, combined.c_str());
@ -967,9 +1012,7 @@ void TParseVersions::float16ScalarVectorCheck(const TSourceLoc& loc, const char*
{
if (!builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_half_float,
#endif
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_float16};
@ -1009,7 +1052,6 @@ void TParseVersions::explicitInt8Check(const TSourceLoc& loc, const char* op, bo
}
}
#ifdef AMD_EXTENSIONS
// Call for any operation needing GLSL float16 opaque-type support
void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
@ -1019,16 +1061,13 @@ void TParseVersions::float16OpaqueCheck(const TSourceLoc& loc, const char* op, b
profileRequires(loc, ECoreProfile | ECompatibilityProfile, 400, nullptr, op);
}
}
#endif
// Call for any operation needing GLSL explicit int16 data-type support.
void TParseVersions::explicitInt16Check(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (! builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
const char* const extensions[] = {
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
@ -1039,9 +1078,7 @@ void TParseVersions::int16ScalarVectorCheck(const TSourceLoc& loc, const char* o
{
if (! builtIn) {
const char* const extensions[] = {
#if AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_16bit_storage,
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16};
@ -1083,6 +1120,22 @@ void TParseVersions::int64Check(const TSourceLoc& loc, const char* op, bool buil
}
}
void TParseVersions::fcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[] = {E_GL_NV_cooperative_matrix};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
void TParseVersions::intcoopmatCheck(const TSourceLoc& loc, const char* op, bool builtIn)
{
if (!builtIn) {
const char* const extensions[] = {E_GL_NV_integer_cooperative_matrix};
requireExtensions(loc, sizeof(extensions)/sizeof(extensions[0]), extensions, op);
}
}
#endif // GLSLANG_WEB
// Call for any operation removed because SPIR-V is in use.
void TParseVersions::spvRemoved(const TSourceLoc& loc, const char* op)
{
@ -1100,15 +1153,19 @@ void TParseVersions::vulkanRemoved(const TSourceLoc& loc, const char* op)
// Call for any operation that requires Vulkan.
void TParseVersions::requireVulkan(const TSourceLoc& loc, const char* op)
{
#ifndef GLSLANG_WEB
if (spvVersion.vulkan == 0)
error(loc, "only allowed when using GLSL for Vulkan", op, "");
#endif
}
// Call for any operation that requires SPIR-V.
void TParseVersions::requireSpv(const TSourceLoc& loc, const char* op)
{
#ifndef GLSLANG_WEB
if (spvVersion.spv == 0)
error(loc, "only allowed when generating SPIR-V", op, "");
#endif
}
} // end namespace glslang

View file

@ -124,8 +124,10 @@ const char* const E_GL_ARB_compute_shader = "GL_ARB_compute_shader
const char* const E_GL_ARB_tessellation_shader = "GL_ARB_tessellation_shader";
const char* const E_GL_ARB_enhanced_layouts = "GL_ARB_enhanced_layouts";
const char* const E_GL_ARB_texture_cube_map_array = "GL_ARB_texture_cube_map_array";
const char* const E_GL_ARB_texture_multisample = "GL_ARB_texture_multisample";
const char* const E_GL_ARB_shader_texture_lod = "GL_ARB_shader_texture_lod";
const char* const E_GL_ARB_explicit_attrib_location = "GL_ARB_explicit_attrib_location";
const char* const E_GL_ARB_explicit_uniform_location = "GL_ARB_explicit_uniform_location";
const char* const E_GL_ARB_shader_image_load_store = "GL_ARB_shader_image_load_store";
const char* const E_GL_ARB_shader_atomic_counters = "GL_ARB_shader_atomic_counters";
const char* const E_GL_ARB_shader_draw_parameters = "GL_ARB_shader_draw_parameters";
@ -141,6 +143,10 @@ const char* const E_GL_ARB_shader_stencil_export = "GL_ARB_shader_stencil
// const char* const E_GL_ARB_cull_distance = "GL_ARB_cull_distance"; // present for 4.5, but need extension control over block members
const char* const E_GL_ARB_post_depth_coverage = "GL_ARB_post_depth_coverage";
const char* const E_GL_ARB_shader_viewport_layer_array = "GL_ARB_shader_viewport_layer_array";
const char* const E_GL_ARB_fragment_shader_interlock = "GL_ARB_fragment_shader_interlock";
const char* const E_GL_ARB_shader_clock = "GL_ARB_shader_clock";
const char* const E_GL_ARB_uniform_buffer_object = "GL_ARB_uniform_buffer_object";
const char* const E_GL_ARB_sample_shading = "GL_ARB_sample_shading";
const char* const E_GL_KHR_shader_subgroup_basic = "GL_KHR_shader_subgroup_basic";
const char* const E_GL_KHR_shader_subgroup_vote = "GL_KHR_shader_subgroup_vote";
@ -171,6 +177,10 @@ const char* const E_GL_EXT_samplerless_texture_functions = "GL_EXT_samplerles
const char* const E_GL_EXT_scalar_block_layout = "GL_EXT_scalar_block_layout";
const char* const E_GL_EXT_fragment_invocation_density = "GL_EXT_fragment_invocation_density";
const char* const E_GL_EXT_buffer_reference = "GL_EXT_buffer_reference";
const char* const E_GL_EXT_buffer_reference2 = "GL_EXT_buffer_reference2";
const char* const E_GL_EXT_buffer_reference_uvec2 = "GL_EXT_buffer_reference_uvec2";
const char* const E_GL_EXT_demote_to_helper_invocation = "GL_EXT_demote_to_helper_invocation";
const char* const E_GL_EXT_shader_realtime_clock = "GL_EXT_shader_realtime_clock";
// Arrays of extensions for the above viewportEXTs duplications
@ -188,7 +198,6 @@ const int Num_OVR_multiview_EXTs = sizeof(OVR_multiview_EXTs) / sizeof(OVR_multi
const char* const E_GL_GOOGLE_cpp_style_line_directive = "GL_GOOGLE_cpp_style_line_directive";
const char* const E_GL_GOOGLE_include_directive = "GL_GOOGLE_include_directive";
#ifdef AMD_EXTENSIONS
const char* const E_GL_AMD_shader_ballot = "GL_AMD_shader_ballot";
const char* const E_GL_AMD_shader_trinary_minmax = "GL_AMD_shader_trinary_minmax";
const char* const E_GL_AMD_shader_explicit_vertex_parameter = "GL_AMD_shader_explicit_vertex_parameter";
@ -199,9 +208,8 @@ const char* const E_GL_AMD_gpu_shader_int16 = "GL_AMD_gpu_sh
const char* const E_GL_AMD_shader_image_load_store_lod = "GL_AMD_shader_image_load_store_lod";
const char* const E_GL_AMD_shader_fragment_mask = "GL_AMD_shader_fragment_mask";
const char* const E_GL_AMD_gpu_shader_half_float_fetch = "GL_AMD_gpu_shader_half_float_fetch";
#endif
#ifdef NV_EXTENSIONS
const char* const E_GL_INTEL_shader_integer_functions2 = "GL_INTEL_shader_integer_functions2";
const char* const E_GL_NV_sample_mask_override_coverage = "GL_NV_sample_mask_override_coverage";
const char* const E_SPV_NV_geometry_shader_passthrough = "GL_NV_geometry_shader_passthrough";
@ -223,7 +231,10 @@ const char* const E_GL_NV_mesh_shader = "GL_NV_mesh_sh
const char* const viewportEXTs[] = { E_GL_ARB_shader_viewport_layer_array, E_GL_NV_viewport_array2 };
const int Num_viewportEXTs = sizeof(viewportEXTs) / sizeof(viewportEXTs[0]);
#endif
const char* const E_GL_NV_cooperative_matrix = "GL_NV_cooperative_matrix";
const char* const E_GL_NV_shader_sm_builtins = "GL_NV_shader_sm_builtins";
const char* const E_GL_NV_integer_cooperative_matrix = "GL_NV_integer_cooperative_matrix";
// AEP
const char* const E_GL_ANDROID_extension_pack_es31a = "GL_ANDROID_extension_pack_es31a";
@ -253,7 +264,7 @@ const char* const E_GL_OES_tessellation_point_size = "GL_OES_tessel
const char* const E_GL_OES_texture_buffer = "GL_OES_texture_buffer";
const char* const E_GL_OES_texture_cube_map_array = "GL_OES_texture_cube_map_array";
// KHX
// EXT
const char* const E_GL_EXT_shader_explicit_arithmetic_types = "GL_EXT_shader_explicit_arithmetic_types";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int8 = "GL_EXT_shader_explicit_arithmetic_types_int8";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_int16 = "GL_EXT_shader_explicit_arithmetic_types_int16";
@ -263,6 +274,11 @@ const char* const E_GL_EXT_shader_explicit_arithmetic_types_float16 = "GL_EXT_s
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float32 = "GL_EXT_shader_explicit_arithmetic_types_float32";
const char* const E_GL_EXT_shader_explicit_arithmetic_types_float64 = "GL_EXT_shader_explicit_arithmetic_types_float64";
const char* const E_GL_EXT_shader_subgroup_extended_types_int8 = "GL_EXT_shader_subgroup_extended_types_int8";
const char* const E_GL_EXT_shader_subgroup_extended_types_int16 = "GL_EXT_shader_subgroup_extended_types_int16";
const char* const E_GL_EXT_shader_subgroup_extended_types_int64 = "GL_EXT_shader_subgroup_extended_types_int64";
const char* const E_GL_EXT_shader_subgroup_extended_types_float16 = "GL_EXT_shader_subgroup_extended_types_float16";
// Arrays of extensions for the above AEP duplications
const char* const AEP_geometry_shader[] = { E_GL_EXT_geometry_shader, E_GL_OES_geometry_shader };

View file

@ -34,6 +34,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "attribute.h"
#include "../Include/intermediate.h"
#include "ParseHelper.h"
@ -52,6 +54,7 @@ bool TAttributeArgs::getInt(int& value, int argNum) const
return true;
}
// extract strings out of attribute arguments stored in attribute aggregate.
// convert to lower case if convertToLower is true (for case-insensitive compare convenience)
bool TAttributeArgs::getString(TString& value, int argNum, bool convertToLower) const
@ -85,6 +88,9 @@ const TConstUnion* TAttributeArgs::getConstUnion(TBasicType basicType, int argNu
if (argNum >= (int)args->getSequence().size())
return nullptr;
if (args->getSequence()[argNum]->getAsConstantUnion() == nullptr)
return nullptr;
const TConstUnion* constVal = &args->getSequence()[argNum]->getAsConstantUnion()->getConstArray()[0];
if (constVal == nullptr || constVal->getType() != basicType)
return nullptr;
@ -107,6 +113,16 @@ TAttributeType TParseContext::attributeFromName(const TString& name) const
return EatDependencyInfinite;
else if (name == "dependency_length")
return EatDependencyLength;
else if (name == "min_iterations")
return EatMinIterations;
else if (name == "max_iterations")
return EatMaxIterations;
else if (name == "iteration_multiple")
return EatIterationMultiple;
else if (name == "peel_count")
return EatPeelCount;
else if (name == "partial_count")
return EatPartialCount;
else
return EatNone;
}
@ -222,29 +238,101 @@ void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermN
}
for (auto it = attributes.begin(); it != attributes.end(); ++it) {
if (it->name != EatDependencyLength && it->size() > 0) {
warn(node->getLoc(), "attribute with arguments not recognized, skipping", "", "");
continue;
}
int value;
const auto noArgument = [&](const char* feature) {
if (it->size() > 0) {
warn(node->getLoc(), "expected no arguments", feature, "");
return false;
}
return true;
};
const auto positiveSignedArgument = [&](const char* feature, int& value) {
if (it->size() == 1 && it->getInt(value)) {
if (value <= 0) {
error(node->getLoc(), "must be positive", feature, "");
return false;
}
} else {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
return true;
};
const auto unsignedArgument = [&](const char* feature, unsigned int& uiValue) {
int value;
if (!(it->size() == 1 && it->getInt(value))) {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
uiValue = (unsigned int)value;
return true;
};
const auto positiveUnsignedArgument = [&](const char* feature, unsigned int& uiValue) {
int value;
if (it->size() == 1 && it->getInt(value)) {
if (value == 0) {
error(node->getLoc(), "must be greater than or equal to 1", feature, "");
return false;
}
} else {
warn(node->getLoc(), "expected a single integer argument", feature, "");
return false;
}
uiValue = (unsigned int)value;
return true;
};
const auto spirv14 = [&](const char* feature) {
if (spvVersion.spv > 0 && spvVersion.spv < EShTargetSpv_1_4)
warn(node->getLoc(), "attribute requires a SPIR-V 1.4 target-env", feature, "");
};
int value = 0;
unsigned uiValue = 0;
switch (it->name) {
case EatUnroll:
loop->setUnroll();
if (noArgument("unroll"))
loop->setUnroll();
break;
case EatLoop:
loop->setDontUnroll();
if (noArgument("dont_unroll"))
loop->setDontUnroll();
break;
case EatDependencyInfinite:
loop->setLoopDependency(TIntermLoop::dependencyInfinite);
if (noArgument("dependency_infinite"))
loop->setLoopDependency(TIntermLoop::dependencyInfinite);
break;
case EatDependencyLength:
if (it->size() == 1 && it->getInt(value)) {
if (value <= 0)
error(node->getLoc(), "must be positive", "dependency_length", "");
if (positiveSignedArgument("dependency_length", value))
loop->setLoopDependency(value);
} else
warn(node->getLoc(), "expected a single integer argument", "dependency_length", "");
break;
case EatMinIterations:
spirv14("min_iterations");
if (unsignedArgument("min_iterations", uiValue))
loop->setMinIterations(uiValue);
break;
case EatMaxIterations:
spirv14("max_iterations");
if (unsignedArgument("max_iterations", uiValue))
loop->setMaxIterations(uiValue);
break;
case EatIterationMultiple:
spirv14("iteration_multiple");
if (positiveUnsignedArgument("iteration_multiple", uiValue))
loop->setIterationMultiple(uiValue);
break;
case EatPeelCount:
spirv14("peel_count");
if (unsignedArgument("peel_count", uiValue))
loop->setPeelCount(uiValue);
break;
case EatPartialCount:
spirv14("partial_count");
if (unsignedArgument("partial_count", uiValue))
loop->setPartialCount(uiValue);
break;
default:
warn(node->getLoc(), "attribute does not apply to a loop", "", "");
@ -253,5 +341,6 @@ void TParseContext::handleLoopAttributes(const TAttributes& attributes, TIntermN
}
}
} // end namespace glslang
#endif // GLSLANG_WEB
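handleLoopAttributes above funnels all argument checking through small lambdas (noArgument, positiveSignedArgument, unsignedArgument, and so on) so each attribute case only states its policy. A simplified standalone sketch of that shape, using an invented attribute record instead of glslang's TAttributes:

#include <iostream>
#include <string>
#include <vector>

// Simplified stand-in for one parsed attribute: a name plus optional integer arguments.
struct Attr { std::string name; std::vector<int> args; };

static void handleLoopAttr(const Attr& a)
{
    // Policy helpers, mirroring the lambda style used above.
    auto noArgument = [&](const char* feature) {
        if (!a.args.empty()) { std::cout << "warning: expected no arguments for " << feature << "\n"; return false; }
        return true;
    };
    auto positiveArgument = [&](const char* feature, int& value) {
        if (a.args.size() != 1) { std::cout << "warning: expected one integer for " << feature << "\n"; return false; }
        value = a.args[0];
        if (value <= 0) { std::cout << "error: " << feature << " must be positive\n"; return false; }
        return true;
    };

    int value = 0;
    if (a.name == "unroll") {
        if (noArgument("unroll")) std::cout << "loop marked for unrolling\n";
    } else if (a.name == "dependency_length") {
        if (positiveArgument("dependency_length", value)) std::cout << "dependency length = " << value << "\n";
    } else {
        std::cout << "warning: attribute does not apply to a loop\n";
    }
}

int main()
{
    handleLoopAttr({ "unroll", {} });
    handleLoopAttr({ "dependency_length", { 4 } });
    handleLoopAttr({ "dependency_length", { -1 } });
}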

View file

@ -71,7 +71,54 @@ namespace glslang {
EatPushConstant,
EatConstantId,
EatDependencyInfinite,
EatDependencyLength
EatDependencyLength,
EatMinIterations,
EatMaxIterations,
EatIterationMultiple,
EatPeelCount,
EatPartialCount,
EatFormatRgba32f,
EatFormatRgba16f,
EatFormatR32f,
EatFormatRgba8,
EatFormatRgba8Snorm,
EatFormatRg32f,
EatFormatRg16f,
EatFormatR11fG11fB10f,
EatFormatR16f,
EatFormatRgba16,
EatFormatRgb10A2,
EatFormatRg16,
EatFormatRg8,
EatFormatR16,
EatFormatR8,
EatFormatRgba16Snorm,
EatFormatRg16Snorm,
EatFormatRg8Snorm,
EatFormatR16Snorm,
EatFormatR8Snorm,
EatFormatRgba32i,
EatFormatRgba16i,
EatFormatRgba8i,
EatFormatR32i,
EatFormatRg32i,
EatFormatRg16i,
EatFormatRg8i,
EatFormatR16i,
EatFormatR8i,
EatFormatRgba32ui,
EatFormatRgba16ui,
EatFormatRgba8ui,
EatFormatR32ui,
EatFormatRgb10a2ui,
EatFormatRg32ui,
EatFormatRg16ui,
EatFormatRg8ui,
EatFormatR16ui,
EatFormatR8ui,
EatFormatUnknown,
EatNonWritable,
EatNonReadable
};
class TIntermAggregate;

View file

@ -78,7 +78,6 @@
#define GL_DOUBLE_MAT4x2 0x8F4D
#define GL_DOUBLE_MAT4x3 0x8F4E
#ifdef AMD_EXTENSIONS
// Those constants are borrowed from extension NV_gpu_shader5
#define GL_FLOAT16_NV 0x8FF8
#define GL_FLOAT16_VEC2_NV 0x8FF9
@ -94,7 +93,6 @@
#define GL_FLOAT16_MAT3x4_AMD 0x91CB
#define GL_FLOAT16_MAT4x2_AMD 0x91CC
#define GL_FLOAT16_MAT4x3_AMD 0x91CD
#endif
#define GL_SAMPLER_1D 0x8B5D
#define GL_SAMPLER_2D 0x8B5E
@ -117,7 +115,6 @@
#define GL_SAMPLER_CUBE_MAP_ARRAY_ARB 0x900C
#define GL_SAMPLER_CUBE_MAP_ARRAY_SHADOW_ARB 0x900D
#ifdef AMD_EXTENSIONS
#define GL_FLOAT16_SAMPLER_1D_AMD 0x91CE
#define GL_FLOAT16_SAMPLER_2D_AMD 0x91CF
#define GL_FLOAT16_SAMPLER_3D_AMD 0x91D0
@ -149,7 +146,6 @@
#define GL_FLOAT16_IMAGE_BUFFER_AMD 0x91E8
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_AMD 0x91E9
#define GL_FLOAT16_IMAGE_2D_MULTISAMPLE_ARRAY_AMD 0x91EA
#endif
#define GL_INT_SAMPLER_1D 0x8DC9
#define GL_INT_SAMPLER_2D 0x8DCA

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -45,410 +45,414 @@ extern int yydebug;
# define YYTOKENTYPE
enum yytokentype
{
ATTRIBUTE = 258,
VARYING = 259,
FLOAT16_T = 260,
FLOAT = 261,
FLOAT32_T = 262,
DOUBLE = 263,
FLOAT64_T = 264,
CONST = 265,
BOOL = 266,
INT = 267,
UINT = 268,
INT64_T = 269,
UINT64_T = 270,
INT32_T = 271,
UINT32_T = 272,
INT16_T = 273,
UINT16_T = 274,
INT8_T = 275,
UINT8_T = 276,
BREAK = 277,
CONTINUE = 278,
DO = 279,
ELSE = 280,
FOR = 281,
IF = 282,
DISCARD = 283,
RETURN = 284,
SWITCH = 285,
CASE = 286,
DEFAULT = 287,
SUBROUTINE = 288,
BVEC2 = 289,
BVEC3 = 290,
BVEC4 = 291,
IVEC2 = 292,
IVEC3 = 293,
IVEC4 = 294,
UVEC2 = 295,
UVEC3 = 296,
UVEC4 = 297,
I64VEC2 = 298,
I64VEC3 = 299,
I64VEC4 = 300,
U64VEC2 = 301,
U64VEC3 = 302,
U64VEC4 = 303,
I32VEC2 = 304,
I32VEC3 = 305,
I32VEC4 = 306,
U32VEC2 = 307,
U32VEC3 = 308,
U32VEC4 = 309,
I16VEC2 = 310,
I16VEC3 = 311,
I16VEC4 = 312,
U16VEC2 = 313,
U16VEC3 = 314,
U16VEC4 = 315,
I8VEC2 = 316,
I8VEC3 = 317,
I8VEC4 = 318,
U8VEC2 = 319,
U8VEC3 = 320,
U8VEC4 = 321,
VEC2 = 322,
VEC3 = 323,
VEC4 = 324,
MAT2 = 325,
MAT3 = 326,
MAT4 = 327,
CENTROID = 328,
IN = 329,
OUT = 330,
INOUT = 331,
UNIFORM = 332,
PATCH = 333,
SAMPLE = 334,
BUFFER = 335,
SHARED = 336,
NONUNIFORM = 337,
PAYLOADNV = 338,
PAYLOADINNV = 339,
HITATTRNV = 340,
CALLDATANV = 341,
CALLDATAINNV = 342,
COHERENT = 343,
VOLATILE = 344,
RESTRICT = 345,
READONLY = 346,
WRITEONLY = 347,
DEVICECOHERENT = 348,
QUEUEFAMILYCOHERENT = 349,
WORKGROUPCOHERENT = 350,
SUBGROUPCOHERENT = 351,
NONPRIVATE = 352,
DVEC2 = 353,
DVEC3 = 354,
DVEC4 = 355,
DMAT2 = 356,
DMAT3 = 357,
DMAT4 = 358,
F16VEC2 = 359,
F16VEC3 = 360,
F16VEC4 = 361,
F16MAT2 = 362,
F16MAT3 = 363,
F16MAT4 = 364,
F32VEC2 = 365,
F32VEC3 = 366,
F32VEC4 = 367,
F32MAT2 = 368,
F32MAT3 = 369,
F32MAT4 = 370,
F64VEC2 = 371,
F64VEC3 = 372,
F64VEC4 = 373,
F64MAT2 = 374,
F64MAT3 = 375,
F64MAT4 = 376,
NOPERSPECTIVE = 377,
FLAT = 378,
SMOOTH = 379,
LAYOUT = 380,
EXPLICITINTERPAMD = 381,
PERVERTEXNV = 382,
PERPRIMITIVENV = 383,
PERVIEWNV = 384,
PERTASKNV = 385,
MAT2X2 = 386,
MAT2X3 = 387,
MAT2X4 = 388,
MAT3X2 = 389,
MAT3X3 = 390,
MAT3X4 = 391,
MAT4X2 = 392,
MAT4X3 = 393,
MAT4X4 = 394,
DMAT2X2 = 395,
DMAT2X3 = 396,
DMAT2X4 = 397,
DMAT3X2 = 398,
DMAT3X3 = 399,
DMAT3X4 = 400,
DMAT4X2 = 401,
DMAT4X3 = 402,
DMAT4X4 = 403,
F16MAT2X2 = 404,
F16MAT2X3 = 405,
F16MAT2X4 = 406,
F16MAT3X2 = 407,
F16MAT3X3 = 408,
F16MAT3X4 = 409,
F16MAT4X2 = 410,
F16MAT4X3 = 411,
F16MAT4X4 = 412,
F32MAT2X2 = 413,
F32MAT2X3 = 414,
F32MAT2X4 = 415,
F32MAT3X2 = 416,
F32MAT3X3 = 417,
F32MAT3X4 = 418,
F32MAT4X2 = 419,
F32MAT4X3 = 420,
F32MAT4X4 = 421,
F64MAT2X2 = 422,
F64MAT2X3 = 423,
F64MAT2X4 = 424,
F64MAT3X2 = 425,
F64MAT3X3 = 426,
F64MAT3X4 = 427,
F64MAT4X2 = 428,
F64MAT4X3 = 429,
F64MAT4X4 = 430,
ATOMIC_UINT = 431,
ACCSTRUCTNV = 432,
SAMPLER1D = 433,
SAMPLER2D = 434,
SAMPLER3D = 435,
SAMPLERCUBE = 436,
SAMPLER1DSHADOW = 437,
SAMPLER2DSHADOW = 438,
SAMPLERCUBESHADOW = 439,
SAMPLER1DARRAY = 440,
SAMPLER2DARRAY = 441,
SAMPLER1DARRAYSHADOW = 442,
SAMPLER2DARRAYSHADOW = 443,
ISAMPLER1D = 444,
ISAMPLER2D = 445,
ISAMPLER3D = 446,
ISAMPLERCUBE = 447,
ISAMPLER1DARRAY = 448,
ISAMPLER2DARRAY = 449,
USAMPLER1D = 450,
USAMPLER2D = 451,
USAMPLER3D = 452,
USAMPLERCUBE = 453,
USAMPLER1DARRAY = 454,
USAMPLER2DARRAY = 455,
SAMPLER2DRECT = 456,
SAMPLER2DRECTSHADOW = 457,
ISAMPLER2DRECT = 458,
USAMPLER2DRECT = 459,
SAMPLERBUFFER = 460,
ISAMPLERBUFFER = 461,
USAMPLERBUFFER = 462,
SAMPLERCUBEARRAY = 463,
SAMPLERCUBEARRAYSHADOW = 464,
ISAMPLERCUBEARRAY = 465,
USAMPLERCUBEARRAY = 466,
SAMPLER2DMS = 467,
ISAMPLER2DMS = 468,
USAMPLER2DMS = 469,
SAMPLER2DMSARRAY = 470,
ISAMPLER2DMSARRAY = 471,
USAMPLER2DMSARRAY = 472,
SAMPLEREXTERNALOES = 473,
SAMPLEREXTERNAL2DY2YEXT = 474,
F16SAMPLER1D = 475,
F16SAMPLER2D = 476,
F16SAMPLER3D = 477,
F16SAMPLER2DRECT = 478,
F16SAMPLERCUBE = 479,
F16SAMPLER1DARRAY = 480,
F16SAMPLER2DARRAY = 481,
F16SAMPLERCUBEARRAY = 482,
F16SAMPLERBUFFER = 483,
F16SAMPLER2DMS = 484,
F16SAMPLER2DMSARRAY = 485,
F16SAMPLER1DSHADOW = 486,
F16SAMPLER2DSHADOW = 487,
F16SAMPLER1DARRAYSHADOW = 488,
F16SAMPLER2DARRAYSHADOW = 489,
F16SAMPLER2DRECTSHADOW = 490,
F16SAMPLERCUBESHADOW = 491,
F16SAMPLERCUBEARRAYSHADOW = 492,
SAMPLER = 493,
SAMPLERSHADOW = 494,
TEXTURE1D = 495,
TEXTURE2D = 496,
TEXTURE3D = 497,
TEXTURECUBE = 498,
TEXTURE1DARRAY = 499,
TEXTURE2DARRAY = 500,
ITEXTURE1D = 501,
ITEXTURE2D = 502,
ITEXTURE3D = 503,
ITEXTURECUBE = 504,
ITEXTURE1DARRAY = 505,
ITEXTURE2DARRAY = 506,
UTEXTURE1D = 507,
UTEXTURE2D = 508,
UTEXTURE3D = 509,
UTEXTURECUBE = 510,
UTEXTURE1DARRAY = 511,
UTEXTURE2DARRAY = 512,
TEXTURE2DRECT = 513,
ITEXTURE2DRECT = 514,
UTEXTURE2DRECT = 515,
TEXTUREBUFFER = 516,
ITEXTUREBUFFER = 517,
UTEXTUREBUFFER = 518,
TEXTURECUBEARRAY = 519,
ITEXTURECUBEARRAY = 520,
UTEXTURECUBEARRAY = 521,
TEXTURE2DMS = 522,
ITEXTURE2DMS = 523,
UTEXTURE2DMS = 524,
TEXTURE2DMSARRAY = 525,
ITEXTURE2DMSARRAY = 526,
UTEXTURE2DMSARRAY = 527,
F16TEXTURE1D = 528,
F16TEXTURE2D = 529,
F16TEXTURE3D = 530,
F16TEXTURE2DRECT = 531,
F16TEXTURECUBE = 532,
F16TEXTURE1DARRAY = 533,
F16TEXTURE2DARRAY = 534,
F16TEXTURECUBEARRAY = 535,
F16TEXTUREBUFFER = 536,
F16TEXTURE2DMS = 537,
F16TEXTURE2DMSARRAY = 538,
SUBPASSINPUT = 539,
SUBPASSINPUTMS = 540,
ISUBPASSINPUT = 541,
ISUBPASSINPUTMS = 542,
USUBPASSINPUT = 543,
USUBPASSINPUTMS = 544,
F16SUBPASSINPUT = 545,
F16SUBPASSINPUTMS = 546,
IMAGE1D = 547,
IIMAGE1D = 548,
UIMAGE1D = 549,
IMAGE2D = 550,
IIMAGE2D = 551,
UIMAGE2D = 552,
IMAGE3D = 553,
IIMAGE3D = 554,
UIMAGE3D = 555,
IMAGE2DRECT = 556,
IIMAGE2DRECT = 557,
UIMAGE2DRECT = 558,
IMAGECUBE = 559,
IIMAGECUBE = 560,
UIMAGECUBE = 561,
IMAGEBUFFER = 562,
IIMAGEBUFFER = 563,
UIMAGEBUFFER = 564,
IMAGE1DARRAY = 565,
IIMAGE1DARRAY = 566,
UIMAGE1DARRAY = 567,
IMAGE2DARRAY = 568,
IIMAGE2DARRAY = 569,
UIMAGE2DARRAY = 570,
IMAGECUBEARRAY = 571,
IIMAGECUBEARRAY = 572,
UIMAGECUBEARRAY = 573,
IMAGE2DMS = 574,
IIMAGE2DMS = 575,
UIMAGE2DMS = 576,
IMAGE2DMSARRAY = 577,
IIMAGE2DMSARRAY = 578,
UIMAGE2DMSARRAY = 579,
F16IMAGE1D = 580,
F16IMAGE2D = 581,
F16IMAGE3D = 582,
F16IMAGE2DRECT = 583,
F16IMAGECUBE = 584,
F16IMAGE1DARRAY = 585,
F16IMAGE2DARRAY = 586,
F16IMAGECUBEARRAY = 587,
F16IMAGEBUFFER = 588,
F16IMAGE2DMS = 589,
F16IMAGE2DMSARRAY = 590,
STRUCT = 591,
VOID = 592,
WHILE = 593,
IDENTIFIER = 594,
TYPE_NAME = 595,
FLOATCONSTANT = 596,
DOUBLECONSTANT = 597,
INT16CONSTANT = 598,
UINT16CONSTANT = 599,
INT32CONSTANT = 600,
UINT32CONSTANT = 601,
CONST = 258,
BOOL = 259,
INT = 260,
UINT = 261,
FLOAT = 262,
BVEC2 = 263,
BVEC3 = 264,
BVEC4 = 265,
IVEC2 = 266,
IVEC3 = 267,
IVEC4 = 268,
UVEC2 = 269,
UVEC3 = 270,
UVEC4 = 271,
VEC2 = 272,
VEC3 = 273,
VEC4 = 274,
MAT2 = 275,
MAT3 = 276,
MAT4 = 277,
MAT2X2 = 278,
MAT2X3 = 279,
MAT2X4 = 280,
MAT3X2 = 281,
MAT3X3 = 282,
MAT3X4 = 283,
MAT4X2 = 284,
MAT4X3 = 285,
MAT4X4 = 286,
SAMPLER2D = 287,
SAMPLER3D = 288,
SAMPLERCUBE = 289,
SAMPLER2DSHADOW = 290,
SAMPLERCUBESHADOW = 291,
SAMPLER2DARRAY = 292,
SAMPLER2DARRAYSHADOW = 293,
ISAMPLER2D = 294,
ISAMPLER3D = 295,
ISAMPLERCUBE = 296,
ISAMPLER2DARRAY = 297,
USAMPLER2D = 298,
USAMPLER3D = 299,
USAMPLERCUBE = 300,
USAMPLER2DARRAY = 301,
SAMPLER = 302,
SAMPLERSHADOW = 303,
TEXTURE2D = 304,
TEXTURE3D = 305,
TEXTURECUBE = 306,
TEXTURE2DARRAY = 307,
ITEXTURE2D = 308,
ITEXTURE3D = 309,
ITEXTURECUBE = 310,
ITEXTURE2DARRAY = 311,
UTEXTURE2D = 312,
UTEXTURE3D = 313,
UTEXTURECUBE = 314,
UTEXTURE2DARRAY = 315,
ATTRIBUTE = 316,
VARYING = 317,
FLOAT16_T = 318,
FLOAT32_T = 319,
DOUBLE = 320,
FLOAT64_T = 321,
INT64_T = 322,
UINT64_T = 323,
INT32_T = 324,
UINT32_T = 325,
INT16_T = 326,
UINT16_T = 327,
INT8_T = 328,
UINT8_T = 329,
I64VEC2 = 330,
I64VEC3 = 331,
I64VEC4 = 332,
U64VEC2 = 333,
U64VEC3 = 334,
U64VEC4 = 335,
I32VEC2 = 336,
I32VEC3 = 337,
I32VEC4 = 338,
U32VEC2 = 339,
U32VEC3 = 340,
U32VEC4 = 341,
I16VEC2 = 342,
I16VEC3 = 343,
I16VEC4 = 344,
U16VEC2 = 345,
U16VEC3 = 346,
U16VEC4 = 347,
I8VEC2 = 348,
I8VEC3 = 349,
I8VEC4 = 350,
U8VEC2 = 351,
U8VEC3 = 352,
U8VEC4 = 353,
DVEC2 = 354,
DVEC3 = 355,
DVEC4 = 356,
DMAT2 = 357,
DMAT3 = 358,
DMAT4 = 359,
F16VEC2 = 360,
F16VEC3 = 361,
F16VEC4 = 362,
F16MAT2 = 363,
F16MAT3 = 364,
F16MAT4 = 365,
F32VEC2 = 366,
F32VEC3 = 367,
F32VEC4 = 368,
F32MAT2 = 369,
F32MAT3 = 370,
F32MAT4 = 371,
F64VEC2 = 372,
F64VEC3 = 373,
F64VEC4 = 374,
F64MAT2 = 375,
F64MAT3 = 376,
F64MAT4 = 377,
DMAT2X2 = 378,
DMAT2X3 = 379,
DMAT2X4 = 380,
DMAT3X2 = 381,
DMAT3X3 = 382,
DMAT3X4 = 383,
DMAT4X2 = 384,
DMAT4X3 = 385,
DMAT4X4 = 386,
F16MAT2X2 = 387,
F16MAT2X3 = 388,
F16MAT2X4 = 389,
F16MAT3X2 = 390,
F16MAT3X3 = 391,
F16MAT3X4 = 392,
F16MAT4X2 = 393,
F16MAT4X3 = 394,
F16MAT4X4 = 395,
F32MAT2X2 = 396,
F32MAT2X3 = 397,
F32MAT2X4 = 398,
F32MAT3X2 = 399,
F32MAT3X3 = 400,
F32MAT3X4 = 401,
F32MAT4X2 = 402,
F32MAT4X3 = 403,
F32MAT4X4 = 404,
F64MAT2X2 = 405,
F64MAT2X3 = 406,
F64MAT2X4 = 407,
F64MAT3X2 = 408,
F64MAT3X3 = 409,
F64MAT3X4 = 410,
F64MAT4X2 = 411,
F64MAT4X3 = 412,
F64MAT4X4 = 413,
ATOMIC_UINT = 414,
ACCSTRUCTNV = 415,
FCOOPMATNV = 416,
ICOOPMATNV = 417,
UCOOPMATNV = 418,
SAMPLERCUBEARRAY = 419,
SAMPLERCUBEARRAYSHADOW = 420,
ISAMPLERCUBEARRAY = 421,
USAMPLERCUBEARRAY = 422,
SAMPLER1D = 423,
SAMPLER1DARRAY = 424,
SAMPLER1DARRAYSHADOW = 425,
ISAMPLER1D = 426,
SAMPLER1DSHADOW = 427,
SAMPLER2DRECT = 428,
SAMPLER2DRECTSHADOW = 429,
ISAMPLER2DRECT = 430,
USAMPLER2DRECT = 431,
SAMPLERBUFFER = 432,
ISAMPLERBUFFER = 433,
USAMPLERBUFFER = 434,
SAMPLER2DMS = 435,
ISAMPLER2DMS = 436,
USAMPLER2DMS = 437,
SAMPLER2DMSARRAY = 438,
ISAMPLER2DMSARRAY = 439,
USAMPLER2DMSARRAY = 440,
SAMPLEREXTERNALOES = 441,
SAMPLEREXTERNAL2DY2YEXT = 442,
ISAMPLER1DARRAY = 443,
USAMPLER1D = 444,
USAMPLER1DARRAY = 445,
F16SAMPLER1D = 446,
F16SAMPLER2D = 447,
F16SAMPLER3D = 448,
F16SAMPLER2DRECT = 449,
F16SAMPLERCUBE = 450,
F16SAMPLER1DARRAY = 451,
F16SAMPLER2DARRAY = 452,
F16SAMPLERCUBEARRAY = 453,
F16SAMPLERBUFFER = 454,
F16SAMPLER2DMS = 455,
F16SAMPLER2DMSARRAY = 456,
F16SAMPLER1DSHADOW = 457,
F16SAMPLER2DSHADOW = 458,
F16SAMPLER1DARRAYSHADOW = 459,
F16SAMPLER2DARRAYSHADOW = 460,
F16SAMPLER2DRECTSHADOW = 461,
F16SAMPLERCUBESHADOW = 462,
F16SAMPLERCUBEARRAYSHADOW = 463,
IMAGE1D = 464,
IIMAGE1D = 465,
UIMAGE1D = 466,
IMAGE2D = 467,
IIMAGE2D = 468,
UIMAGE2D = 469,
IMAGE3D = 470,
IIMAGE3D = 471,
UIMAGE3D = 472,
IMAGE2DRECT = 473,
IIMAGE2DRECT = 474,
UIMAGE2DRECT = 475,
IMAGECUBE = 476,
IIMAGECUBE = 477,
UIMAGECUBE = 478,
IMAGEBUFFER = 479,
IIMAGEBUFFER = 480,
UIMAGEBUFFER = 481,
IMAGE1DARRAY = 482,
IIMAGE1DARRAY = 483,
UIMAGE1DARRAY = 484,
IMAGE2DARRAY = 485,
IIMAGE2DARRAY = 486,
UIMAGE2DARRAY = 487,
IMAGECUBEARRAY = 488,
IIMAGECUBEARRAY = 489,
UIMAGECUBEARRAY = 490,
IMAGE2DMS = 491,
IIMAGE2DMS = 492,
UIMAGE2DMS = 493,
IMAGE2DMSARRAY = 494,
IIMAGE2DMSARRAY = 495,
UIMAGE2DMSARRAY = 496,
F16IMAGE1D = 497,
F16IMAGE2D = 498,
F16IMAGE3D = 499,
F16IMAGE2DRECT = 500,
F16IMAGECUBE = 501,
F16IMAGE1DARRAY = 502,
F16IMAGE2DARRAY = 503,
F16IMAGECUBEARRAY = 504,
F16IMAGEBUFFER = 505,
F16IMAGE2DMS = 506,
F16IMAGE2DMSARRAY = 507,
TEXTURECUBEARRAY = 508,
ITEXTURECUBEARRAY = 509,
UTEXTURECUBEARRAY = 510,
TEXTURE1D = 511,
ITEXTURE1D = 512,
UTEXTURE1D = 513,
TEXTURE1DARRAY = 514,
ITEXTURE1DARRAY = 515,
UTEXTURE1DARRAY = 516,
TEXTURE2DRECT = 517,
ITEXTURE2DRECT = 518,
UTEXTURE2DRECT = 519,
TEXTUREBUFFER = 520,
ITEXTUREBUFFER = 521,
UTEXTUREBUFFER = 522,
TEXTURE2DMS = 523,
ITEXTURE2DMS = 524,
UTEXTURE2DMS = 525,
TEXTURE2DMSARRAY = 526,
ITEXTURE2DMSARRAY = 527,
UTEXTURE2DMSARRAY = 528,
F16TEXTURE1D = 529,
F16TEXTURE2D = 530,
F16TEXTURE3D = 531,
F16TEXTURE2DRECT = 532,
F16TEXTURECUBE = 533,
F16TEXTURE1DARRAY = 534,
F16TEXTURE2DARRAY = 535,
F16TEXTURECUBEARRAY = 536,
F16TEXTUREBUFFER = 537,
F16TEXTURE2DMS = 538,
F16TEXTURE2DMSARRAY = 539,
SUBPASSINPUT = 540,
SUBPASSINPUTMS = 541,
ISUBPASSINPUT = 542,
ISUBPASSINPUTMS = 543,
USUBPASSINPUT = 544,
USUBPASSINPUTMS = 545,
F16SUBPASSINPUT = 546,
F16SUBPASSINPUTMS = 547,
LEFT_OP = 548,
RIGHT_OP = 549,
INC_OP = 550,
DEC_OP = 551,
LE_OP = 552,
GE_OP = 553,
EQ_OP = 554,
NE_OP = 555,
AND_OP = 556,
OR_OP = 557,
XOR_OP = 558,
MUL_ASSIGN = 559,
DIV_ASSIGN = 560,
ADD_ASSIGN = 561,
MOD_ASSIGN = 562,
LEFT_ASSIGN = 563,
RIGHT_ASSIGN = 564,
AND_ASSIGN = 565,
XOR_ASSIGN = 566,
OR_ASSIGN = 567,
SUB_ASSIGN = 568,
LEFT_PAREN = 569,
RIGHT_PAREN = 570,
LEFT_BRACKET = 571,
RIGHT_BRACKET = 572,
LEFT_BRACE = 573,
RIGHT_BRACE = 574,
DOT = 575,
COMMA = 576,
COLON = 577,
EQUAL = 578,
SEMICOLON = 579,
BANG = 580,
DASH = 581,
TILDE = 582,
PLUS = 583,
STAR = 584,
SLASH = 585,
PERCENT = 586,
LEFT_ANGLE = 587,
RIGHT_ANGLE = 588,
VERTICAL_BAR = 589,
CARET = 590,
AMPERSAND = 591,
QUESTION = 592,
INVARIANT = 593,
HIGH_PRECISION = 594,
MEDIUM_PRECISION = 595,
LOW_PRECISION = 596,
PRECISION = 597,
PACKED = 598,
RESOURCE = 599,
SUPERP = 600,
FLOATCONSTANT = 601,
INTCONSTANT = 602,
UINTCONSTANT = 603,
INT64CONSTANT = 604,
UINT64CONSTANT = 605,
BOOLCONSTANT = 606,
FLOAT16CONSTANT = 607,
LEFT_OP = 608,
RIGHT_OP = 609,
INC_OP = 610,
DEC_OP = 611,
LE_OP = 612,
GE_OP = 613,
EQ_OP = 614,
NE_OP = 615,
AND_OP = 616,
OR_OP = 617,
XOR_OP = 618,
MUL_ASSIGN = 619,
DIV_ASSIGN = 620,
ADD_ASSIGN = 621,
MOD_ASSIGN = 622,
LEFT_ASSIGN = 623,
RIGHT_ASSIGN = 624,
AND_ASSIGN = 625,
XOR_ASSIGN = 626,
OR_ASSIGN = 627,
SUB_ASSIGN = 628,
LEFT_PAREN = 629,
RIGHT_PAREN = 630,
LEFT_BRACKET = 631,
RIGHT_BRACKET = 632,
LEFT_BRACE = 633,
RIGHT_BRACE = 634,
DOT = 635,
COMMA = 636,
COLON = 637,
EQUAL = 638,
SEMICOLON = 639,
BANG = 640,
DASH = 641,
TILDE = 642,
PLUS = 643,
STAR = 644,
SLASH = 645,
PERCENT = 646,
LEFT_ANGLE = 647,
RIGHT_ANGLE = 648,
VERTICAL_BAR = 649,
CARET = 650,
AMPERSAND = 651,
QUESTION = 652,
INVARIANT = 653,
PRECISE = 654,
HIGH_PRECISION = 655,
MEDIUM_PRECISION = 656,
LOW_PRECISION = 657,
PRECISION = 658,
PACKED = 659,
RESOURCE = 660,
SUPERP = 661
BOOLCONSTANT = 604,
IDENTIFIER = 605,
TYPE_NAME = 606,
CENTROID = 607,
IN = 608,
OUT = 609,
INOUT = 610,
STRUCT = 611,
VOID = 612,
WHILE = 613,
BREAK = 614,
CONTINUE = 615,
DO = 616,
ELSE = 617,
FOR = 618,
IF = 619,
DISCARD = 620,
RETURN = 621,
SWITCH = 622,
CASE = 623,
DEFAULT = 624,
UNIFORM = 625,
SHARED = 626,
BUFFER = 627,
FLAT = 628,
SMOOTH = 629,
LAYOUT = 630,
DOUBLECONSTANT = 631,
INT16CONSTANT = 632,
UINT16CONSTANT = 633,
FLOAT16CONSTANT = 634,
INT32CONSTANT = 635,
UINT32CONSTANT = 636,
INT64CONSTANT = 637,
UINT64CONSTANT = 638,
SUBROUTINE = 639,
DEMOTE = 640,
PAYLOADNV = 641,
PAYLOADINNV = 642,
HITATTRNV = 643,
CALLDATANV = 644,
CALLDATAINNV = 645,
PATCH = 646,
SAMPLE = 647,
NONUNIFORM = 648,
COHERENT = 649,
VOLATILE = 650,
RESTRICT = 651,
READONLY = 652,
WRITEONLY = 653,
DEVICECOHERENT = 654,
QUEUEFAMILYCOHERENT = 655,
WORKGROUPCOHERENT = 656,
SUBGROUPCOHERENT = 657,
NONPRIVATE = 658,
NOPERSPECTIVE = 659,
EXPLICITINTERPAMD = 660,
PERVERTEXNV = 661,
PERPRIMITIVENV = 662,
PERVIEWNV = 663,
PERTASKNV = 664,
PRECISE = 665
};
#endif
@ -457,7 +461,7 @@ extern int yydebug;
union YYSTYPE
{
#line 71 "MachineIndependent/glslang.y" /* yacc.c:1909 */
#line 96 "MachineIndependent/glslang.y" /* yacc.c:1909 */
struct {
glslang::TSourceLoc loc;
@ -490,9 +494,10 @@ union YYSTYPE
glslang::TArraySizes* arraySizes;
glslang::TIdentifierList* identifierList;
};
glslang::TArraySizes* typeParameters;
} interm;
#line 496 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
#line 501 "MachineIndependent/glslang_tab.cpp.h" /* yacc.c:1909 */
};
typedef union YYSTYPE YYSTYPE;

View file

@ -35,6 +35,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "localintermediate.h"
#include "../Include/InfoSink.h"
@ -43,6 +45,7 @@
#else
#include <cmath>
#endif
#include <cstdint>
namespace {
@ -173,7 +176,7 @@ bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
case EOpIndexIndirect: out.debug << "indirect index"; break;
case EOpIndexDirectStruct:
{
bool reference = node->getLeft()->getType().getBasicType() == EbtReference;
bool reference = node->getLeft()->getType().isReference();
const TTypeList *members = reference ? node->getLeft()->getType().getReferentType()->getStruct() : node->getLeft()->getType().getStruct();
out.debug << (*members)[node->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst()].type->getFieldName();
out.debug << ": direct index for structure"; break;
@ -210,6 +213,13 @@ bool TOutputTraverser::visitBinary(TVisit /* visit */, TIntermBinary* node)
case EOpLogicalXor: out.debug << "logical-xor"; break;
case EOpLogicalAnd: out.debug << "logical-and"; break;
case EOpAbsDifference: out.debug << "absoluteDifference"; break;
case EOpAddSaturate: out.debug << "addSaturate"; break;
case EOpSubSaturate: out.debug << "subtractSaturate"; break;
case EOpAverage: out.debug << "average"; break;
case EOpAverageRounded: out.debug << "averageRounded"; break;
case EOpMul32x16: out.debug << "multiply32x16"; break;
default: out.debug << "<unknown op>";
}
@ -236,6 +246,7 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpPostDecrement: out.debug << "Post-Decrement"; break;
case EOpPreIncrement: out.debug << "Pre-Increment"; break;
case EOpPreDecrement: out.debug << "Pre-Decrement"; break;
case EOpCopyObject: out.debug << "copy object"; break;
// * -> bool
case EOpConvInt8ToBool: out.debug << "Convert int8_t to bool"; break;
@ -553,6 +564,9 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpFindLSB: out.debug << "findLSB"; break;
case EOpFindMSB: out.debug << "findMSB"; break;
case EOpCountLeadingZeros: out.debug << "countLeadingZeros"; break;
case EOpCountTrailingZeros: out.debug << "countTrailingZeros"; break;
case EOpNoise: out.debug << "noise"; break;
case EOpBallot: out.debug << "ballot"; break;
@ -613,7 +627,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
#ifdef NV_EXTENSIONS
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
@ -636,7 +649,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
#endif
case EOpClip: out.debug << "clip"; break;
case EOpIsFinite: out.debug << "isfinite"; break;
@ -646,7 +658,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpSparseTexelsResident: out.debug << "sparseTexelsResident"; break;
#ifdef AMD_EXTENSIONS
case EOpMinInvocations: out.debug << "minInvocations"; break;
case EOpMaxInvocations: out.debug << "maxInvocations"; break;
case EOpAddInvocations: out.debug << "addInvocations"; break;
@ -675,7 +686,6 @@ bool TOutputTraverser::visitUnary(TVisit /* visit */, TIntermUnary* node)
case EOpCubeFaceIndex: out.debug << "cubeFaceIndex"; break;
case EOpCubeFaceCoord: out.debug << "cubeFaceCoord"; break;
#endif
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
@ -817,6 +827,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpConstructStruct: out.debug << "Construct structure"; break;
case EOpConstructTextureSampler: out.debug << "Construct combined texture-sampler"; break;
case EOpConstructReference: out.debug << "Construct reference"; break;
case EOpConstructCooperativeMatrix: out.debug << "Construct cooperative matrix"; break;
case EOpLessThan: out.debug << "Compare Less Than"; break;
case EOpGreaterThan: out.debug << "Compare Greater Than"; break;
@ -860,7 +871,6 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpReadInvocation: out.debug << "readInvocation"; break;
#ifdef AMD_EXTENSIONS
case EOpSwizzleInvocations: out.debug << "swizzleInvocations"; break;
case EOpSwizzleInvocationsMasked: out.debug << "swizzleInvocationsMasked"; break;
case EOpWriteInvocation: out.debug << "writeInvocation"; break;
@ -868,9 +878,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpMin3: out.debug << "min3"; break;
case EOpMax3: out.debug << "max3"; break;
case EOpMid3: out.debug << "mid3"; break;
case EOpTime: out.debug << "time"; break;
#endif
case EOpAtomicAdd: out.debug << "AtomicAdd"; break;
case EOpAtomicMin: out.debug << "AtomicMin"; break;
@ -907,10 +915,8 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpImageAtomicCompSwap: out.debug << "imageAtomicCompSwap"; break;
case EOpImageAtomicLoad: out.debug << "imageAtomicLoad"; break;
case EOpImageAtomicStore: out.debug << "imageAtomicStore"; break;
#ifdef AMD_EXTENSIONS
case EOpImageLoadLod: out.debug << "imageLoadLod"; break;
case EOpImageStoreLod: out.debug << "imageStoreLod"; break;
#endif
case EOpTextureQuerySize: out.debug << "textureSize"; break;
case EOpTextureQueryLod: out.debug << "textureQueryLod"; break;
@ -937,11 +943,9 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpTextureOffsetClamp: out.debug << "textureOffsetClamp"; break;
case EOpTextureGradClamp: out.debug << "textureGradClamp"; break;
case EOpTextureGradOffsetClamp: out.debug << "textureGradOffsetClamp"; break;
#ifdef AMD_EXTENSIONS
case EOpTextureGatherLod: out.debug << "textureGatherLod"; break;
case EOpTextureGatherLodOffset: out.debug << "textureGatherLodOffset"; break;
case EOpTextureGatherLodOffsets: out.debug << "textureGatherLodOffsets"; break;
#endif
case EOpSparseTexture: out.debug << "sparseTexture"; break;
case EOpSparseTextureOffset: out.debug << "sparseTextureOffset"; break;
@ -959,19 +963,15 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSparseTextureOffsetClamp: out.debug << "sparseTextureOffsetClamp"; break;
case EOpSparseTextureGradClamp: out.debug << "sparseTextureGradClamp"; break;
case EOpSparseTextureGradOffsetClamp: out.debug << "sparseTextureGradOffsetClamp"; break;
#ifdef AMD_EXTENSIONS
case EOpSparseTextureGatherLod: out.debug << "sparseTextureGatherLod"; break;
case EOpSparseTextureGatherLodOffset: out.debug << "sparseTextureGatherLodOffset"; break;
case EOpSparseTextureGatherLodOffsets: out.debug << "sparseTextureGatherLodOffsets"; break;
case EOpSparseImageLoadLod: out.debug << "sparseImageLoadLod"; break;
#endif
#ifdef NV_EXTENSIONS
case EOpImageSampleFootprintNV: out.debug << "imageSampleFootprintNV"; break;
case EOpImageSampleFootprintClampNV: out.debug << "imageSampleFootprintClampNV"; break;
case EOpImageSampleFootprintLodNV: out.debug << "imageSampleFootprintLodNV"; break;
case EOpImageSampleFootprintGradNV: out.debug << "imageSampleFootprintGradNV"; break;
case EOpImageSampleFootprintGradClampNV: out.debug << "imageSampleFootprintGradClampNV"; break;
#endif
case EOpAddCarry: out.debug << "addCarry"; break;
case EOpSubBorrow: out.debug << "subBorrow"; break;
case EOpUMulExtended: out.debug << "uMulExtended"; break;
@ -985,9 +985,7 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpInterpolateAtSample: out.debug << "interpolateAtSample"; break;
case EOpInterpolateAtOffset: out.debug << "interpolateAtOffset"; break;
#ifdef AMD_EXTENSIONS
case EOpInterpolateAtVertex: out.debug << "interpolateAtVertex"; break;
#endif
case EOpSinCos: out.debug << "sincos"; break;
case EOpGenMul: out.debug << "mul"; break;
@ -1054,17 +1052,44 @@ bool TOutputTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node
case EOpSubgroupQuadSwapVertical: out.debug << "subgroupQuadSwapVertical"; break;
case EOpSubgroupQuadSwapDiagonal: out.debug << "subgroupQuadSwapDiagonal"; break;
case EOpSubgroupPartition: out.debug << "subgroupPartitionNV"; break;
case EOpSubgroupPartitionedAdd: out.debug << "subgroupPartitionedAddNV"; break;
case EOpSubgroupPartitionedMul: out.debug << "subgroupPartitionedMulNV"; break;
case EOpSubgroupPartitionedMin: out.debug << "subgroupPartitionedMinNV"; break;
case EOpSubgroupPartitionedMax: out.debug << "subgroupPartitionedMaxNV"; break;
case EOpSubgroupPartitionedAnd: out.debug << "subgroupPartitionedAndNV"; break;
case EOpSubgroupPartitionedOr: out.debug << "subgroupPartitionedOrNV"; break;
case EOpSubgroupPartitionedXor: out.debug << "subgroupPartitionedXorNV"; break;
case EOpSubgroupPartitionedInclusiveAdd: out.debug << "subgroupPartitionedInclusiveAddNV"; break;
case EOpSubgroupPartitionedInclusiveMul: out.debug << "subgroupPartitionedInclusiveMulNV"; break;
case EOpSubgroupPartitionedInclusiveMin: out.debug << "subgroupPartitionedInclusiveMinNV"; break;
case EOpSubgroupPartitionedInclusiveMax: out.debug << "subgroupPartitionedInclusiveMaxNV"; break;
case EOpSubgroupPartitionedInclusiveAnd: out.debug << "subgroupPartitionedInclusiveAndNV"; break;
case EOpSubgroupPartitionedInclusiveOr: out.debug << "subgroupPartitionedInclusiveOrNV"; break;
case EOpSubgroupPartitionedInclusiveXor: out.debug << "subgroupPartitionedInclusiveXorNV"; break;
case EOpSubgroupPartitionedExclusiveAdd: out.debug << "subgroupPartitionedExclusiveAddNV"; break;
case EOpSubgroupPartitionedExclusiveMul: out.debug << "subgroupPartitionedExclusiveMulNV"; break;
case EOpSubgroupPartitionedExclusiveMin: out.debug << "subgroupPartitionedExclusiveMinNV"; break;
case EOpSubgroupPartitionedExclusiveMax: out.debug << "subgroupPartitionedExclusiveMaxNV"; break;
case EOpSubgroupPartitionedExclusiveAnd: out.debug << "subgroupPartitionedExclusiveAndNV"; break;
case EOpSubgroupPartitionedExclusiveOr: out.debug << "subgroupPartitionedExclusiveOrNV"; break;
case EOpSubgroupPartitionedExclusiveXor: out.debug << "subgroupPartitionedExclusiveXorNV"; break;
case EOpSubpassLoad: out.debug << "subpassLoad"; break;
case EOpSubpassLoadMS: out.debug << "subpassLoadMS"; break;
#ifdef NV_EXTENSIONS
case EOpTraceNV: out.debug << "traceNV"; break;
case EOpReportIntersectionNV: out.debug << "reportIntersectionNV"; break;
case EOpIgnoreIntersectionNV: out.debug << "ignoreIntersectionNV"; break;
case EOpTerminateRayNV: out.debug << "terminateRayNV"; break;
case EOpExecuteCallableNV: out.debug << "executeCallableNV"; break;
case EOpWritePackedPrimitiveIndices4x8NV: out.debug << "writePackedPrimitiveIndices4x8NV"; break;
#endif
case EOpCooperativeMatrixLoad: out.debug << "Load cooperative matrix"; break;
case EOpCooperativeMatrixStore: out.debug << "Store cooperative matrix"; break;
case EOpCooperativeMatrixMulAdd: out.debug << "MulAdd cooperative matrices"; break;
case EOpIsHelperInvocation: out.debug << "IsHelperInvocation"; break;
default: out.debug.message(EPrefixError, "Bad aggregation op");
}
@ -1157,8 +1182,11 @@ static void OutputDouble(TInfoSink& out, double value, TOutputTraverser::EExtraO
switch (extra) {
case TOutputTraverser::BinaryDoubleOutput:
{
uint64_t b;
static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
memcpy(&b, &value, sizeof(b));
out.debug << " : ";
long long b = *reinterpret_cast<long long*>(&value);
for (size_t i = 0; i < 8 * sizeof(value); ++i, ++b) {
out.debug << ((b & 0x8000000000000000) != 0 ? "1" : "0");
b <<= 1;
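The change above replaces a reinterpret_cast of the double with a memcpy into a uint64_t, which keeps the bit dump free of undefined behavior and is guarded by a static_assert on the sizes. A minimal standalone version of the same idea, assuming nothing beyond the standard library:

#include <cstdint>
#include <cstring>
#include <iostream>

// Print the 64 raw bits of a double, most significant bit first.
static void printDoubleBits(double value)
{
    std::uint64_t b;
    static_assert(sizeof(b) == sizeof(value), "sizeof(uint64_t) != sizeof(double)");
    std::memcpy(&b, &value, sizeof(b));          // well-defined type punning
    for (int i = 63; i >= 0; --i)
        std::cout << (((b >> i) & 1u) ? '1' : '0');
    std::cout << "\n";
}

int main()
{
    printDoubleBits(1.0);    // bit pattern of 0x3FF0000000000000
    printDoubleBits(-2.5);
}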
@ -1357,6 +1385,7 @@ bool TOutputTraverser::visitBranch(TVisit /* visit*/, TIntermBranch* node)
case EOpContinue: out.debug << "Branch: Continue"; break;
case EOpReturn: out.debug << "Branch: Return"; break;
case EOpCase: out.debug << "case: "; break;
case EOpDemote: out.debug << "Demote"; break;
case EOpDefault: out.debug << "default: "; break;
default: out.debug << "Branch: Unknown Branch"; break;
}
@ -1467,18 +1496,17 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
}
infoSink.debug << "\n";
}
if (interlockOrdering != EioNone)
infoSink.debug << "interlock ordering = " << TQualifier::getInterlockOrderingString(interlockOrdering) << "\n";
break;
#ifdef NV_EXTENSIONS
case EShLangMeshNV:
infoSink.debug << "max_vertices = " << vertices << "\n";
infoSink.debug << "max_primitives = " << primitives << "\n";
infoSink.debug << "output primitive = " << TQualifier::getGeometryString(outputPrimitive) << "\n";
// Fall through
case EShLangTaskNV:
// Fall through
#endif
case EShLangCompute:
infoSink.debug << "local_size = (" << localSize[0] << ", " << localSize[1] << ", " << localSize[2] << ")\n";
{
@ -1507,3 +1535,5 @@ void TIntermediate::output(TInfoSink& infoSink, bool tree)
}
} // end namespace glslang
#endif // not GLSLANG_WEB

File diff suppressed because it is too large

View file

@ -33,11 +33,15 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#ifndef _IOMAPPER_INCLUDED
#define _IOMAPPER_INCLUDED
#include "../Public/ShaderLang.h"
#include <cstdint>
#include "LiveTraverser.h"
#include <unordered_map>
#include <unordered_set>
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
@ -47,17 +51,249 @@ class TInfoSink;
namespace glslang {
class TIntermediate;
struct TVarEntryInfo {
int id;
TIntermSymbol* symbol;
bool live;
int newBinding;
int newSet;
int newLocation;
int newComponent;
int newIndex;
EShLanguage stage;
struct TOrderById {
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) { return l.id < r.id; }
};
struct TOrderByPriority {
// ordering:
// 1) has both binding and set
// 2) has binding but no set
// 3) has no binding but set
// 4) has no binding and no set
inline bool operator()(const TVarEntryInfo& l, const TVarEntryInfo& r) {
const TQualifier& lq = l.symbol->getQualifier();
const TQualifier& rq = r.symbol->getQualifier();
// simple rules:
// has binding gives 2 points
// has set gives 1 point
// who has the most points is more important.
int lPoints = (lq.hasBinding() ? 2 : 0) + (lq.hasSet() ? 1 : 0);
int rPoints = (rq.hasBinding() ? 2 : 0) + (rq.hasSet() ? 1 : 0);
if (lPoints == rPoints)
return l.id < r.id;
return lPoints > rPoints;
}
};
};
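TOrderByPriority scores each variable, two points for an explicit binding and one for an explicit set, so fully qualified variables are resolved first and ties fall back to declaration id. A small standalone sketch of the same scoring comparator, with an invented record in place of TVarEntryInfo:

#include <algorithm>
#include <iostream>
#include <vector>

// Invented stand-in for a variable entry: explicit binding/set flags plus a declaration id.
struct Var { bool hasBinding; bool hasSet; int id; };

static bool byPriority(const Var& l, const Var& r)
{
    int lPoints = (l.hasBinding ? 2 : 0) + (l.hasSet ? 1 : 0);
    int rPoints = (r.hasBinding ? 2 : 0) + (r.hasSet ? 1 : 0);
    if (lPoints == rPoints)
        return l.id < r.id;          // stable tie-break on declaration order
    return lPoints > rPoints;        // more points means resolved earlier
}

int main()
{
    std::vector<Var> vars = { {false, false, 0}, {true, true, 3}, {true, false, 1}, {false, true, 2} };
    std::sort(vars.begin(), vars.end(), byPriority);
    for (const Var& v : vars)
        std::cout << v.id << " ";    // prints: 3 1 2 0
}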
// Base class for shared TIoMapResolver services, used by several derivations.
struct TDefaultIoResolverBase : public glslang::TIoMapResolver {
public:
TDefaultIoResolverBase(const TIntermediate& intermediate);
typedef std::vector<int> TSlotSet;
typedef std::unordered_map<int, TSlotSet> TSlotSetMap;
// grow the reflection stage by stage
void notifyBinding(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void notifyInOut(EShLanguage, TVarEntryInfo& /*ent*/) override {}
void beginNotifications(EShLanguage) override {}
void endNotifications(EShLanguage) override {}
void beginResolve(EShLanguage) override {}
void endResolve(EShLanguage) override {}
void beginCollect(EShLanguage) override {}
void endCollect(EShLanguage) override {}
void reserverResourceSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
void reserverStorageSlot(TVarEntryInfo& /*ent*/, TInfoSink& /*infoSink*/) override {}
int getBaseBinding(TResourceType res, unsigned int set) const;
const std::vector<std::string>& getResourceSetBinding() const;
virtual TResourceType getResourceType(const glslang::TType& type) = 0;
bool doAutoBindingMapping() const;
bool doAutoLocationMapping() const;
TSlotSet::iterator findSlot(int set, int slot);
bool checkEmpty(int set, int slot);
bool validateInOut(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
int reserveSlot(int set, int slot, int size = 1);
int getFreeSlot(int set, int base, int size = 1);
int resolveSet(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveInOutComponent(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveInOutIndex(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void addStage(EShLanguage stage) override {
if (stage < EShLangCount)
stageMask[stage] = true;
}
uint32_t computeTypeLocationSize(const TType& type, EShLanguage stage);
TSlotSetMap slots;
protected:
TDefaultIoResolverBase(TDefaultIoResolverBase&);
TDefaultIoResolverBase& operator=(TDefaultIoResolverBase&);
const TIntermediate& intermediate;
int nextUniformLocation;
int nextInputLocation;
int nextOutputLocation;
bool stageMask[EShLangCount + 1];
// Return descriptor set specific base if there is one, and the generic base otherwise.
int selectBaseBinding(int base, int descriptorSetBase) const {
return descriptorSetBase != -1 ? descriptorSetBase : base;
}
static int getLayoutSet(const glslang::TType& type) {
if (type.getQualifier().hasSet())
return type.getQualifier().layoutSet;
else
return 0;
}
static bool isSamplerType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isPureSampler();
}
static bool isTextureType(const glslang::TType& type) {
return (type.getBasicType() == glslang::EbtSampler &&
(type.getSampler().isTexture() || type.getSampler().isSubpass()));
}
static bool isUboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqUniform;
}
static bool isImageType(const glslang::TType& type) {
return type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage();
}
static bool isSsboType(const glslang::TType& type) {
return type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a SRV (shader resource view) type:
static bool isSrvType(const glslang::TType& type) {
return isTextureType(type) || type.getQualifier().storage == EvqBuffer;
}
// Return true if this is a UAV (unordered access view) type:
static bool isUavType(const glslang::TType& type) {
if (type.getQualifier().isReadOnly())
return false;
return (type.getBasicType() == glslang::EbtSampler && type.getSampler().isImage()) ||
(type.getQualifier().storage == EvqBuffer);
}
};
// Default I/O resolver for OpenGL
struct TDefaultGlslIoResolver : public TDefaultIoResolverBase {
public:
typedef std::map<TString, int> TVarSlotMap; // <resourceName, location/binding>
typedef std::map<int, TVarSlotMap> TSlotMap; // <resourceKey, TVarSlotMap>
TDefaultGlslIoResolver(const TIntermediate& intermediate);
bool validateBinding(EShLanguage /*stage*/, TVarEntryInfo& /*ent*/) override { return true; }
TResourceType getResourceType(const glslang::TType& type) override;
int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) override;
int resolveUniformLocation(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
int resolveBinding(EShLanguage /*stage*/, TVarEntryInfo& ent) override;
void beginResolve(EShLanguage /*stage*/) override;
void endResolve(EShLanguage stage) override;
void beginCollect(EShLanguage) override;
void endCollect(EShLanguage) override;
void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) override;
// In/out symbols and uniform symbols are stored in the same resourceSlotMap; the storage key identifies each kind of symbol.
// We build the storage key from the stage and the storage qualifier, which lets us identify the same storage resource used in different stages.
// If a resource is a program-level resource and we don't need to know its usage stage, the same stage value can be used to build the key.
// Note: both stage and type must be less than 0xffff.
int buildStorageKey(EShLanguage stage, TStorageQualifier type) {
assert(static_cast<uint32_t>(stage) <= 0x0000ffff && static_cast<uint32_t>(type) <= 0x0000ffff);
return (stage << 16) | type;
}
protected:
// Marks the previous stage, used to gather additional interface symbol information.
EShLanguage preStage;
// Marks the current shader stage for the resolver.
EShLanguage currentStage;
// Slot map for storage resources (locations of uniforms and interface symbols); it is a program-wide slot map.
TSlotMap resourceSlotMap;
// Slot map for other resources (images, UBOs, SSBOs); it is also a program-wide slot map.
TSlotMap storageSlotMap;
};
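buildStorageKey packs the stage into the upper 16 bits and the storage qualifier into the lower 16, so a single integer key identifies a (stage, storage) pair in the slot maps; the assert guards the 16-bit assumption. A tiny standalone sketch of that packing and the corresponding unpacking, with made-up example values:

#include <cassert>
#include <cstdint>
#include <iostream>

static std::uint32_t buildKey(std::uint32_t stage, std::uint32_t storage)
{
    assert(stage <= 0xffffu && storage <= 0xffffu);   // both halves must fit in 16 bits
    return (stage << 16) | storage;
}

int main()
{
    const std::uint32_t key = buildKey(4 /* some stage id */, 7 /* some storage qualifier id */);
    std::cout << std::hex << key << "\n";                                    // 40007
    std::cout << std::dec << (key >> 16) << " " << (key & 0xffffu) << "\n";  // unpack: 4 7
}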
typedef std::map<TString, TVarEntryInfo> TVarLiveMap;
// Overrides "operator=". When a vector<pair<const _Kty, _Ty>> is sorted with vc++,
// the sort calls:
// pair& operator=(const pair<_Other1, _Other2>& _Right)
// {
//     first = _Right.first;
//     second = _Right.second;
//     return (*this);
// }
// which would assign through a const type on the left-hand side.
// Overriding this function avoids that compiler error.
// In the future, if the vc++ compiler can handle such a situation,
// this part of the code will be removed.
struct TVarLivePair : std::pair<const TString, TVarEntryInfo> {
TVarLivePair(std::pair<const TString, TVarEntryInfo>& _Right) : pair(_Right.first, _Right.second) {}
TVarLivePair& operator=(const TVarLivePair& _Right) {
const_cast<TString&>(first) = _Right.first;
second = _Right.second;
return (*this);
}
};
typedef std::vector<TVarLivePair> TVarLiveVector;
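The comment above TVarLivePair explains the workaround: std::sort needs assignable elements, but a pair whose first member is const (a map's value_type) has no usable operator=, so the wrapper const_casts the key during assignment. A compact standalone illustration of the same trick with simplified types; the names are invented for this example:

#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// std::sort on std::vector<std::pair<const std::string, int>> does not compile,
// because the const key makes the pair non-assignable. This wrapper restores assignment.
struct LivePair : std::pair<const std::string, int> {
    LivePair(const std::pair<const std::string, int>& p) : pair(p.first, p.second) {}
    LivePair& operator=(const LivePair& rhs) {
        const_cast<std::string&>(first) = rhs.first;
        second = rhs.second;
        return *this;
    }
};

int main()
{
    std::vector<LivePair> v;
    v.push_back(std::pair<const std::string, int>("b", 2));
    v.push_back(std::pair<const std::string, int>("a", 1));
    std::sort(v.begin(), v.end(),
              [](const LivePair& l, const LivePair& r) { return l.first < r.first; });
    for (const auto& p : v)
        std::cout << p.first << "=" << p.second << "\n";   // a=1 then b=2
}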
// I/O mapper
class TIoMapper {
public:
TIoMapper() {}
virtual ~TIoMapper() {}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*);
bool virtual doMap(TIoMapResolver*, TInfoSink&) { return true; }
};
// I/O mapper for OpenGL
class TGlslIoMapper : public TIoMapper {
public:
TGlslIoMapper() {
memset(inVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(outVarMaps, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(uniformVarMap, 0, sizeof(TVarLiveMap*) * (EShLangCount + 1));
memset(intermediates, 0, sizeof(TIntermediate*) * (EShLangCount + 1));
}
virtual ~TGlslIoMapper() {
for (size_t stage = 0; stage < EShLangCount; stage++) {
if (inVarMaps[stage] != nullptr) {
delete inVarMaps[stage];
inVarMaps[stage] = nullptr;
}
if (outVarMaps[stage] != nullptr) {
delete outVarMaps[stage];
outVarMaps[stage] = nullptr;
}
if (uniformVarMap[stage] != nullptr) {
delete uniformVarMap[stage];
uniformVarMap[stage] = nullptr;
}
if (intermediates[stage] != nullptr)
intermediates[stage] = nullptr;
}
}
// grow the reflection stage by stage
bool addStage(EShLanguage, TIntermediate&, TInfoSink&, TIoMapResolver*) override;
bool doMap(TIoMapResolver*, TInfoSink&) override;
TVarLiveMap *inVarMaps[EShLangCount], *outVarMaps[EShLangCount],
*uniformVarMap[EShLangCount];
TIntermediate* intermediates[EShLangCount];
bool hadError = false;
};
} // end namespace glslang
#endif // _IOMAPPER_INCLUDED
#endif // GLSLANG_WEB

View file

@ -187,12 +187,14 @@ bool TIndexTraverser::visitAggregate(TVisit /* visit */, TIntermAggregate* node)
//
void TParseContext::constantIndexExpressionCheck(TIntermNode* index)
{
#ifndef GLSLANG_WEB
TIndexTraverser it(inductiveLoopIds);
index->traverse(&it);
if (it.bad)
error(it.badLoc, "Non-constant-index-expression", "limitations", "");
#endif
}
} // end namespace glslang

View file

@ -56,8 +56,10 @@ namespace glslang {
//
void TIntermediate::error(TInfoSink& infoSink, const char* message)
{
#ifndef GLSLANG_WEB
infoSink.info.prefix(EPrefixError);
infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
#endif
++numErrors;
}
@ -65,8 +67,10 @@ void TIntermediate::error(TInfoSink& infoSink, const char* message)
// Link-time warning.
void TIntermediate::warn(TInfoSink& infoSink, const char* message)
{
#ifndef GLSLANG_WEB
infoSink.info.prefix(EPrefixWarning);
infoSink.info << "Linking " << StageName(language) << " stage: " << message << "\n";
#endif
}
// TODO: 4.4 offset/align: "Two blocks linked together in the same program with the same block
@ -78,9 +82,11 @@ void TIntermediate::warn(TInfoSink& infoSink, const char* message)
//
void TIntermediate::merge(TInfoSink& infoSink, TIntermediate& unit)
{
#ifndef GLSLANG_WEB
mergeCallGraphs(infoSink, unit);
mergeModes(infoSink, unit);
mergeTrees(infoSink, unit);
#endif
}
void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
@ -98,6 +104,8 @@ void TIntermediate::mergeCallGraphs(TInfoSink& infoSink, TIntermediate& unit)
callGraph.insert(callGraph.end(), unit.callGraph.begin(), unit.callGraph.end());
}
#ifndef GLSLANG_WEB
#define MERGE_MAX(member) member = std::max(member, unit.member)
#define MERGE_TRUE(member) if (unit.member) member = unit.member;
@ -106,9 +114,9 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
if (language != unit.language)
error(infoSink, "stages must match when linking into a single stage");
if (source == EShSourceNone)
source = unit.source;
if (source != unit.source)
if (getSource() == EShSourceNone)
setSource(unit.getSource());
if (getSource() != unit.getSource())
error(infoSink, "can't link compilation units from different source languages");
if (treeRoot == nullptr) {
@ -116,7 +124,7 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
version = unit.version;
requestedExtensions = unit.requestedExtensions;
} else {
if ((profile == EEsProfile) != (unit.profile == EEsProfile))
if ((isEsProfile()) != (unit.isEsProfile()))
error(infoSink, "Cannot cross link ES and desktop profiles");
else if (unit.profile == ECompatibilityProfile)
profile = ECompatibilityProfile;
@ -142,18 +150,13 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
if (vertices == TQualifier::layoutNotSet)
vertices = unit.vertices;
else if (vertices != unit.vertices) {
if (language == EShLangGeometry
#ifdef NV_EXTENSIONS
|| language == EShLangMeshNV
#endif
)
if (language == EShLangGeometry || language == EShLangMeshNV)
error(infoSink, "Contradictory layout max_vertices values");
else if (language == EShLangTessControl)
error(infoSink, "Contradictory layout vertices values");
else
assert(0);
}
#ifdef NV_EXTENSIONS
if (primitives == TQualifier::layoutNotSet)
primitives = unit.primitives;
else if (primitives != unit.primitives) {
@ -162,7 +165,6 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
else
assert(0);
}
#endif
if (inputPrimitive == ElgNone)
inputPrimitive = unit.inputPrimitive;
@ -190,12 +192,14 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
MERGE_TRUE(pointMode);
for (int i = 0; i < 3; ++i) {
if (localSize[i] > 1)
if (!localSizeNotDefault[i] && unit.localSizeNotDefault[i]) {
localSize[i] = unit.localSize[i];
localSizeNotDefault[i] = true;
}
else if (localSize[i] != unit.localSize[i])
error(infoSink, "Contradictory local size");
if (localSizeSpecId[i] != TQualifier::layoutNotSet)
if (localSizeSpecId[i] == TQualifier::layoutNotSet)
localSizeSpecId[i] = unit.localSizeSpecId[i];
else if (localSizeSpecId[i] != unit.localSizeSpecId[i])
error(infoSink, "Contradictory local size specialization ids");
@ -224,21 +228,16 @@ void TIntermediate::mergeModes(TInfoSink& infoSink, TIntermediate& unit)
xfbBuffers[b].implicitStride = std::max(xfbBuffers[b].implicitStride, unit.xfbBuffers[b].implicitStride);
if (unit.xfbBuffers[b].contains64BitType)
xfbBuffers[b].contains64BitType = true;
#ifdef AMD_EXTENSIONS
if (unit.xfbBuffers[b].contains32BitType)
xfbBuffers[b].contains32BitType = true;
if (unit.xfbBuffers[b].contains16BitType)
xfbBuffers[b].contains16BitType = true;
#endif
// TODO: 4.4 link: enhanced layouts: compare ranges
}
MERGE_TRUE(multiStream);
#ifdef NV_EXTENSIONS
MERGE_TRUE(layoutOverrideCoverage);
MERGE_TRUE(geoPassthroughEXT);
#endif
for (unsigned int i = 0; i < unit.shiftBinding.size(); ++i) {
if (unit.shiftBinding[i] > 0)
@ -287,13 +286,8 @@ void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
}
// Getting this far means we have two existing trees to merge...
#ifdef NV_EXTENSIONS
numShaderRecordNVBlocks += unit.numShaderRecordNVBlocks;
#endif
#ifdef NV_EXTENSIONS
numTaskNVBlocks += unit.numTaskNVBlocks;
#endif
// Get the top-level globals of each unit
TIntermSequence& globals = treeRoot->getAsAggregate()->getSequence();
@ -315,6 +309,8 @@ void TIntermediate::mergeTrees(TInfoSink& infoSink, TIntermediate& unit)
ioAccessed.insert(unit.ioAccessed.begin(), unit.ioAccessed.end());
}
#endif
// Traverser that seeds an ID map with all built-ins, and tracks the
// maximum ID used.
// (It would be nice to put this in a function, but that causes warnings
@ -502,6 +498,7 @@ void TIntermediate::mergeImplicitArraySizes(TType& type, const TType& unitType)
//
void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& symbol, const TIntermSymbol& unitSymbol, bool crossStage)
{
#ifndef GLSLANG_WEB
bool writeTypeComparison = false;
// Types have to match
@ -536,7 +533,7 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
}
// Precise...
if (! crossStage && symbol.getQualifier().noContraction != unitSymbol.getQualifier().noContraction) {
if (! crossStage && symbol.getQualifier().isNoContraction() != unitSymbol.getQualifier().isNoContraction()) {
error(infoSink, "Presence of precise qualifier must match:");
writeTypeComparison = true;
}
@ -545,9 +542,9 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
if (symbol.getQualifier().centroid != unitSymbol.getQualifier().centroid ||
symbol.getQualifier().smooth != unitSymbol.getQualifier().smooth ||
symbol.getQualifier().flat != unitSymbol.getQualifier().flat ||
symbol.getQualifier().sample != unitSymbol.getQualifier().sample ||
symbol.getQualifier().patch != unitSymbol.getQualifier().patch ||
symbol.getQualifier().nopersp != unitSymbol.getQualifier().nopersp) {
symbol.getQualifier().isSample() != unitSymbol.getQualifier().isSample() ||
symbol.getQualifier().isPatch() != unitSymbol.getQualifier().isPatch() ||
symbol.getQualifier().isNonPerspective() != unitSymbol.getQualifier().isNonPerspective()) {
error(infoSink, "Interpolation and auxiliary storage qualifiers must match:");
writeTypeComparison = true;
}
@ -595,6 +592,7 @@ void TIntermediate::mergeErrorCheck(TInfoSink& infoSink, const TIntermSymbol& sy
if (writeTypeComparison)
infoSink.info << " " << symbol.getName() << ": \"" << symbol.getType().getCompleteString() << "\" versus \"" <<
unitSymbol.getType().getCompleteString() << "\"\n";
#endif
}
//
@ -609,15 +607,12 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
return;
if (numEntryPoints < 1) {
if (source == EShSourceGlsl)
if (getSource() == EShSourceGlsl)
error(infoSink, "Missing entry point: Each stage requires one entry point");
else
warn(infoSink, "Entry point not found");
}
if (numPushConstants > 1)
error(infoSink, "Only one push_constant block is allowed per stage");
// recursion and missing body checking
checkCallGraphCycles(infoSink);
checkCallGraphBodies(infoSink, keepUncalled);
@ -625,6 +620,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
// overlap/alias/missing I/O, etc.
inOutLocationCheck(infoSink);
#ifndef GLSLANG_WEB
if (getNumPushConstants() > 1)
error(infoSink, "Only one push_constant block is allowed per stage");
// invocations
if (invocations == TQualifier::layoutNotSet)
invocations = 1;
@ -642,12 +641,10 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
for (size_t b = 0; b < xfbBuffers.size(); ++b) {
if (xfbBuffers[b].contains64BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 8);
#ifdef AMD_EXTENSIONS
else if (xfbBuffers[b].contains32BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 4);
else if (xfbBuffers[b].contains16BitType)
RoundToPow2(xfbBuffers[b].implicitStride, 2);
#endif
// "It is a compile-time or link-time error to have
// any xfb_offset that overflows xfb_stride, whether stated on declarations before or after the xfb_stride, or
@ -668,16 +665,11 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "xfb_stride must be multiple of 8 for buffer holding a double or 64-bit integer:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
#ifdef AMD_EXTENSIONS
} else if (xfbBuffers[b].contains32BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#else
} else if (! IsMultipleOfPow2(xfbBuffers[b].stride, 4)) {
#endif
error(infoSink, "xfb_stride must be multiple of 4:");
infoSink.info.prefix(EPrefixError);
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
#ifdef AMD_EXTENSIONS
// "If the buffer is capturing any
// outputs with half-precision or 16-bit integer components, the stride must be a multiple of 2"
else if (xfbBuffers[b].contains16BitType && ! IsMultipleOfPow2(xfbBuffers[b].stride, 2)) {
@ -686,7 +678,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
infoSink.info << " xfb_buffer " << (unsigned int)b << ", xfb_stride " << xfbBuffers[b].stride << "\n";
}
#endif
// "The resulting stride (implicit or explicit), when divided by 4, must be less than or equal to the
// implementation-dependent constant gl_MaxTransformFeedbackInterleavedComponents."
if (xfbBuffers[b].stride > (unsigned int)(4 * resources.maxTransformFeedbackInterleavedComponents)) {
@ -704,7 +695,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
error(infoSink, "At least one shader must specify an output layout(vertices=...)");
break;
case EShLangTessEvaluation:
if (source == EShSourceGlsl) {
if (getSource() == EShSourceGlsl) {
if (inputPrimitive == ElgNone)
error(infoSink, "At least one shader must specify an input layout primitive");
if (vertexSpacing == EvsNone)
@ -730,8 +721,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
break;
case EShLangCompute:
break;
#ifdef NV_EXTENSIONS
case EShLangRayGenNV:
case EShLangIntersectNV:
case EShLangAnyHitNV:
@ -764,8 +753,6 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
if (numTaskNVBlocks > 1)
error(infoSink, "Only one taskNV interface block is allowed per shader");
break;
#endif
default:
error(infoSink, "Unknown Stage.");
break;
@ -787,6 +774,7 @@ void TIntermediate::finalCheck(TInfoSink& infoSink, bool keepUncalled)
} finalLinkTraverser;
treeRoot->traverse(&finalLinkTraverser);
#endif
}
//
@ -973,7 +961,7 @@ void TIntermediate::inOutLocationCheck(TInfoSink& infoSink)
}
}
if (profile == EEsProfile) {
if (isEsProfile()) {
if (numFragOut > 1 && fragOutWithNoLocation)
error(infoSink, "when more than one fragment shader output, all must have location qualifiers");
}
@ -1066,6 +1054,7 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
// So, for the case of dvec3, we need two independent ioRanges.
int collision = -1; // no collision
#ifndef GLSLANG_WEB
if (size == 2 && type.getBasicType() == EbtDouble && type.getVectorSize() == 3 &&
(qualifier.isPipeInput() || qualifier.isPipeOutput())) {
// Dealing with dvec3 in/out split across two locations.
@ -1092,7 +1081,9 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
if (collision < 0)
usedIo[set].push_back(range2);
}
} else {
} else
#endif
{
// Not a dvec3 in/out split across two locations, generic path.
// Need a single IO-range block.
@ -1106,10 +1097,10 @@ int TIntermediate::addUsedLocation(const TQualifier& qualifier, const TType& typ
}
// combine location and component ranges
TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.layoutIndex : 0);
TIoRange range(locationRange, componentRange, type.getBasicType(), qualifier.hasIndex() ? qualifier.getIndex() : 0);
// check for collisions, except for vertex inputs on desktop targeting OpenGL
if (! (profile != EEsProfile && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
if (! (!isEsProfile() && language == EShLangVertex && qualifier.isPipeInput()) || spvVersion.vulkan > 0)
collision = checkLocationRange(set, range, type, typeCollision);
if (collision < 0)
@ -1187,14 +1178,10 @@ int TIntermediate::computeTypeLocationSize(const TType& type, EShLanguage stage)
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
// TODO: are there valid cases of having an unsized array with a location? If so, this code is running too early.
TType elementType(type, 0);
if (type.isSizedArray()
#ifdef NV_EXTENSIONS
&& !type.getQualifier().isPerView()
#endif
)
if (type.isSizedArray() && !type.getQualifier().isPerView())
return type.getOuterArraySize() * computeTypeLocationSize(elementType, stage);
else {
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
// unset perViewNV attributes for arrayed per-view outputs: "perviewNV vec4 v[MAX_VIEWS][3];"
elementType.getQualifier().perViewNV = false;
#endif
@ -1273,6 +1260,8 @@ int TIntermediate::computeTypeUniformLocationSize(const TType& type)
return 1;
}
#ifndef GLSLANG_WEB
// Accumulate xfb buffer ranges and check for collisions as the accumulation is done.
//
// Returns < 0 if no collision, >= 0 if collision and the value returned is a colliding value.
@ -1285,11 +1274,7 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
TXfbBuffer& buffer = xfbBuffers[qualifier.layoutXfbBuffer];
// compute the range
#ifdef AMD_EXTENSIONS
unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType, buffer.contains32BitType, buffer.contains16BitType);
#else
unsigned int size = computeTypeXfbSize(type, buffer.contains64BitType);
#endif
buffer.implicitStride = std::max(buffer.implicitStride, qualifier.layoutXfbOffset + size);
TRange range(qualifier.layoutXfbOffset, qualifier.layoutXfbOffset + size - 1);
@ -1309,15 +1294,10 @@ int TIntermediate::addXfbBufferOffset(const TType& type)
// Recursively figure out how many bytes of xfb buffer are used by the given type.
// Return the size of type, in bytes.
// Sets contains64BitType to true if the type contains a 64-bit data type.
#ifdef AMD_EXTENSIONS
// Sets contains32BitType to true if the type contains a 32-bit data type.
// Sets contains16BitType to true if the type contains a 16-bit data type.
// N.B. Caller must set contains64BitType, contains32BitType, and contains16BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const
#else
// N.B. Caller must set contains64BitType to false before calling.
unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains64BitType) const
#endif
{
// "...if applied to an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8.
@ -1330,44 +1310,32 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
// TODO: perf: this can be flattened by using getCumulativeArraySize(), and a deref that discards all arrayness
assert(type.isSizedArray());
TType elementType(type, 0);
#ifdef AMD_EXTENSIONS
return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType, contains32BitType, contains16BitType);
#else
return type.getOuterArraySize() * computeTypeXfbSize(elementType, contains64BitType);
#endif
}
if (type.isStruct()) {
unsigned int size = 0;
bool structContains64BitType = false;
#ifdef AMD_EXTENSIONS
bool structContains32BitType = false;
bool structContains16BitType = false;
#endif
for (int member = 0; member < (int)type.getStruct()->size(); ++member) {
TType memberType(type, member);
// "... if applied to
// an aggregate containing a double or 64-bit integer, the offset must also be a multiple of 8,
// and the space taken in the buffer will be a multiple of 8."
bool memberContains64BitType = false;
#ifdef AMD_EXTENSIONS
bool memberContains32BitType = false;
bool memberContains16BitType = false;
int memberSize = computeTypeXfbSize(memberType, memberContains64BitType, memberContains32BitType, memberContains16BitType);
#else
int memberSize = computeTypeXfbSize(memberType, memberContains64BitType);
#endif
if (memberContains64BitType) {
structContains64BitType = true;
RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
} else if (memberContains32BitType) {
structContains32BitType = true;
RoundToPow2(size, 4);
} else if (memberContains16BitType) {
structContains16BitType = true;
RoundToPow2(size, 2);
#endif
}
size += memberSize;
}
@ -1375,14 +1343,12 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
if (structContains64BitType) {
contains64BitType = true;
RoundToPow2(size, 8);
#ifdef AMD_EXTENSIONS
} else if (structContains32BitType) {
contains32BitType = true;
RoundToPow2(size, 4);
} else if (structContains16BitType) {
contains16BitType = true;
RoundToPow2(size, 2);
#endif
}
return size;
}
@ -1402,7 +1368,6 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
if (type.getBasicType() == EbtDouble || type.getBasicType() == EbtInt64 || type.getBasicType() == EbtUint64) {
contains64BitType = true;
return 8 * numComponents;
#ifdef AMD_EXTENSIONS
} else if (type.getBasicType() == EbtFloat16 || type.getBasicType() == EbtInt16 || type.getBasicType() == EbtUint16) {
contains16BitType = true;
return 2 * numComponents;
@ -1412,12 +1377,10 @@ unsigned int TIntermediate::computeTypeXfbSize(const TType& type, bool& contains
contains32BitType = true;
return 4 * numComponents;
}
#else
} else
return 4 * numComponents;
#endif
}
#endif
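// To make the xfb rounding rules above concrete (the 8/4/2-byte figures are the
// ones quoted in the code, nothing new): capturing a struct { double d; float f; }
// rounds the running size to 8 before the double (0 -> 0, then +8), adds 4 for the
// float (12), and finally rounds the struct total back up to 8, giving 16 bytes.
static_assert(((12 + 8 - 1) & ~(8 - 1)) == 16, "struct containing a double pads out to 16 bytes");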
const int baseAlignmentVec4Std140 = 16;
// Return the size and alignment of a component of the given type.
@ -1425,6 +1388,10 @@ const int baseAlignmentVec4Std140 = 16;
// Return value is the alignment.
int TIntermediate::getBaseAlignmentScalar(const TType& type, int& size)
{
#ifdef GLSLANG_WEB
size = 4; return 4;
#endif
switch (type.getBasicType()) {
case EbtInt64:
case EbtUint64:
@ -1683,4 +1650,74 @@ int TIntermediate::getMemberAlignment(const TType& type, int& size, int& stride,
}
}
// shared calculation by getOffset and getOffsets
void TIntermediate::updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
int dummyStride;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
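// A member-level row_major/column_major qualifier takes precedence here; only when
// the member has no matrix layout of its own (ElmNone) does the parent block's
// layoutMatrix apply.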
int memberAlignment = getMemberAlignment(memberType, memberSize, dummyStride,
parentType.getQualifier().layoutPacking,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: parentType.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
}
// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int TIntermediate::getOffset(const TType& type, int index)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate the offset if one is already present; it could be user supplied
// and differ from what would be calculated. That makes skipping the calculation
// a correctness requirement, not just a speed optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize = 0;
int offset = 0;
for (int m = 0; m <= index; ++m) {
updateOffset(type, *memberList[m].type, offset, memberSize);
if (m < index)
offset += memberSize;
}
return offset;
}
// Calculate the block data size.
// Block arrayness is not taken into account; each element is backed by a separate buffer.
int TIntermediate::getBlockSize(const TType& blockType)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
int lastMemberSize;
int dummyStride;
getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
return lastOffset + lastMemberSize;
}
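// Worked example for the two functions above; the alignments and sizes are the
// standard std140 figures, not anything computed in this change. For a block
// layout(std140) uniform B { vec3 a; float b; vec4 c; }:
//   member     alignment   offset   size
//   a (vec3)      16           0     12
//   b (float)      4          12      4
//   c (vec4)      16          16     16
// so getBlockSize returns lastOffset + lastMemberSize = 16 + 16 = 32 bytes.
static_assert(16 + 16 == 32, "block size = offset of last member + size of last member");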
int TIntermediate::computeBufferReferenceTypeSize(const TType& type)
{
assert(type.isReference());
int size = getBlockSize(*type.getReferentType());
int align = type.getBufferReferenceAlignment();
if (align) {
size = (size + align - 1) & ~(align-1);
}
return size;
}
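// The masking expression above is the usual round-up-to-alignment idiom; it is only
// valid because buffer_reference alignments are powers of two. Two compile-time
// spot checks:
static_assert(((37 + 16 - 1) & ~(16 - 1)) == 48, "37 bytes rounds up to 48 at 16-byte alignment");
static_assert(((48 + 16 - 1) & ~(16 - 1)) == 48, "an already aligned size is unchanged");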
} // end namespace glslang

View file

@ -147,23 +147,19 @@ struct TOffsetRange {
TRange offset;
};
#ifndef GLSLANG_WEB
// Things that need to be tracked per xfb buffer.
struct TXfbBuffer {
#ifdef AMD_EXTENSIONS
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false),
contains32BitType(false), contains16BitType(false) { }
#else
TXfbBuffer() : stride(TQualifier::layoutXfbStrideEnd), implicitStride(0), contains64BitType(false) { }
#endif
std::vector<TRange> ranges; // byte offsets that have already been assigned
unsigned int stride;
unsigned int implicitStride;
bool contains64BitType;
#ifdef AMD_EXTENSIONS
bool contains32BitType;
bool contains16BitType;
#endif
};
#endif
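// Illustration of how implicitStride above grows: addXfbBufferOffset (earlier in
// this change) keeps it at the maximum xfb_offset + member size seen so far, so a
// vec4 (4 components * 4 bytes) captured at xfb_offset 16 implies a stride of at
// least 32 bytes.
static_assert(16 + 4 * 4 == 32, "implicitStride >= xfb_offset + captured member size");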
// Track a set of strings describing how the module was processed.
// Using the form:
@ -217,7 +213,6 @@ class TSymbolTable;
class TSymbol;
class TVariable;
#ifdef NV_EXTENSIONS
//
// Texture and Sampler transformation mode.
//
@ -226,7 +221,6 @@ enum ComputeDerivativeMode {
LayoutDerivativeGroupQuads, // derivative_group_quadsNV
LayoutDerivativeGroupLinear, // derivative_group_linearNV
};
#endif
//
// Set of helper functions to help parse and build the tree.
@ -234,186 +228,59 @@ enum ComputeDerivativeMode {
class TIntermediate {
public:
explicit TIntermediate(EShLanguage l, int v = 0, EProfile p = ENoProfile) :
implicitThisName("@this"), implicitCounterName("@count"),
language(l), source(EShSourceNone), profile(p), version(v), treeRoot(0),
language(l),
profile(p), version(v), treeRoot(0),
numEntryPoints(0), numErrors(0), numPushConstants(0), recursive(false),
invertY(false),
useStorageBuffer(false),
nanMinMaxClamp(false),
depthReplacing(false)
#ifndef GLSLANG_WEB
,
implicitThisName("@this"), implicitCounterName("@count"),
source(EShSourceNone),
useVulkanMemoryModel(false),
invocations(TQualifier::layoutNotSet), vertices(TQualifier::layoutNotSet),
inputPrimitive(ElgNone), outputPrimitive(ElgNone),
pixelCenterInteger(false), originUpperLeft(false),
vertexSpacing(EvsNone), vertexOrder(EvoNone), pointMode(false), earlyFragmentTests(false),
postDepthCoverage(false), depthLayout(EldNone), depthReplacing(false),
vertexSpacing(EvsNone), vertexOrder(EvoNone), interlockOrdering(EioNone), pointMode(false), earlyFragmentTests(false),
postDepthCoverage(false), depthLayout(EldNone),
hlslFunctionality1(false),
blendEquations(0), xfbMode(false), multiStream(false),
#ifdef NV_EXTENSIONS
layoutOverrideCoverage(false),
geoPassthroughEXT(false),
numShaderRecordNVBlocks(0),
computeDerivativeMode(LayoutDerivativeNone),
primitives(TQualifier::layoutNotSet),
numTaskNVBlocks(0),
#endif
autoMapBindings(false),
autoMapLocations(false),
invertY(false),
flattenUniformArrays(false),
useUnknownFormat(false),
hlslOffsets(false),
useStorageBuffer(false),
useVulkanMemoryModel(false),
hlslIoMapping(false),
useVariablePointers(false),
textureSamplerTransformMode(EShTexSampTransKeep),
needToLegalize(false),
binaryDoubleOutput(false),
usePhysicalStorageBuffer(false),
uniformLocationBase(0)
#endif
{
localSize[0] = 1;
localSize[1] = 1;
localSize[2] = 1;
localSizeNotDefault[0] = false;
localSizeNotDefault[1] = false;
localSizeNotDefault[2] = false;
localSizeSpecId[0] = TQualifier::layoutNotSet;
localSizeSpecId[1] = TQualifier::layoutNotSet;
localSizeSpecId[2] = TQualifier::layoutNotSet;
#ifndef GLSLANG_WEB
xfbBuffers.resize(TQualifier::layoutXfbBufferEnd);
shiftBinding.fill(0);
#endif
}
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
void output(TInfoSink&, bool tree);
void removeTree();
void setSource(EShSource s) { source = s; }
EShSource getSource() const { return source; }
void setEntryPointName(const char* ep)
{
entryPointName = ep;
processes.addProcess("entry-point");
processes.addArgument(entryPointName);
}
void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
const std::string& getEntryPointName() const { return entryPointName; }
const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
void setShiftBinding(TResourceType res, unsigned int shift)
{
shiftBinding[res] = shift;
const char* name = getResourceName(res);
if (name != nullptr)
processes.addIfNonZero(name, shift);
}
unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
{
if (shift == 0) // ignore if there's no shift: it's a no-op.
return;
shiftBindingForSet[res][set] = shift;
const char* name = getResourceName(res);
if (name != nullptr) {
processes.addProcess(name);
processes.addArgument(shift);
processes.addArgument(set);
}
}
int getShiftBindingForSet(TResourceType res, unsigned int set) const
{
const auto shift = shiftBindingForSet[res].find(set);
return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
}
bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
void setResourceSetBinding(const std::vector<std::string>& shift)
{
resourceSetBinding = shift;
if (shift.size() > 0) {
processes.addProcess("resource-set-binding");
for (int s = 0; s < (int)shift.size(); ++s)
processes.addArgument(shift[s]);
}
}
const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
void setAutoMapBindings(bool map)
{
autoMapBindings = map;
if (autoMapBindings)
processes.addProcess("auto-map-bindings");
}
bool getAutoMapBindings() const { return autoMapBindings; }
void setAutoMapLocations(bool map)
{
autoMapLocations = map;
if (autoMapLocations)
processes.addProcess("auto-map-locations");
}
bool getAutoMapLocations() const { return autoMapLocations; }
void setInvertY(bool invert)
{
invertY = invert;
if (invertY)
processes.addProcess("invert-y");
}
bool getInvertY() const { return invertY; }
void setFlattenUniformArrays(bool flatten)
{
flattenUniformArrays = flatten;
if (flattenUniformArrays)
processes.addProcess("flatten-uniform-arrays");
}
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
void setNoStorageFormat(bool b)
{
useUnknownFormat = b;
if (useUnknownFormat)
processes.addProcess("no-storage-format");
}
bool getNoStorageFormat() const { return useUnknownFormat; }
void setHlslOffsets()
{
hlslOffsets = true;
if (hlslOffsets)
processes.addProcess("hlsl-offsets");
}
bool usingHlslOffsets() const { return hlslOffsets; }
void setUseStorageBuffer()
{
useStorageBuffer = true;
processes.addProcess("use-storage-buffer");
}
bool usingStorageBuffer() const { return useStorageBuffer; }
void setHlslIoMapping(bool b)
{
hlslIoMapping = b;
if (hlslIoMapping)
processes.addProcess("hlsl-iomap");
}
bool usingHlslIoMapping() { return hlslIoMapping; }
void setUseVulkanMemoryModel()
{
useVulkanMemoryModel = true;
processes.addProcess("use-vulkan-memory-model");
}
bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
void setUsePhysicalStorageBuffer()
{
usePhysicalStorageBuffer = true;
}
bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
bool hasCounterBufferName(const TString& name) const {
size_t len = strlen(implicitCounterName);
return name.size() > len &&
name.compare(name.size() - len, len, implicitCounterName) == 0;
}
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
void setVersion(int v) { version = v; }
int getVersion() const { return version; }
@ -444,6 +311,12 @@ public:
case EShTargetSpv_1_3:
processes.addProcess("target-env spirv1.3");
break;
case EShTargetSpv_1_4:
processes.addProcess("target-env spirv1.4");
break;
case EShTargetSpv_1_5:
processes.addProcess("target-env spirv1.5");
break;
default:
processes.addProcess("target-env spirvUnknown");
break;
@ -477,9 +350,35 @@ public:
int getNumEntryPoints() const { return numEntryPoints; }
int getNumErrors() const { return numErrors; }
void addPushConstantCount() { ++numPushConstants; }
#ifdef NV_EXTENSIONS
void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
void addTaskNVCount() { ++numTaskNVBlocks; }
void setLimits(const TBuiltInResource& r) { resources = r; }
bool postProcess(TIntermNode*, EShLanguage);
void removeTree();
void setEntryPointName(const char* ep)
{
entryPointName = ep;
processes.addProcess("entry-point");
processes.addArgument(entryPointName);
}
void setEntryPointMangledName(const char* ep) { entryPointMangledName = ep; }
const std::string& getEntryPointName() const { return entryPointName; }
const std::string& getEntryPointMangledName() const { return entryPointMangledName; }
void setInvertY(bool invert)
{
invertY = invert;
if (invertY)
processes.addProcess("invert-y");
}
bool getInvertY() const { return invertY; }
#ifdef ENABLE_HLSL
void setSource(EShSource s) { source = s; }
EShSource getSource() const { return source; }
#else
void setSource(EShSource s) { assert(s == EShSourceGlsl); }
EShSource getSource() const { return EShSourceGlsl; }
#endif
bool isRecursive() const { return recursive; }
@ -488,9 +387,10 @@ public:
TIntermSymbol* addSymbol(const TVariable&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TType&, const TSourceLoc&);
TIntermSymbol* addSymbol(const TIntermSymbol&);
TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*) const;
std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1) const;
TIntermTyped* addConversion(TOperator, const TType&, TIntermTyped*);
std::tuple<TIntermTyped*, TIntermTyped*> addConversion(TOperator op, TIntermTyped* node0, TIntermTyped* node1);
TIntermTyped* addUniShapeConversion(TOperator, const TType&, TIntermTyped*);
TIntermTyped* addConversion(TBasicType convertTo, TIntermTyped* node) const;
void addBiShapeConversion(TOperator, TIntermTyped*& lhsNode, TIntermTyped*& rhsNode);
TIntermTyped* addShapeConversion(const TType&, TIntermTyped*);
TIntermTyped* addBinaryMath(TOperator, TIntermTyped* left, TIntermTyped* right, TSourceLoc);
@ -557,6 +457,169 @@ public:
void addSymbolLinkageNodes(TIntermAggregate*& linkage, EShLanguage, TSymbolTable&);
void addSymbolLinkageNode(TIntermAggregate*& linkage, const TSymbol&);
void setUseStorageBuffer()
{
useStorageBuffer = true;
processes.addProcess("use-storage-buffer");
}
bool usingStorageBuffer() const { return useStorageBuffer; }
void setDepthReplacing() { depthReplacing = true; }
bool isDepthReplacing() const { return depthReplacing; }
bool setLocalSize(int dim, int size)
{
if (localSizeNotDefault[dim])
return size == localSize[dim];
localSizeNotDefault[dim] = true;
localSize[dim] = size;
return true;
}
unsigned int getLocalSize(int dim) const { return localSize[dim]; }
bool setLocalSizeSpecId(int dim, int id)
{
if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
return id == localSizeSpecId[dim];
localSizeSpecId[dim] = id;
return true;
}
int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
#ifdef GLSLANG_WEB
void output(TInfoSink&, bool tree) { }
bool isEsProfile() const { return false; }
bool getXfbMode() const { return false; }
bool isMultiStream() const { return false; }
TLayoutGeometry getOutputPrimitive() const { return ElgNone; }
bool getPostDepthCoverage() const { return false; }
bool getEarlyFragmentTests() const { return false; }
TLayoutDepth getDepth() const { return EldNone; }
bool getPixelCenterInteger() const { return false; }
void setOriginUpperLeft() { }
bool getOriginUpperLeft() const { return true; }
TInterlockOrdering getInterlockOrdering() const { return EioNone; }
bool getAutoMapBindings() const { return false; }
bool getAutoMapLocations() const { return false; }
int getNumPushConstants() const { return 0; }
void addShaderRecordNVCount() { }
void addTaskNVCount() { }
void setUseVulkanMemoryModel() { }
bool usingVulkanMemoryModel() const { return false; }
bool usingPhysicalStorageBuffer() const { return false; }
bool usingVariablePointers() const { return false; }
unsigned getXfbStride(int buffer) const { return 0; }
bool hasLayoutDerivativeModeNone() const { return false; }
ComputeDerivativeMode getLayoutDerivativeModeNone() const { return LayoutDerivativeNone; }
#else
void output(TInfoSink&, bool tree);
bool isEsProfile() const { return profile == EEsProfile; }
void setShiftBinding(TResourceType res, unsigned int shift)
{
shiftBinding[res] = shift;
const char* name = getResourceName(res);
if (name != nullptr)
processes.addIfNonZero(name, shift);
}
unsigned int getShiftBinding(TResourceType res) const { return shiftBinding[res]; }
void setShiftBindingForSet(TResourceType res, unsigned int shift, unsigned int set)
{
if (shift == 0) // ignore if there's no shift: it's a no-op.
return;
shiftBindingForSet[res][set] = shift;
const char* name = getResourceName(res);
if (name != nullptr) {
processes.addProcess(name);
processes.addArgument(shift);
processes.addArgument(set);
}
}
int getShiftBindingForSet(TResourceType res, unsigned int set) const
{
const auto shift = shiftBindingForSet[res].find(set);
return shift == shiftBindingForSet[res].end() ? -1 : shift->second;
}
bool hasShiftBindingForSet(TResourceType res) const { return !shiftBindingForSet[res].empty(); }
void setResourceSetBinding(const std::vector<std::string>& shift)
{
resourceSetBinding = shift;
if (shift.size() > 0) {
processes.addProcess("resource-set-binding");
for (int s = 0; s < (int)shift.size(); ++s)
processes.addArgument(shift[s]);
}
}
const std::vector<std::string>& getResourceSetBinding() const { return resourceSetBinding; }
void setAutoMapBindings(bool map)
{
autoMapBindings = map;
if (autoMapBindings)
processes.addProcess("auto-map-bindings");
}
bool getAutoMapBindings() const { return autoMapBindings; }
void setAutoMapLocations(bool map)
{
autoMapLocations = map;
if (autoMapLocations)
processes.addProcess("auto-map-locations");
}
bool getAutoMapLocations() const { return autoMapLocations; }
#ifdef ENABLE_HLSL
void setFlattenUniformArrays(bool flatten)
{
flattenUniformArrays = flatten;
if (flattenUniformArrays)
processes.addProcess("flatten-uniform-arrays");
}
bool getFlattenUniformArrays() const { return flattenUniformArrays; }
#endif
void setNoStorageFormat(bool b)
{
useUnknownFormat = b;
if (useUnknownFormat)
processes.addProcess("no-storage-format");
}
bool getNoStorageFormat() const { return useUnknownFormat; }
void setUseVulkanMemoryModel()
{
useVulkanMemoryModel = true;
processes.addProcess("use-vulkan-memory-model");
}
bool usingVulkanMemoryModel() const { return useVulkanMemoryModel; }
void setUsePhysicalStorageBuffer()
{
usePhysicalStorageBuffer = true;
}
bool usingPhysicalStorageBuffer() const { return usePhysicalStorageBuffer; }
void setUseVariablePointers()
{
useVariablePointers = true;
processes.addProcess("use-variable-pointers");
}
bool usingVariablePointers() const { return useVariablePointers; }
#ifdef ENABLE_HLSL
template<class T> T addCounterBufferName(const T& name) const { return name + implicitCounterName; }
bool hasCounterBufferName(const TString& name) const {
size_t len = strlen(implicitCounterName);
return name.size() > len &&
name.compare(name.size() - len, len, implicitCounterName) == 0;
}
#endif
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode) { textureSamplerTransformMode = mode; }
int getNumPushConstants() const { return numPushConstants; }
void addShaderRecordNVCount() { ++numShaderRecordNVBlocks; }
void addTaskNVCount() { ++numTaskNVBlocks; }
bool setInvocations(int i)
{
if (invocations != TQualifier::layoutNotSet)
@ -600,23 +663,14 @@ public:
void setPointMode() { pointMode = true; }
bool getPointMode() const { return pointMode; }
bool setLocalSize(int dim, int size)
bool setInterlockOrdering(TInterlockOrdering o)
{
if (localSize[dim] > 1)
return size == localSize[dim];
localSize[dim] = size;
if (interlockOrdering != EioNone)
return interlockOrdering == o;
interlockOrdering = o;
return true;
}
unsigned int getLocalSize(int dim) const { return localSize[dim]; }
bool setLocalSizeSpecId(int dim, int id)
{
if (localSizeSpecId[dim] != TQualifier::layoutNotSet)
return id == localSizeSpecId[dim];
localSizeSpecId[dim] = id;
return true;
}
int getLocalSizeSpecId(int dim) const { return localSizeSpecId[dim]; }
TInterlockOrdering getInterlockOrdering() const { return interlockOrdering; }
void setXfbMode() { xfbMode = true; }
bool getXfbMode() const { return xfbMode; }
@ -630,14 +684,10 @@ public:
return true;
}
TLayoutGeometry getOutputPrimitive() const { return outputPrimitive; }
void setOriginUpperLeft() { originUpperLeft = true; }
bool getOriginUpperLeft() const { return originUpperLeft; }
void setPixelCenterInteger() { pixelCenterInteger = true; }
bool getPixelCenterInteger() const { return pixelCenterInteger; }
void setEarlyFragmentTests() { earlyFragmentTests = true; }
bool getEarlyFragmentTests() const { return earlyFragmentTests; }
void setPostDepthCoverage() { postDepthCoverage = true; }
bool getPostDepthCoverage() const { return postDepthCoverage; }
void setEarlyFragmentTests() { earlyFragmentTests = true; }
bool getEarlyFragmentTests() const { return earlyFragmentTests; }
bool setDepth(TLayoutDepth d)
{
if (depthLayout != EldNone)
@ -646,29 +696,12 @@ public:
return true;
}
TLayoutDepth getDepth() const { return depthLayout; }
void setDepthReplacing() { depthReplacing = true; }
bool isDepthReplacing() const { return depthReplacing; }
void setHlslFunctionality1() { hlslFunctionality1 = true; }
bool getHlslFunctionality1() const { return hlslFunctionality1; }
void setOriginUpperLeft() { originUpperLeft = true; }
bool getOriginUpperLeft() const { return originUpperLeft; }
void setPixelCenterInteger() { pixelCenterInteger = true; }
bool getPixelCenterInteger() const { return pixelCenterInteger; }
void addBlendEquation(TBlendEquationShift b) { blendEquations |= (1 << b); }
unsigned int getBlendEquations() const { return blendEquations; }
void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
void merge(TInfoSink&, TIntermediate&);
void finalCheck(TInfoSink&, bool keepUncalled);
void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
int addUsedOffsets(int binding, int offset, int numOffsets);
bool addUsedConstantId(int id);
static int computeTypeLocationSize(const TType&, EShLanguage);
static int computeTypeUniformLocationSize(const TType&);
bool setXfbBufferStride(int buffer, unsigned stride)
{
if (xfbBuffers[buffer].stride != TQualifier::layoutXfbStrideEnd)
@ -678,24 +711,14 @@ public:
}
unsigned getXfbStride(int buffer) const { return xfbBuffers[buffer].stride; }
int addXfbBufferOffset(const TType&);
#ifdef AMD_EXTENSIONS
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType, bool& contains32BitType, bool& contains16BitType) const;
#else
unsigned int computeTypeXfbSize(const TType&, bool& contains64BitType) const;
#endif
static int getBaseAlignmentScalar(const TType&, int& size);
static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static bool improperStraddle(const TType& type, int size, int offset);
bool promote(TIntermOperator*);
#ifdef NV_EXTENSIONS
void setLayoutOverrideCoverage() { layoutOverrideCoverage = true; }
bool getLayoutOverrideCoverage() const { return layoutOverrideCoverage; }
void setGeoPassthroughEXT() { geoPassthroughEXT = true; }
bool getGeoPassthroughEXT() const { return geoPassthroughEXT; }
void setLayoutDerivativeMode(ComputeDerivativeMode mode) { computeDerivativeMode = mode; }
bool hasLayoutDerivativeModeNone() const { return computeDerivativeMode != LayoutDerivativeNone; }
ComputeDerivativeMode getLayoutDerivativeModeNone() const { return computeDerivativeMode; }
bool setPrimitives(int m)
{
@ -705,28 +728,10 @@ public:
return true;
}
int getPrimitives() const { return primitives; }
#endif
const char* addSemanticName(const TString& name)
{
return semanticNameSet.insert(name).first->c_str();
}
void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
const std::string& getSourceFile() const { return sourceFile; }
void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
const std::string& getSourceText() const { return sourceText; }
const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text,len); }
void addProcesses(const std::vector<std::string>& p)
{
for (int i = 0; i < (int)p.size(); ++i)
processes.addProcess(p[i]);
}
void addProcess(const std::string& process) { processes.addProcess(process); }
void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
void addUniformLocationOverride(const char* nameStr, int location)
{
std::string name = nameStr;
@ -751,9 +756,98 @@ public:
void setBinaryDoubleOutput() { binaryDoubleOutput = true; }
bool getBinaryDoubleOutput() { return binaryDoubleOutput; }
#endif // GLSLANG_WEB
const char* const implicitThisName;
const char* const implicitCounterName;
#ifdef ENABLE_HLSL
void setHlslFunctionality1() { hlslFunctionality1 = true; }
bool getHlslFunctionality1() const { return hlslFunctionality1; }
void setHlslOffsets()
{
hlslOffsets = true;
if (hlslOffsets)
processes.addProcess("hlsl-offsets");
}
bool usingHlslOffsets() const { return hlslOffsets; }
void setHlslIoMapping(bool b)
{
hlslIoMapping = b;
if (hlslIoMapping)
processes.addProcess("hlsl-iomap");
}
bool usingHlslIoMapping() { return hlslIoMapping; }
#else
bool getHlslFunctionality1() const { return false; }
bool usingHlslOffsets() const { return false; }
bool usingHlslIoMapping() { return false; }
#endif
void addToCallGraph(TInfoSink&, const TString& caller, const TString& callee);
void merge(TInfoSink&, TIntermediate&);
void finalCheck(TInfoSink&, bool keepUncalled);
bool buildConvertOp(TBasicType dst, TBasicType src, TOperator& convertOp) const;
TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
void addIoAccessed(const TString& name) { ioAccessed.insert(name); }
bool inIoAccessed(const TString& name) const { return ioAccessed.find(name) != ioAccessed.end(); }
int addUsedLocation(const TQualifier&, const TType&, bool& typeCollision);
int checkLocationRange(int set, const TIoRange& range, const TType&, bool& typeCollision);
int addUsedOffsets(int binding, int offset, int numOffsets);
bool addUsedConstantId(int id);
static int computeTypeLocationSize(const TType&, EShLanguage);
static int computeTypeUniformLocationSize(const TType&);
static int getBaseAlignmentScalar(const TType&, int& size);
static int getBaseAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static int getScalarAlignment(const TType&, int& size, int& stride, bool rowMajor);
static int getMemberAlignment(const TType&, int& size, int& stride, TLayoutPacking layoutPacking, bool rowMajor);
static bool improperStraddle(const TType& type, int size, int offset);
static void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize);
static int getOffset(const TType& type, int index);
static int getBlockSize(const TType& blockType);
static int computeBufferReferenceTypeSize(const TType&);
bool promote(TIntermOperator*);
void setNanMinMaxClamp(bool setting) { nanMinMaxClamp = setting; }
bool getNanMinMaxClamp() const { return nanMinMaxClamp; }
void setSourceFile(const char* file) { if (file != nullptr) sourceFile = file; }
const std::string& getSourceFile() const { return sourceFile; }
void addSourceText(const char* text, size_t len) { sourceText.append(text, len); }
const std::string& getSourceText() const { return sourceText; }
const std::map<std::string, std::string>& getIncludeText() const { return includeText; }
void addIncludeText(const char* name, const char* text, size_t len) { includeText[name].assign(text,len); }
void addProcesses(const std::vector<std::string>& p)
{
for (int i = 0; i < (int)p.size(); ++i)
processes.addProcess(p[i]);
}
void addProcess(const std::string& process) { processes.addProcess(process); }
void addProcessArgument(const std::string& arg) { processes.addArgument(arg); }
const std::vector<std::string>& getProcesses() const { return processes.getProcesses(); }
// Certain explicit conversions are allowed conditionally
#ifdef GLSLANG_WEB
bool getArithemeticInt8Enabled() const { return false; }
bool getArithemeticInt16Enabled() const { return false; }
bool getArithemeticFloat16Enabled() const { return false; }
#else
bool getArithemeticInt8Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int8);
}
bool getArithemeticInt16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_AMD_gpu_shader_int16) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_int16);
}
bool getArithemeticFloat16Enabled() const {
return extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types) ||
extensionRequested(E_GL_AMD_gpu_shader_half_float) ||
extensionRequested(E_GL_EXT_shader_explicit_arithmetic_types_float16);
}
#endif
protected:
TIntermSymbol* addSymbol(int Id, const TString&, const TType&, const TConstUnionArray&, TIntermTyped* subtree, const TSourceLoc&);
@ -784,13 +878,21 @@ protected:
bool specConstantPropagates(const TIntermTyped&, const TIntermTyped&);
void performTextureUpgradeAndSamplerRemovalTransformation(TIntermNode* root);
bool isConversionAllowed(TOperator op, TIntermTyped* node) const;
TIntermTyped* createConversion(TBasicType convertTo, TIntermTyped* node) const;
std::tuple<TBasicType, TBasicType> getConversionDestinatonType(TBasicType type0, TBasicType type1, TOperator op) const;
// JohnK: I think this function should go away.
// This data structure is just a log to pass on to back ends.
// Versioning and extensions are handled in Version.cpp, with a rich
// set of functions for querying stages, versions, extension enable/disabled, etc.
#ifdef GLSLANG_WEB
bool extensionRequested(const char *extension) const { return false; }
#else
bool extensionRequested(const char *extension) const {return requestedExtensions.find(extension) != requestedExtensions.end();}
#endif
static const char* getResourceName(TResourceType);
const EShLanguage language; // stage, known at construction time
EShSource source; // source language, known a bit later
std::string entryPointName;
std::string entryPointMangledName;
typedef std::list<TCall> TGraph;
@ -806,6 +908,20 @@ protected:
int numErrors;
int numPushConstants;
bool recursive;
bool invertY;
bool useStorageBuffer;
bool nanMinMaxClamp; // true if desiring min/max/clamp to favor non-NaN over NaN
bool depthReplacing;
int localSize[3];
bool localSizeNotDefault[3];
int localSizeSpecId[3];
#ifndef GLSLANG_WEB
public:
const char* const implicitThisName;
const char* const implicitCounterName;
protected:
EShSource source; // source language, known a bit later
bool useVulkanMemoryModel;
int invocations;
int vertices;
TLayoutGeometry inputPrimitive;
@ -814,27 +930,22 @@ protected:
bool originUpperLeft;
TVertexSpacing vertexSpacing;
TVertexOrder vertexOrder;
TInterlockOrdering interlockOrdering;
bool pointMode;
int localSize[3];
int localSizeSpecId[3];
bool earlyFragmentTests;
bool postDepthCoverage;
TLayoutDepth depthLayout;
bool depthReplacing;
bool hlslFunctionality1;
int blendEquations; // an 'or'ing of masks of shifts of TBlendEquationShift
bool xfbMode;
std::vector<TXfbBuffer> xfbBuffers; // all the data we need to track per xfb buffer
bool multiStream;
#ifdef NV_EXTENSIONS
bool layoutOverrideCoverage;
bool geoPassthroughEXT;
int numShaderRecordNVBlocks;
ComputeDerivativeMode computeDerivativeMode;
int primitives;
int numTaskNVBlocks;
#endif
// Base shift values
std::array<unsigned int, EResCount> shiftBinding;
@ -845,22 +956,29 @@ protected:
std::vector<std::string> resourceSetBinding;
bool autoMapBindings;
bool autoMapLocations;
bool invertY;
bool flattenUniformArrays;
bool useUnknownFormat;
bool hlslOffsets;
bool useStorageBuffer;
bool useVulkanMemoryModel;
bool hlslIoMapping;
bool useVariablePointers;
std::set<TString> ioAccessed; // set of names of statically read/written I/O that might need extra checking
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::set<TString> semanticNameSet;
EShTextureSamplerTransformMode textureSamplerTransformMode;
bool needToLegalize;
bool binaryDoubleOutput;
bool usePhysicalStorageBuffer;
std::unordered_map<std::string, int> uniformLocationOverrides;
int uniformLocationBase;
#endif
std::unordered_set<int> usedConstantId; // specialization constant ids used
std::vector<TOffsetRange> usedAtomics; // sets of bindings used by atomic counters
std::vector<TIoRange> usedIo[4]; // sets of used locations, one for each of in, out, uniform, and buffers
// set of names of statically read/written I/O that might need extra checking
std::set<TString> ioAccessed;
// source code of shader, useful as part of debug information
std::string sourceFile;
std::string sourceText;
@ -871,13 +989,6 @@ protected:
// for OpModuleProcessed, or equivalent
TProcesses processes;
bool needToLegalize;
bool binaryDoubleOutput;
bool usePhysicalStorageBuffer;
std::unordered_map<std::string, int> uniformLocationOverrides;
int uniformLocationBase;
private:
void operator=(TIntermediate&); // prevent assignments
};

View file

@ -57,26 +57,91 @@ public:
TParseVersions(TIntermediate& interm, int version, EProfile profile,
const SpvVersion& spvVersion, EShLanguage language, TInfoSink& infoSink,
bool forwardCompatible, EShMessages messages)
: infoSink(infoSink), version(version), profile(profile), language(language),
spvVersion(spvVersion), forwardCompatible(forwardCompatible),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
:
#ifndef GLSLANG_WEB
forwardCompatible(forwardCompatible),
profile(profile),
#endif
infoSink(infoSink), version(version),
language(language),
spvVersion(spvVersion),
intermediate(interm), messages(messages), numErrors(0), currentScanner(0) { }
virtual ~TParseVersions() { }
void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
#ifdef GLSLANG_WEB
const EProfile profile = EEsProfile;
bool isEsProfile() const { return true; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc)
{
if (! (EEsProfile & profileMask))
error(loc, "not supported with this profile:", featureDesc, ProfileName(profile));
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc)
{
if ((EEsProfile & profileMask) && (minVersion == 0 || version < minVersion))
error(loc, "not supported for this version or the enabled extensions", featureDesc, "");
}
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc)
{
profileRequires(loc, profileMask, minVersion, extension ? 1 : 0, &extension, featureDesc);
}
void initializeExtensionBehavior() { }
void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc) { }
void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc) { }
void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc) { }
TExtensionBehavior getExtensionBehavior(const char*) { return EBhMissing; }
bool extensionTurnedOn(const char* const extension) { return false; }
bool extensionsTurnedOn(int numExtensions, const char* const extensions[]) { return false; }
void updateExtensionBehavior(int line, const char* const extension, const char* behavior) { }
void updateExtensionBehavior(const char* const extension, TExtensionBehavior) { }
void checkExtensionStage(const TSourceLoc&, const char* const extension) { }
void fullIntegerCheck(const TSourceLoc&, const char* op) { }
void doubleCheck(const TSourceLoc&, const char* op) { }
bool float16Arithmetic() { return false; }
void requireFloat16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int16Arithmetic() { return false; }
void requireInt16Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
bool int8Arithmetic() { return false; }
void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc) { }
void int64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false) { }
bool relaxedErrors() const { return false; }
bool suppressWarnings() const { return true; }
bool isForwardCompatible() const { return false; }
#else
bool forwardCompatible; // true if errors are to be given for use of deprecated features
EProfile profile; // the declared profile in the shader (core by default)
bool isEsProfile() const { return profile == EEsProfile; }
void requireProfile(const TSourceLoc& loc, int profileMask, const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, int numExtensions,
const char* const extensions[], const char* featureDesc);
void profileRequires(const TSourceLoc& loc, int profileMask, int minVersion, const char* extension,
const char* featureDesc);
virtual void initializeExtensionBehavior();
virtual void requireProfile(const TSourceLoc&, int queryProfiles, const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void profileRequires(const TSourceLoc&, int queryProfiles, int minVersion, const char* const extension, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguageMask, const char* featureDesc);
virtual void requireStage(const TSourceLoc&, EShLanguage, const char* featureDesc);
virtual void checkDeprecated(const TSourceLoc&, int queryProfiles, int depVersion, const char* featureDesc);
virtual void requireNotRemoved(const TSourceLoc&, int queryProfiles, int removedVersion, const char* featureDesc);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void requireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void ppRequireExtensions(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual TExtensionBehavior getExtensionBehavior(const char*);
virtual bool extensionTurnedOn(const char* const extension);
virtual bool extensionsTurnedOn(int numExtensions, const char* const extensions[]);
virtual void updateExtensionBehavior(int line, const char* const extension, const char* behavior);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[],
const char* featureDesc);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
virtual void fullIntegerCheck(const TSourceLoc&, const char* op);
virtual void unimplemented(const TSourceLoc&, const char* featureDesc);
virtual void doubleCheck(const TSourceLoc&, const char* op);
virtual void float16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void float16ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
@ -88,23 +153,35 @@ public:
virtual void int8ScalarVectorCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual bool int8Arithmetic();
virtual void requireInt8Arithmetic(const TSourceLoc& loc, const char* op, const char* featureDesc);
#ifdef AMD_EXTENSIONS
virtual void float16OpaqueCheck(const TSourceLoc&, const char* op, bool builtIn = false);
#endif
virtual void int64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt8Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt16Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitInt32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat32Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void explicitFloat64Check(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void fcoopmatCheck(const TSourceLoc&, const char* op, bool builtIn = false);
virtual void intcoopmatCheck(const TSourceLoc&, const char *op, bool builtIn = false);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
bool isForwardCompatible() const { return forwardCompatible; }
#endif // GLSLANG_WEB
virtual void spvRemoved(const TSourceLoc&, const char* op);
virtual void vulkanRemoved(const TSourceLoc&, const char* op);
virtual void requireVulkan(const TSourceLoc&, const char* op);
virtual void requireSpv(const TSourceLoc&, const char* op);
virtual bool checkExtensionsRequested(const TSourceLoc&, int numExtensions, const char* const extensions[], const char* featureDesc);
virtual void updateExtensionBehavior(const char* const extension, TExtensionBehavior);
virtual void checkExtensionStage(const TSourceLoc&, const char* const extension);
#if defined(GLSLANG_WEB) && !defined(GLSLANG_WEB_DEVEL)
void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
void C_DECL ppError(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { addError(); }
void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) { }
#else
virtual void C_DECL error(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL warn(const TSourceLoc&, const char* szReason, const char* szToken,
@ -113,6 +190,7 @@ public:
const char* szExtraInfoFormat, ...) = 0;
virtual void C_DECL ppWarn(const TSourceLoc&, const char* szReason, const char* szToken,
const char* szExtraInfoFormat, ...) = 0;
#endif
void addError() { ++numErrors; }
int getNumErrors() const { return numErrors; }
@ -126,20 +204,20 @@ public:
void setCurrentString(int string) { currentScanner->setString(string); }
void getPreamble(std::string&);
bool relaxedErrors() const { return (messages & EShMsgRelaxedErrors) != 0; }
bool suppressWarnings() const { return (messages & EShMsgSuppressWarnings) != 0; }
#ifdef ENABLE_HLSL
bool isReadingHLSL() const { return (messages & EShMsgReadHlsl) == EShMsgReadHlsl; }
bool hlslEnable16BitTypes() const { return (messages & EShMsgHlslEnable16BitTypes) != 0; }
bool hlslDX9Compatible() const { return (messages & EShMsgHlslDX9Compatible) != 0; }
#else
bool isReadingHLSL() const { return false; }
#endif
TInfoSink& infoSink;
// compilation mode
int version; // version, updated by #version in the shader
EProfile profile; // the declared profile in the shader (core by default)
EShLanguage language; // really the stage
SpvVersion spvVersion;
bool forwardCompatible; // true if errors are to be given for use of deprecated features
TIntermediate& intermediate; // helper for making and hooking up pieces of the parse tree
protected:

View file

@ -147,6 +147,10 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
}
token = scanToken(ppToken);
} else if (token != '\n' && token != EndOfInput && !ppToken->space) {
parseContext.ppWarn(ppToken->loc, "missing space after macro name", "#define", "");
return token;
}
// record the definition of the macro
@ -162,29 +166,43 @@ int TPpContext::CPPdefine(TPpToken* ppToken)
if (existing != nullptr) {
if (! existing->undef) {
// Already defined -- need to make sure they are identical:
// "Two replacement lists are identical if and only if the preprocessing tokens in both have the same number,
// ordering, spelling, and white-space separation, where all white-space separations are considered identical."
if (existing->functionLike != mac.functionLike)
parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define", atomStrings.getString(defAtom));
else if (existing->args.size() != mac.args.size())
parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define", atomStrings.getString(defAtom));
else {
if (existing->args != mac.args)
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define", atomStrings.getString(defAtom));
// "Two replacement lists are identical if and only if the
// preprocessing tokens in both have the same number,
// ordering, spelling, and white-space separation, where all
// white-space separations are considered identical."
if (existing->functionLike != mac.functionLike) {
parseContext.ppError(defineLoc, "Macro redefined; function-like versus object-like:", "#define",
atomStrings.getString(defAtom));
} else if (existing->args.size() != mac.args.size()) {
parseContext.ppError(defineLoc, "Macro redefined; different number of arguments:", "#define",
atomStrings.getString(defAtom));
} else {
if (existing->args != mac.args) {
parseContext.ppError(defineLoc, "Macro redefined; different argument names:", "#define",
atomStrings.getString(defAtom));
}
// set up to compare the two
existing->body.reset();
mac.body.reset();
int newToken;
bool firstToken = true;
do {
int oldToken;
TPpToken oldPpToken;
TPpToken newPpToken;
oldToken = existing->body.getToken(parseContext, &oldPpToken);
newToken = mac.body.getToken(parseContext, &newPpToken);
// for the first token, preceding spaces don't matter
if (firstToken) {
newPpToken.space = oldPpToken.space;
firstToken = false;
}
if (oldToken != newToken || oldPpToken != newPpToken) {
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define", atomStrings.getString(defAtom));
parseContext.ppError(defineLoc, "Macro redefined; different substitutions:", "#define",
atomStrings.getString(defAtom));
break;
}
} while (newToken > 0);
} while (newToken != EndOfInput);
}
}
*existing = mac;
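// Illustrative sketch, not part of the upstream change: the identity rule
// enforced above means a macro may only be redefined with a replacement list
// that matches token-for-token, including white-space separation, e.g.:
//
//   #define N 4        // original definition
//   #define N 4        // OK: identical replacement list
//   #define N (4)      // error: different substitutions
//   #define N(x) 4     // error: function-like versus object-like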
@ -527,7 +545,7 @@ int TPpContext::evalToToken(int token, bool shortCircuit, int& res, bool& err, T
case MacroExpandStarted:
break;
case MacroExpandUndef:
if (! shortCircuit && parseContext.profile == EEsProfile) {
if (! shortCircuit && parseContext.isEsProfile()) {
const char* message = "undefined macro in expression not allowed in es profile";
if (parseContext.relaxedErrors())
parseContext.ppWarn(ppToken->loc, message, "preprocessor evaluation", ppToken->name);
@ -704,6 +722,7 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentLine(lineRes);
if (token != '\n') {
#ifndef GLSLANG_WEB
if (token == PpAtomConstString) {
parseContext.ppRequireExtensions(directiveLoc, 1, &E_GL_GOOGLE_cpp_style_line_directive, "filename-based #line");
// We need to save a copy of the string instead of pointing
@ -713,7 +732,9 @@ int TPpContext::CPPline(TPpToken* ppToken)
parseContext.setCurrentSourceName(sourceName);
hasFile = true;
token = scanToken(ppToken);
} else {
} else
#endif
{
token = eval(token, MIN_PRECEDENCE, false, fileRes, fileErr, ppToken);
if (! fileErr) {
parseContext.setCurrentString(fileRes);
@ -774,10 +795,8 @@ int TPpContext::CPPpragma(TPpToken* ppToken)
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
@ -862,8 +881,7 @@ int TPpContext::CPPextension(TPpToken* ppToken)
if (token != PpAtomIdentifier)
parseContext.ppError(ppToken->loc, "extension name expected", "#extension", "");
assert(strlen(ppToken->name) <= MaxTokenLength);
strcpy(extensionName, ppToken->name);
snprintf(extensionName, sizeof(extensionName), "%s", ppToken->name);
token = scanToken(ppToken);
if (token != ':') {
@ -937,18 +955,20 @@ int TPpContext::readCPPline(TPpToken* ppToken)
case PpAtomIfndef:
token = CPPifdef(0, ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
#ifndef GLSLANG_WEB
case PpAtomInclude:
if(!parseContext.isReadingHLSL()) {
parseContext.ppRequireExtensions(ppToken->loc, 1, &E_GL_GOOGLE_include_directive, "#include");
}
token = CPPinclude(ppToken);
break;
case PpAtomLine:
token = CPPline(ppToken);
break;
case PpAtomPragma:
token = CPPpragma(ppToken);
break;
#endif
case PpAtomUndef:
token = CPPundef(ppToken);
break;
@ -1023,7 +1043,9 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
case MacroExpandNotStarted:
break;
case MacroExpandError:
token = EndOfInput;
// toss the rest of the pushed-input argument by scanning until tMarkerInput
while ((token = scanToken(ppToken)) != tMarkerInput::marker && token != EndOfInput)
;
break;
case MacroExpandStarted:
case MacroExpandUndef:
@ -1035,13 +1057,10 @@ TPpContext::TokenStream* TPpContext::PrescanMacroArg(TokenStream& arg, TPpToken*
expandedArg->putToken(token, ppToken);
}
if (token == EndOfInput) {
if (token != tMarkerInput::marker) {
// Error, or MacroExpand ate the marker, so had bad input, recover
delete expandedArg;
expandedArg = nullptr;
} else {
// remove the marker
popInput();
}
return expandedArg;
@ -1258,7 +1277,7 @@ MacroExpandResult TPpContext::MacroExpand(TPpToken* ppToken, bool expandUndef, b
if (token == ')') {
// closing paren of call
if (in->mac->args.size() == 1 && tokenRecorded == 0)
if (in->mac->args.size() == 1 && !tokenRecorded)
break;
arg++;
break;

View file

@ -84,6 +84,7 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <sstream>
#include "../ParseHelper.h"
#include "PpTokens.h"
/* windows only pragma */
#ifdef _MSC_VER
@ -212,7 +213,8 @@ public:
virtual int scan(TPpToken*) = 0;
virtual int getch() = 0;
virtual void ungetch() = 0;
virtual bool peekPasting() { return false; } // true when about to see ##
virtual bool peekPasting() { return false; } // true when about to see ##
virtual bool peekContinuedPasting(int) { return false; } // true when non-spaced tokens can paste
virtual bool endOfReplacementList() { return false; } // true when at the end of a macro replacement list (RHS of #define)
virtual bool isMacroInput() { return false; }
@ -243,24 +245,79 @@ public:
// From PpTokens.cpp
//
// Capture the needed parts of a token stream for macro recording/playback.
class TokenStream {
public:
TokenStream() : current(0) { }
// Manage a stream of these 'Token', which capture the relevant parts
// of a TPpToken, plus its atom.
class Token {
public:
Token(int atom, const TPpToken& ppToken) :
atom(atom),
space(ppToken.space),
i64val(ppToken.i64val),
name(ppToken.name) { }
int get(TPpToken& ppToken)
{
ppToken.clear();
ppToken.space = space;
ppToken.i64val = i64val;
snprintf(ppToken.name, sizeof(ppToken.name), "%s", name.c_str());
return atom;
}
bool isAtom(int a) const { return atom == a; }
int getAtom() const { return atom; }
bool nonSpaced() const { return !space; }
protected:
Token() {}
int atom;
bool space; // did a space precede the token?
long long i64val;
TString name;
};
TokenStream() : currentPos(0) { }
void putToken(int token, TPpToken* ppToken);
bool peekToken(int atom) { return !atEnd() && stream[currentPos].isAtom(atom); }
bool peekContinuedPasting(int atom)
{
// This is basically necessary because, for example, the PP
// tokenizer only accepts valid numeric-literals plus suffixes, so
// it separates a numeric-literal plus a bad suffix into two tokens,
// which should both get pasted back together as one token when
// token pasting.
//
// The following code is a bit more generalized than the above example.
if (!atEnd() && atom == PpAtomIdentifier && stream[currentPos].nonSpaced()) {
switch(stream[currentPos].getAtom()) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
case PpAtomConstInt16:
case PpAtomConstUint16:
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
case PpAtomConstString:
case PpAtomIdentifier:
return true;
default:
break;
}
}
return false;
}
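// Illustrative sketch, not part of the upstream change: the scanner stops at
// an invalid numeric suffix, so a spelling such as "3A" in a replacement list
// is stored as the two tokens "3" and "A", with "A" marked non-spaced. When
// the token built so far by pasting is an identifier, this check reports true
// for such a trailing non-spaced constant or identifier, letting tokenPaste()
// glue the pieces back into the single intended token.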
int getToken(TParseContextBase&, TPpToken*);
bool atEnd() { return current >= data.size(); }
bool atEnd() { return currentPos >= stream.size(); }
bool peekTokenizedPasting(bool lastTokenPastes);
bool peekUntokenizedPasting();
void reset() { current = 0; }
void reset() { currentPos = 0; }
protected:
void putSubtoken(char);
int getSubtoken();
void ungetSubtoken();
TVector<unsigned char> data;
size_t current;
TVector<Token> stream;
size_t currentPos;
};
//
@ -320,6 +377,10 @@ protected:
int getChar() { return inputStack.back()->getch(); }
void ungetChar() { inputStack.back()->ungetch(); }
bool peekPasting() { return !inputStack.empty() && inputStack.back()->peekPasting(); }
bool peekContinuedPasting(int a)
{
return !inputStack.empty() && inputStack.back()->peekContinuedPasting(a);
}
bool endOfReplacementList() { return inputStack.empty() || inputStack.back()->endOfReplacementList(); }
bool isMacroInput() { return inputStack.size() > 0 && inputStack.back()->isMacroInput(); }
@ -344,6 +405,7 @@ protected:
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
bool peekPasting() override { return prepaste; }
bool peekContinuedPasting(int a) override { return mac->body.peekContinuedPasting(a); }
bool endOfReplacementList() override { return mac->body.atEnd(); }
bool isMacroInput() override { return true; }
@ -418,14 +480,18 @@ protected:
class tTokenInput : public tInput {
public:
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) : tInput(pp), tokens(t), lastTokenPastes(prepasting) { }
tTokenInput(TPpContext* pp, TokenStream* t, bool prepasting) :
tInput(pp),
tokens(t),
lastTokenPastes(prepasting) { }
virtual int scan(TPpToken *ppToken) override { return tokens->getToken(pp->parseContext, ppToken); }
virtual int getch() override { assert(0); return EndOfInput; }
virtual void ungetch() override { assert(0); }
virtual bool peekPasting() override { return tokens->peekTokenizedPasting(lastTokenPastes); }
bool peekContinuedPasting(int a) override { return tokens->peekContinuedPasting(a); }
protected:
TokenStream* tokens;
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
bool lastTokenPastes; // true if the last token in the input is to be pasted, rather than consumed as a token
};
class tUngotTokenInput : public tInput {

View file

@ -96,12 +96,19 @@ namespace glslang {
/////////////////////////////////// Floating point constants: /////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////
/*
* lFloatConst() - Scan a single- or double-precision floating point constant. Assumes that the scanner
* has seen at least one digit, followed by either a decimal '.' or the
* letter 'e', or a precision ending (e.g., F or LF).
*/
//
// Scan a single- or double-precision floating point constant.
// Assumes that the scanner has seen at least one digit,
// followed by either a decimal '.' or the letter 'e', or a
// precision ending (e.g., F or LF).
//
// This is technically not correct, as the preprocessor should just
// accept the numeric literal along with whatever suffix it has, but
// currently, it stops on seeing a bad suffix, treating that as the
// next token. This effects things like token pasting, where it is
// relevant how many tokens something was broken into.
//
// See peekContinuedPasting().
int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
{
const auto saveName = [&](int ch) {
@ -135,6 +142,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
ch = getChar();
int firstDecimal = len;
#ifdef ENABLE_HLSL
// 1.#INF or -1.#INF
if (ch == '#' && (ifdepth > 0 || parseContext.intermediate.getSource() == EShSourceHlsl)) {
if ((len < 2) ||
@ -162,6 +170,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
}
}
}
#endif
// Consume leading-zero digits after the decimal point
while (ch == '0') {
@ -250,6 +259,7 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
// Suffix:
bool isDouble = false;
bool isFloat16 = false;
#ifndef GLSLANG_WEB
if (ch == 'l' || ch == 'L') {
if (ifdepth == 0 && parseContext.intermediate.getSource() == EShSourceGlsl)
parseContext.doubleCheck(ppToken->loc, "double floating-point suffix");
@ -288,11 +298,15 @@ int TPpContext::lFloatConst(int len, int ch, TPpToken* ppToken)
saveName(ch);
isFloat16 = true;
}
} else if (ch == 'f' || ch == 'F') {
} else
#endif
if (ch == 'f' || ch == 'F') {
#ifndef GLSLANG_WEB
if (ifdepth == 0)
parseContext.profileRequires(ppToken->loc, EEsProfile, 300, nullptr, "floating-point suffix");
if (ifdepth == 0 && !parseContext.relaxedErrors())
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 120, nullptr, "floating-point suffix");
#endif
if (ifdepth == 0 && !hasDecimalOrExponent)
parseContext.ppError(ppToken->loc, "float literal needs a decimal point or exponent", "", "");
saveName(ch);
@ -435,6 +449,14 @@ int TPpContext::characterLiteral(TPpToken* ppToken)
//
// Scanner used to tokenize source stream.
//
// N.B. Invalid numeric suffixes are not consumed.
//
// This is technically not correct, as the preprocessor should just
// accept the numeric literal along with whatever suffix it has, but
// currently, it stops on seeing a bad suffix, treating that as the
// next token. This affects things like token pasting, where it is
// relevant how many tokens something was broken into.
// See peekContinuedPasting().
//
int TPpContext::tStringInput::scan(TPpToken* ppToken)
{
int AlreadyComplained = 0;
@ -453,9 +475,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
static const int Num_Int64_Extensions = sizeof(Int64_Extensions) / sizeof(Int64_Extensions[0]);
static const char* const Int16_Extensions[] = {
#ifdef AMD_EXTENSIONS
E_GL_AMD_gpu_shader_int16,
#endif
E_GL_EXT_shader_explicit_arithmetic_types,
E_GL_EXT_shader_explicit_arithmetic_types_int16 };
static const int Num_Int16_Extensions = sizeof(Int16_Extensions) / sizeof(Int16_Extensions[0]);
@ -564,6 +584,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -572,7 +593,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -581,12 +601,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -674,6 +692,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -682,7 +701,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -691,12 +709,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -765,6 +781,7 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
ppToken->name[len++] = (char)ch;
isUnsigned = true;
#ifndef GLSLANG_WEB
int nextCh = getch();
if (nextCh == 'l' || nextCh == 'L') {
if (len < MaxTokenLength)
@ -773,7 +790,6 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
} else
ungetch();
#ifdef AMD_EXTENSIONS
nextCh = getch();
if ((nextCh == 's' || nextCh == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
@ -782,12 +798,10 @@ int TPpContext::tStringInput::scan(TPpToken* ppToken)
isInt16 = true;
} else
ungetch();
#endif
} else if (ch == 'l' || ch == 'L') {
if (len < MaxTokenLength)
ppToken->name[len++] = (char)ch;
isInt64 = true;
#ifdef AMD_EXTENSIONS
} else if ((ch == 's' || ch == 'S') &&
pp->parseContext.intermediate.getSource() == EShSourceGlsl) {
if (len < MaxTokenLength)
@ -1116,7 +1130,7 @@ int TPpContext::tokenize(TPpToken& ppToken)
parseContext.ppError(ppToken.loc, "character literals not supported", "\'", "");
continue;
default:
strcpy(ppToken.name, atomStrings.getString(token));
snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(token));
break;
}
@ -1153,61 +1167,69 @@ int TPpContext::tokenPaste(int token, TPpToken& ppToken)
break;
}
// get the token after the ##
token = scanToken(&pastedPpToken);
// Get the token(s) after the ##.
// Because of "space" semantics, and prior tokenization, what
// appeared to be a single token, e.g. "3A", might have been tokenized
// into two tokens "3" and "A", but the "A" will have 'space' set to
// false. Accumulate all of these to recreate the original
// lexically-appearing token.
do {
token = scanToken(&pastedPpToken);
// This covers end of argument expansion
if (token == tMarkerInput::marker) {
parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
break;
}
// This covers end of argument expansion
if (token == tMarkerInput::marker) {
parseContext.ppError(ppToken.loc, "unexpected location; end of argument", "##", "");
return resultToken;
}
// get the token text
switch (resultToken) {
case PpAtomIdentifier:
// already have the correct text in token.names
break;
case '=':
case '!':
case '-':
case '~':
case '+':
case '*':
case '/':
case '%':
case '<':
case '>':
case '|':
case '^':
case '&':
case PpAtomRight:
case PpAtomLeft:
case PpAtomAnd:
case PpAtomOr:
case PpAtomXor:
strcpy(ppToken.name, atomStrings.getString(resultToken));
strcpy(pastedPpToken.name, atomStrings.getString(token));
break;
default:
parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
return resultToken;
}
// get the token text
switch (resultToken) {
case PpAtomIdentifier:
// already have the correct text in token.names
break;
case '=':
case '!':
case '-':
case '~':
case '+':
case '*':
case '/':
case '%':
case '<':
case '>':
case '|':
case '^':
case '&':
case PpAtomRight:
case PpAtomLeft:
case PpAtomAnd:
case PpAtomOr:
case PpAtomXor:
snprintf(ppToken.name, sizeof(ppToken.name), "%s", atomStrings.getString(resultToken));
snprintf(pastedPpToken.name, sizeof(pastedPpToken.name), "%s", atomStrings.getString(token));
break;
default:
parseContext.ppError(ppToken.loc, "not supported for these tokens", "##", "");
return resultToken;
}
// combine the tokens
if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
return resultToken;
}
strncat(ppToken.name, pastedPpToken.name, MaxTokenLength - strlen(ppToken.name));
// combine the tokens
if (strlen(ppToken.name) + strlen(pastedPpToken.name) > MaxTokenLength) {
parseContext.ppError(ppToken.loc, "combined tokens are too long", "##", "");
return resultToken;
}
snprintf(&ppToken.name[0] + strlen(ppToken.name), sizeof(ppToken.name) - strlen(ppToken.name),
"%s", pastedPpToken.name);
// correct the kind of token we are making, if needed (identifiers stay identifiers)
if (resultToken != PpAtomIdentifier) {
int newToken = atomStrings.getAtom(ppToken.name);
if (newToken > 0)
resultToken = newToken;
else
parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
}
// correct the kind of token we are making, if needed (identifiers stay identifiers)
if (resultToken != PpAtomIdentifier) {
int newToken = atomStrings.getAtom(ppToken.name);
if (newToken > 0)
resultToken = newToken;
else
parseContext.ppError(ppToken.loc, "combined token is invalid", "##", "");
}
} while (peekContinuedPasting(resultToken));
}
return resultToken;
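// Illustrative sketch, not part of the upstream change: given
//
//   #define CAT(a, b) a ## b
//   CAT(x, 3A)
//
// the argument "3A" was pre-tokenized into "3" and "A" (with "A" marked
// non-spaced), so the loop above first builds "x3" and then, because
// peekContinuedPasting() still reports true, keeps appending until the
// intended identifier "x3A" is produced.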

View file

@ -99,150 +99,34 @@ NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
namespace glslang {
namespace {
// When recording (and playing back) should the backing name string
// be saved (restored)?
bool SaveName(int atom)
{
switch (atom) {
case PpAtomIdentifier:
case PpAtomConstString:
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
// When recording (and playing back) should the numeric value
// be saved (restored)?
bool SaveValue(int atom)
{
switch (atom) {
case PpAtomConstInt:
case PpAtomConstUint:
case PpAtomConstInt64:
case PpAtomConstUint64:
#ifdef AMD_EXTENSIONS
case PpAtomConstInt16:
case PpAtomConstUint16:
#endif
case PpAtomConstFloat:
case PpAtomConstDouble:
case PpAtomConstFloat16:
return true;
default:
return false;
}
}
}
// push onto back of stream
void TPpContext::TokenStream::putSubtoken(char subtoken)
{
data.push_back(static_cast<unsigned char>(subtoken));
}
// get the next token in stream
int TPpContext::TokenStream::getSubtoken()
{
if (current < data.size())
return data[current++];
else
return EndOfInput;
}
// back up one position in the stream
void TPpContext::TokenStream::ungetSubtoken()
{
if (current > 0)
--current;
}
// Add a complete token (including backing string) to the end of a list
// for later playback.
// Add a token (including backing string) to the end of a macro
// token stream, for later playback.
void TPpContext::TokenStream::putToken(int atom, TPpToken* ppToken)
{
// save the atom
assert((atom & ~0xff) == 0);
putSubtoken(static_cast<char>(atom));
// save the backing name string
if (SaveName(atom)) {
const char* s = ppToken->name;
while (*s)
putSubtoken(*s++);
putSubtoken(0);
}
// save the numeric value
if (SaveValue(atom)) {
const char* n = reinterpret_cast<const char*>(&ppToken->i64val);
for (size_t i = 0; i < sizeof(ppToken->i64val); ++i)
putSubtoken(*n++);
}
TokenStream::Token streamToken(atom, *ppToken);
stream.push_back(streamToken);
}
// Read the next token from a token stream.
// (Not the source stream, but a stream used to hold a tokenized macro).
// Read the next token from a macro token stream.
int TPpContext::TokenStream::getToken(TParseContextBase& parseContext, TPpToken *ppToken)
{
// get the atom
int atom = getSubtoken();
if (atom == EndOfInput)
return atom;
if (atEnd())
return EndOfInput;
// init the token
ppToken->clear();
int atom = stream[currentPos++].get(*ppToken);
ppToken->loc = parseContext.getCurrentLoc();
// get the backing name string
if (SaveName(atom)) {
int ch = getSubtoken();
int len = 0;
while (ch != 0 && ch != EndOfInput) {
if (len < MaxTokenLength) {
ppToken->name[len] = (char)ch;
len++;
ch = getSubtoken();
} else {
parseContext.error(ppToken->loc, "token too long", "", "");
break;
}
}
ppToken->name[len] = 0;
}
#ifndef GLSLANG_WEB
// Check for ##, unless the current # is the last character
if (atom == '#') {
if (current < data.size()) {
if (getSubtoken() == '#') {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
atom = PpAtomPaste;
} else
ungetSubtoken();
if (peekToken('#')) {
parseContext.requireProfile(ppToken->loc, ~EEsProfile, "token pasting (##)");
parseContext.profileRequires(ppToken->loc, ~EEsProfile, 130, 0, "token pasting (##)");
currentPos++;
atom = PpAtomPaste;
}
}
// get the numeric value
if (SaveValue(atom)) {
char* n = reinterpret_cast<char*>(&ppToken->i64val);
for (size_t i = 0; i < sizeof(ppToken->i64val); ++i)
*n++ = (char)getSubtoken();
}
#endif
return atom;
}
@ -256,15 +140,14 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
{
// 1. preceding ##?
size_t savePos = current;
int subtoken;
size_t savePos = currentPos;
// skip white space
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
current = savePos;
if (subtoken == PpAtomPaste)
while (peekToken(' '))
++currentPos;
if (peekToken(PpAtomPaste)) {
currentPos = savePos;
return true;
}
// 2. last token and we've been told after this there will be a ##
@ -273,18 +156,18 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
// Getting here means the last token will be pasted, after this
// Are we at the last non-whitespace token?
savePos = current;
savePos = currentPos;
bool moreTokens = false;
do {
subtoken = getSubtoken();
if (subtoken == EndOfInput)
if (atEnd())
break;
if (subtoken != ' ') {
if (!peekToken(' ')) {
moreTokens = true;
break;
}
++currentPos;
} while (true);
current = savePos;
currentPos = savePos;
return !moreTokens;
}
@ -293,23 +176,21 @@ bool TPpContext::TokenStream::peekTokenizedPasting(bool lastTokenPastes)
bool TPpContext::TokenStream::peekUntokenizedPasting()
{
// don't return early, have to restore this
size_t savePos = current;
size_t savePos = currentPos;
// skip white-space
int subtoken;
do {
subtoken = getSubtoken();
} while (subtoken == ' ');
while (peekToken(' '))
++currentPos;
// check for ##
bool pasting = false;
if (subtoken == '#') {
subtoken = getSubtoken();
if (subtoken == '#')
if (peekToken('#')) {
++currentPos;
if (peekToken('#'))
pasting = true;
}
current = savePos;
currentPos = savePos;
return pasting;
}

View file

@ -37,6 +37,8 @@
// propagate the 'noContraction' qualifier.
//
#ifndef GLSLANG_WEB
#include "propagateNoContraction.h"
#include <cstdlib>
@ -79,7 +81,7 @@ typedef std::unordered_set<glslang::TIntermBranch*> ReturnBranchNodeSet;
// the node has 'noContraction' qualifier, otherwise false.
bool isPreciseObjectNode(glslang::TIntermTyped* node)
{
return node->getType().getQualifier().noContraction;
return node->getType().getQualifier().isNoContraction();
}
// Returns true if the opcode is a dereferencing one.
@ -864,3 +866,5 @@ void PropagateNoContraction(const glslang::TIntermediate& intermediate)
}
}
};
#endif // GLSLANG_WEB

View file

@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#include "../Include/Common.h"
#include "reflection.h"
#include "LiveTraverser.h"
@ -93,65 +95,61 @@ public:
// Use a degenerate (empty) set of dereferences to immediately put us at the end of
// the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0);
blowUpActiveAggregate(base.getType(), base.getName(), derefs, derefs.end(), -1, -1, 0, 0,
base.getQualifier().storage, true);
}
}
void addAttribute(const TIntermSymbol& base)
void addPipeIOVariable(const TIntermSymbol& base)
{
if (processedDerefs.find(&base) == processedDerefs.end()) {
processedDerefs.insert(&base);
const TString &name = base.getName();
const TType &type = base.getType();
const bool input = base.getQualifier().isPipeInput();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name.c_str()] = (int)reflection.indexToAttribute.size();
reflection.indexToAttribute.push_back(TObjectReflection(name.c_str(), type, 0, mapToGlType(type), 0, 0));
TReflection::TMapIndexToReflection &ioItems =
input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
TReflection::TNameToIndex &ioMapper =
input ? reflection.pipeInNameToIndex : reflection.pipeOutNameToIndex;
if (reflection.options & EShReflectionUnwrapIOBlocks) {
bool anonymous = IsAnonymous(name);
TString baseName;
if (type.getBasicType() == EbtBlock) {
baseName = anonymous ? TString() : type.getTypeName();
} else {
baseName = anonymous ? TString() : name;
}
// by convention if this is an arrayed block we ignore the array in the reflection
if (type.isArray() && type.getBasicType() == EbtBlock) {
blowUpIOAggregate(input, baseName, TType(type, 0));
} else {
blowUpIOAggregate(input, baseName, type);
}
} else {
TReflection::TNameToIndex::const_iterator it = ioMapper.find(name.c_str());
if (it == ioMapper.end()) {
// separate pipe i/o params from uniforms and blocks
// 'in' is only for input in the first stage, as 'out' is only for the last stage; check the traversal in the call stack.
ioMapper[name.c_str()] = static_cast<int>(ioItems.size());
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
EShLanguageMask& stages = ioItems[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
}
}
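// Illustrative sketch, not part of the upstream change: for a fragment-shader
// input block
//
//   in Inputs { vec4 color; } fsIn;
//
// the default path records the block variable as a single pipe input, while
// EShReflectionUnwrapIOBlocks explodes it through blowUpIOAggregate() and
// records "Inputs.color" (or just "color" for an anonymous block), with each
// entry tagged with this shader's stage bit.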
// shared calculation by getOffset and getOffsets
void updateOffset(const TType& parentType, const TType& memberType, int& offset, int& memberSize)
{
int dummyStride;
// modify just the children's view of matrix layout, if there is one for this member
TLayoutMatrix subMatrixLayout = memberType.getQualifier().layoutMatrix;
int memberAlignment = intermediate.getMemberAlignment(memberType, memberSize, dummyStride,
parentType.getQualifier().layoutPacking,
subMatrixLayout != ElmNone
? subMatrixLayout == ElmRowMajor
: parentType.getQualifier().layoutMatrix == ElmRowMajor);
RoundToPow2(offset, memberAlignment);
}
// Lookup or calculate the offset of a block member, using the recursively
// defined block offset rules.
int getOffset(const TType& type, int index)
{
const TTypeList& memberList = *type.getStruct();
// Don't calculate offset if one is present, it could be user supplied
// and different than what would be calculated. That is, this is faster,
// but not just an optimization.
if (memberList[index].type->getQualifier().hasOffset())
return memberList[index].type->getQualifier().layoutOffset;
int memberSize = 0;
int offset = 0;
for (int m = 0; m <= index; ++m) {
updateOffset(type, *memberList[m].type, offset, memberSize);
if (m < index)
offset += memberSize;
}
return offset;
}
// Lookup or calculate the offset of all block members at once, using the recursively
// defined block offset rules.
void getOffsets(const TType& type, TVector<int>& offsets)
@ -166,7 +164,7 @@ public:
offset = memberList[m].type->getQualifier().layoutOffset;
// calculate the offset of the next member and align the current offset to this member
updateOffset(type, *memberList[m].type, offset, memberSize);
intermediate.updateOffset(type, *memberList[m].type, offset, memberSize);
// save the offset of this member
offsets[m] = offset;
@ -196,21 +194,34 @@ public:
return stride;
}
// Calculate the block data size.
// Block arrayness is not taken into account, each element is backed by a separate buffer.
int getBlockSize(const TType& blockType)
// Count the total number of leaf members reached by iterating through a block type
int countAggregateMembers(const TType& parentType)
{
const TTypeList& memberList = *blockType.getStruct();
int lastIndex = (int)memberList.size() - 1;
int lastOffset = getOffset(blockType, lastIndex);
if (! parentType.isStruct())
return 1;
int lastMemberSize;
int dummyStride;
intermediate.getMemberAlignment(*memberList[lastIndex].type, lastMemberSize, dummyStride,
blockType.getQualifier().layoutPacking,
blockType.getQualifier().layoutMatrix == ElmRowMajor);
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
return lastOffset + lastMemberSize;
bool blockParent = (parentType.getBasicType() == EbtBlock && parentType.getQualifier().storage == EvqBuffer);
const TTypeList &memberList = *parentType.getStruct();
int ret = 0;
for (size_t i = 0; i < memberList.size(); i++)
{
const TType &memberType = *memberList[i].type;
int numMembers = countAggregateMembers(memberType);
// for sized arrays of structs, apply logic to expand out the same as we would below in
// blowUpActiveAggregate
if (memberType.isArray() && ! memberType.getArraySizes()->hasUnsized() && memberType.isStruct()) {
if (! strictArraySuffix || ! blockParent)
numMembers *= memberType.getArraySizes()->getCumulativeSize();
}
ret += numMembers;
}
return ret;
}
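// Illustrative sketch, not part of the upstream change: with
//
//   struct S { float a; vec2 b; };
//   buffer B { S s[4]; float tail; } ssbo;
//
// countAggregateMembers() finds 2 leaves in S, so block B counts
// 4 * 2 + 1 = 9 members by default, but only 2 + 1 = 3 when
// EShReflectionStrictArraySuffix suppresses expansion of the sized
// array-of-structs at the root of a buffer block.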
// Traverse the provided deref chain, including the base, and
@ -221,8 +232,19 @@ public:
// arraySize tracks, just for the final dereference in the chain, if there was a specific known size.
// A value of 0 for arraySize will mean to use the full array's size.
void blowUpActiveAggregate(const TType& baseType, const TString& baseName, const TList<TIntermBinary*>& derefs,
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize)
TList<TIntermBinary*>::const_iterator deref, int offset, int blockIndex, int arraySize,
int topLevelArrayStride, TStorageQualifier baseStorage, bool active)
{
// when strictArraySuffix is enabled, we closely follow the rules from ARB_program_interface_query.
// Broadly:
// * arrays-of-structs always have a [x] suffix.
// * with array-of-struct variables in the root of a buffer block, only ever return [0].
// * otherwise, array suffixes are added whenever we iterate, even if that means expanding out an array.
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
// Is this variable inside a buffer block? This flag is set back to false after we iterate inside the first array element.
bool blockParent = (baseType.getBasicType() == EbtBlock && baseType.getQualifier().storage == EvqBuffer);
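// Illustrative sketch, not part of the upstream change: when all block
// variables are reflected (EShReflectionAllBlockVariables) for
//
//   buffer B { S s[4]; } ssbo;     // S has members a and b
//
// the strict rules yield only "B.s[0].a" and "B.s[0].b", whereas the default
// rules expand every element: "B.s[0].a" ... "B.s[3].b".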
// process the part of the dereference chain that was explicit in the shader
TString name = baseName;
const TType* terminalType = &baseType;
@ -234,36 +256,51 @@ public:
case EOpIndexIndirect: {
int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
// Visit all the indices of this array, and for each one add on the remaining dereferencing
for (int i = 0; i < std::max(visitNode->getLeft()->getType().getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
if (baseType.getBasicType() != EbtBlock)
if (strictArraySuffix && blockParent)
newBaseName.append(TString("[0]"));
else if (strictArraySuffix || baseType.getBasicType() != EbtBlock)
newBaseName.append(TString("[") + String(i) + "]");
TList<TIntermBinary*>::const_iterator nextDeref = deref;
++nextDeref;
TType derefType(*terminalType, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize);
blowUpActiveAggregate(*terminalType, newBaseName, derefs, nextDeref, offset, blockIndex, arraySize,
topLevelArrayStride, baseStorage, active);
if (offset >= 0)
offset += stride;
offset += stride;
}
// it was all completed in the recursive calls above
return;
}
case EOpIndexDirect:
case EOpIndexDirect: {
int stride = getArrayStride(baseType, visitNode->getLeft()->getType());
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (baseType.getBasicType() != EbtBlock) {
if (strictArraySuffix && blockParent) {
name.append(TString("[0]"));
} else if (strictArraySuffix || baseType.getBasicType() != EbtBlock) {
name.append(TString("[") + String(index) + "]");
if (offset >= 0)
offset += getArrayStride(baseType, visitNode->getLeft()->getType()) * index;
offset += stride * index;
}
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
blockParent = false;
break;
}
case EOpIndexDirectStruct:
index = visitNode->getRight()->getAsConstantUnion()->getConstArray()[0].getIConst();
if (offset >= 0)
offset += getOffset(visitNode->getLeft()->getType(), index);
offset += intermediate.getOffset(visitNode->getLeft()->getType(), index);
if (name.size() > 0)
name.append(".");
name.append((*visitNode->getLeft()->getType().getStruct())[index].type->getFieldName());
@ -286,13 +323,24 @@ public:
if (offset >= 0)
stride = getArrayStride(baseType, *terminalType);
for (int i = 0; i < std::max(terminalType->getOuterArraySize(), 1); ++i) {
if (topLevelArrayStride == 0)
topLevelArrayStride = stride;
int arrayIterateSize = std::max(terminalType->getOuterArraySize(), 1);
// for top-level arrays in blocks, only expand [0] to avoid explosion of items
if (strictArraySuffix && blockParent)
arrayIterateSize = 1;
for (int i = 0; i < arrayIterateSize; ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(*terminalType, 0);
if (offset >= 0)
offset = baseOffset + stride * i;
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
topLevelArrayStride, baseStorage, active);
}
} else {
// Visit all members of this aggregate, and for each one,
@ -308,11 +356,21 @@ public:
for (int i = 0; i < (int)typeList.size(); ++i) {
TString newBaseName = name;
newBaseName.append(TString(".") + typeList[i].type->getFieldName());
if (newBaseName.size() > 0)
newBaseName.append(".");
newBaseName.append(typeList[i].type->getFieldName());
TType derefType(*terminalType, i);
if (offset >= 0)
offset = baseOffset + memberOffsets[i];
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0);
int arrayStride = topLevelArrayStride;
if (terminalType->getBasicType() == EbtBlock && terminalType->getQualifier().storage == EvqBuffer &&
derefType.isArray()) {
arrayStride = getArrayStride(baseType, derefType);
}
blowUpActiveAggregate(derefType, newBaseName, derefs, derefs.end(), offset, blockIndex, 0,
arrayStride, baseStorage, active);
}
}
@ -320,6 +378,10 @@ public:
return;
}
if ((reflection.options & EShReflectionBasicArraySuffix) && terminalType->isArray()) {
name.append(TString("[0]"));
}
// Finally, add a full string to the reflection database, and update the array size if necessary.
// If the dereferenced entity to record is an array, compute the size and update the maximum size.
@ -327,15 +389,100 @@ public:
if (arraySize == 0)
arraySize = mapToGlArraySize(*terminalType);
TReflection::TMapIndexToReflection& variables = reflection.GetVariableMapForStorage(baseStorage);
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[name.c_str()] = (int)reflection.indexToUniform.size();
reflection.indexToUniform.push_back(TObjectReflection(name.c_str(), *terminalType, offset,
mapToGlType(*terminalType),
arraySize, blockIndex));
} else if (arraySize > 1) {
int& reflectedArraySize = reflection.indexToUniform[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
int uniformIndex = (int)variables.size();
reflection.nameToIndex[name.c_str()] = uniformIndex;
variables.push_back(TObjectReflection(name.c_str(), *terminalType, offset, mapToGlType(*terminalType),
arraySize, blockIndex));
if (terminalType->isArray()) {
variables.back().arrayStride = getArrayStride(baseType, *terminalType);
if (topLevelArrayStride == 0)
topLevelArrayStride = variables.back().arrayStride;
}
if ((reflection.options & EShReflectionSeparateBuffers) && terminalType->isAtomic())
reflection.atomicCounterUniformIndices.push_back(uniformIndex);
variables.back().topLevelArrayStride = topLevelArrayStride;
if ((reflection.options & EShReflectionAllBlockVariables) && active) {
EShLanguageMask& stages = variables.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
} else {
if (arraySize > 1) {
int& reflectedArraySize = variables[it->second].size;
reflectedArraySize = std::max(arraySize, reflectedArraySize);
}
if ((reflection.options & EShReflectionAllBlockVariables) && active) {
EShLanguageMask& stages = variables[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
}
// similar to blowUpActiveAggregate, but with simpler rules and no dereferences to follow.
void blowUpIOAggregate(bool input, const TString &baseName, const TType &type)
{
TString name = baseName;
// if the type is still too coarse a granularity, this is still an aggregate to expand, expand it...
if (! isReflectionGranularity(type)) {
if (type.isArray()) {
// Visit all the indices of this array, and for each one,
// fully explode the remaining aggregate to dereference
for (int i = 0; i < std::max(type.getOuterArraySize(), 1); ++i) {
TString newBaseName = name;
newBaseName.append(TString("[") + String(i) + "]");
TType derefType(type, 0);
blowUpIOAggregate(input, newBaseName, derefType);
}
} else {
// Visit all members of this aggregate, and for each one,
// fully explode the remaining aggregate to dereference
const TTypeList& typeList = *type.getStruct();
for (int i = 0; i < (int)typeList.size(); ++i) {
TString newBaseName = name;
if (newBaseName.size() > 0)
newBaseName.append(".");
newBaseName.append(typeList[i].type->getFieldName());
TType derefType(type, i);
blowUpIOAggregate(input, newBaseName, derefType);
}
}
// it was all completed in the recursive calls above
return;
}
if ((reflection.options & EShReflectionBasicArraySuffix) && type.isArray()) {
name.append(TString("[0]"));
}
TReflection::TMapIndexToReflection &ioItems =
input ? reflection.indexToPipeInput : reflection.indexToPipeOutput;
std::string namespacedName = input ? "in " : "out ";
namespacedName += name.c_str();
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(namespacedName);
if (it == reflection.nameToIndex.end()) {
reflection.nameToIndex[namespacedName] = (int)ioItems.size();
ioItems.push_back(
TObjectReflection(name.c_str(), type, 0, mapToGlType(type), mapToGlArraySize(type), 0));
EShLanguageMask& stages = ioItems.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
EShLanguageMask& stages = ioItems[it->second].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
}
@ -385,6 +532,10 @@ public:
anonymous = IsAnonymous(base->getName());
const TString& blockName = base->getType().getTypeName();
TString baseName;
if (! anonymous)
baseName = blockName;
if (base->getType().isArray()) {
TType derefType(base->getType(), 0);
@ -392,9 +543,60 @@ public:
assert(! anonymous);
for (int e = 0; e < base->getType().getCumulativeArraySize(); ++e)
blockIndex = addBlockName(blockName + "[" + String(e) + "]", derefType,
getBlockSize(base->getType()));
intermediate.getBlockSize(base->getType()));
baseName.append(TString("[0]"));
} else
blockIndex = addBlockName(blockName, base->getType(), getBlockSize(base->getType()));
blockIndex = addBlockName(blockName, base->getType(), intermediate.getBlockSize(base->getType()));
if (reflection.options & EShReflectionAllBlockVariables) {
// Use a degenerate (empty) set of dereferences to immediately put us at the end of
// the dereference chain expected by blowUpActiveAggregate.
TList<TIntermBinary*> derefs;
// because we don't have any derefs, the first thing blowUpActiveAggregate will do is iterate over each
// member in the struct definition. This will lose any information about whether the parent was a buffer
// block. So if we're using strict array rules, which don't expand the first child of a buffer block,
// we instead iterate over the children here.
const bool strictArraySuffix = (reflection.options & EShReflectionStrictArraySuffix);
bool blockParent = (base->getType().getBasicType() == EbtBlock && base->getQualifier().storage == EvqBuffer);
if (strictArraySuffix && blockParent) {
TType structDerefType(base->getType(), 0);
const TType &structType = base->getType().isArray() ? structDerefType : base->getType();
const TTypeList& typeList = *structType.getStruct();
TVector<int> memberOffsets;
memberOffsets.resize(typeList.size());
getOffsets(structType, memberOffsets);
for (int i = 0; i < (int)typeList.size(); ++i) {
TType derefType(structType, i);
TString name = baseName;
if (name.size() > 0)
name.append(".");
name.append(typeList[i].type->getFieldName());
// if this member is an array, store the top-level array stride but start the explosion from
// the inner struct type.
if (derefType.isArray() && derefType.isStruct()) {
name.append("[0]");
blowUpActiveAggregate(TType(derefType, 0), name, derefs, derefs.end(), memberOffsets[i],
blockIndex, 0, getArrayStride(structType, derefType),
base->getQualifier().storage, false);
} else {
blowUpActiveAggregate(derefType, name, derefs, derefs.end(), memberOffsets[i], blockIndex,
0, 0, base->getQualifier().storage, false);
}
}
} else {
// otherwise - if we're not using strict array suffix rules, or this isn't a block so we are
// expanding root arrays anyway, just start the iteration from the base block type.
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.end(), 0, blockIndex, 0, 0,
base->getQualifier().storage, false);
}
}
}
// Process the dereference chain, backward, accumulating the pieces for later forward traversal.
@ -424,27 +626,39 @@ public:
else
baseName = base->getName();
}
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize);
blowUpActiveAggregate(base->getType(), baseName, derefs, derefs.begin(), offset, blockIndex, arraySize, 0,
base->getQualifier().storage, true);
}
int addBlockName(const TString& name, const TType& type, int size)
{
TReflection::TMapIndexToReflection& blocks = reflection.GetBlockMapForStorage(type.getQualifier().storage);
int blockIndex;
TReflection::TNameToIndex::const_iterator it = reflection.nameToIndex.find(name.c_str());
if (reflection.nameToIndex.find(name.c_str()) == reflection.nameToIndex.end()) {
blockIndex = (int)reflection.indexToUniformBlock.size();
blockIndex = (int)blocks.size();
reflection.nameToIndex[name.c_str()] = blockIndex;
reflection.indexToUniformBlock.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
} else
blocks.push_back(TObjectReflection(name.c_str(), type, -1, -1, size, -1));
blocks.back().numMembers = countAggregateMembers(type);
EShLanguageMask& stages = blocks.back().stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
} else {
blockIndex = it->second;
EShLanguageMask& stages = blocks[blockIndex].stages;
stages = static_cast<EShLanguageMask>(stages | 1 << intermediate.getStage());
}
return blockIndex;
}
// Are we at a level in a dereference chain at which individual active uniform queries are made?
bool isReflectionGranularity(const TType& type)
{
return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct;
return type.getBasicType() != EbtBlock && type.getBasicType() != EbtStruct && !type.isArrayOfArrays();
}
// For a binary operation indexing into an aggregate, chase down the base of the aggregate.
@ -497,7 +711,6 @@ public:
case EsdBuffer:
return GL_SAMPLER_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
@ -526,7 +739,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_SAMPLER_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
@ -589,7 +801,6 @@ public:
case EsdBuffer:
return GL_IMAGE_BUFFER;
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch ((int)sampler.dim) {
case Esd1D:
@ -608,7 +819,6 @@ public:
case EsdBuffer:
return GL_FLOAT16_IMAGE_BUFFER_AMD;
}
#endif
case EbtInt:
switch ((int)sampler.dim) {
case Esd1D:
@ -674,9 +884,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT_VEC2 + offset;
case EbtDouble: return GL_DOUBLE_VEC2 + offset;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_VEC2_NV + offset;
#endif
case EbtInt: return GL_INT_VEC2 + offset;
case EbtUint: return GL_UNSIGNED_INT_VEC2 + offset;
case EbtInt64: return GL_INT64_ARB + offset;
@ -736,7 +944,6 @@ public:
default: return 0;
}
}
#ifdef AMD_EXTENSIONS
case EbtFloat16:
switch (type.getMatrixCols()) {
case 2:
@ -761,7 +968,6 @@ public:
default: return 0;
}
}
#endif
default:
return 0;
}
@ -770,9 +976,7 @@ public:
switch (type.getBasicType()) {
case EbtFloat: return GL_FLOAT;
case EbtDouble: return GL_DOUBLE;
#ifdef AMD_EXTENSIONS
case EbtFloat16: return GL_FLOAT16_NV;
#endif
case EbtInt: return GL_INT;
case EbtUint: return GL_UNSIGNED_INT;
case EbtInt64: return GL_INT64_ARB;
@ -828,8 +1032,47 @@ void TReflectionTraverser::visitSymbol(TIntermSymbol* base)
if (base->getQualifier().storage == EvqUniform)
addUniform(*base);
if (intermediate.getStage() == EShLangVertex && base->getQualifier().isPipeInput())
addAttribute(*base);
if ((intermediate.getStage() == reflection.firstStage && base->getQualifier().isPipeInput()) ||
(intermediate.getStage() == reflection.lastStage && base->getQualifier().isPipeOutput()))
addPipeIOVariable(*base);
}
//
// Implement TObjectReflection methods.
//
TObjectReflection::TObjectReflection(const std::string &pName, const TType &pType, int pOffset, int pGLDefineType,
int pSize, int pIndex)
: name(pName), offset(pOffset), glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1),
numMembers(-1), arrayStride(0), topLevelArrayStride(0), stages(EShLanguageMask(0)), type(pType.clone())
{
}
int TObjectReflection::getBinding() const
{
if (type == nullptr || !type->getQualifier().hasBinding())
return -1;
return type->getQualifier().layoutBinding;
}
void TObjectReflection::dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d", name.c_str(), offset, glDefineType, size,
index, getBinding(), stages);
if (counterIndex != -1)
printf(", counter %d", counterIndex);
if (numMembers != -1)
printf(", numMembers %d", numMembers);
if (arrayStride != 0)
printf(", arrayStride %d", arrayStride);
if (topLevelArrayStride != 0)
printf(", topLevelArrayStride %d", topLevelArrayStride);
printf("\n");
}
//
@ -850,6 +1093,7 @@ void TReflection::buildAttributeReflection(EShLanguage stage, const TIntermediat
// build counter block index associations for buffers
void TReflection::buildCounterIndices(const TIntermediate& intermediate)
{
#ifdef ENABLE_HLSL
// search for ones that have counters
for (int i = 0; i < int(indexToUniformBlock.size()); ++i) {
const TString counterName(intermediate.addCounterBufferName(indexToUniformBlock[i].name).c_str());
@ -858,14 +1102,23 @@ void TReflection::buildCounterIndices(const TIntermediate& intermediate)
if (index >= 0)
indexToUniformBlock[i].counterIndex = index;
}
#endif
}
// build Shader Stages mask for all uniforms
void TReflection::buildUniformStageMask(const TIntermediate& intermediate)
{
if (options & EShReflectionAllBlockVariables)
return;
for (int i = 0; i < int(indexToUniform.size()); ++i) {
indexToUniform[i].stages = static_cast<EShLanguageMask>(indexToUniform[i].stages | 1 << intermediate.getStage());
}
for (int i = 0; i < int(indexToBufferVariable.size()); ++i) {
indexToBufferVariable[i].stages =
static_cast<EShLanguageMask>(indexToBufferVariable[i].stages | 1 << intermediate.getStage());
}
}
// Merge live symbols from 'intermediate' into the existing reflection database.
@ -910,9 +1163,24 @@ void TReflection::dump()
indexToUniformBlock[i].dump();
printf("\n");
printf("Vertex attribute reflection:\n");
for (size_t i = 0; i < indexToAttribute.size(); ++i)
indexToAttribute[i].dump();
printf("Buffer variable reflection:\n");
for (size_t i = 0; i < indexToBufferVariable.size(); ++i)
indexToBufferVariable[i].dump();
printf("\n");
printf("Buffer block reflection:\n");
for (size_t i = 0; i < indexToBufferBlock.size(); ++i)
indexToBufferBlock[i].dump();
printf("\n");
printf("Pipeline input reflection:\n");
for (size_t i = 0; i < indexToPipeInput.size(); ++i)
indexToPipeInput[i].dump();
printf("\n");
printf("Pipeline output reflection:\n");
for (size_t i = 0; i < indexToPipeOutput.size(); ++i)
indexToPipeOutput[i].dump();
printf("\n");
if (getLocalSize(0) > 1) {
@ -932,3 +1200,5 @@ void TReflection::dump()
}
} // end namespace glslang
#endif // GLSLANG_WEB

View file

@ -33,6 +33,8 @@
// POSSIBILITY OF SUCH DAMAGE.
//
#ifndef GLSLANG_WEB
#ifndef _REFLECTION_INCLUDED
#define _REFLECTION_INCLUDED
@ -52,51 +54,11 @@ class TIntermediate;
class TIntermAggregate;
class TReflectionTraverser;
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex) :
name(pName), offset(pOffset),
glDefineType(pGLDefineType), size(pSize), index(pIndex), counterIndex(-1), stages(EShLanguageMask(0)), type(pType.clone()) { }
const TType* getType() const { return type; }
int getBinding() const
{
if (type == nullptr || !type->getQualifier().hasBinding())
return -1;
return type->getQualifier().layoutBinding;
}
void dump() const
{
printf("%s: offset %d, type %x, size %d, index %d, binding %d, stages %d",
name.c_str(), offset, glDefineType, size, index, getBinding(), stages );
if (counterIndex != -1)
printf(", counter %d", counterIndex);
printf("\n");
}
static TObjectReflection badReflection() { return TObjectReflection(); }
std::string name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
EShLanguageMask stages;
protected:
TObjectReflection() :
offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), stages(EShLanguageMask(0)), type(nullptr) { }
const TType* type;
};
// The full reflection database
class TReflection {
public:
TReflection() : badReflection(TObjectReflection::badReflection())
TReflection(EShReflectionOptions opts, EShLanguage first, EShLanguage last)
: options(opts), firstStage(first), lastStage(last), badReflection(TObjectReflection::badReflection())
{
for (int dim=0; dim<3; ++dim)
localSize[dim] = 0;
@ -127,17 +89,57 @@ public:
return badReflection;
}
// for mapping an attribute index to the attribute's description
int getNumAttributes() { return (int)indexToAttribute.size(); }
const TObjectReflection& getAttribute(int i) const
// for mapping a pipeline input index to the input's description
int getNumPipeInputs() { return (int)indexToPipeInput.size(); }
const TObjectReflection& getPipeInput(int i) const
{
if (i >= 0 && i < (int)indexToAttribute.size())
return indexToAttribute[i];
if (i >= 0 && i < (int)indexToPipeInput.size())
return indexToPipeInput[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and attribute names)
// for mapping a pipeline output index to the output's description
int getNumPipeOutputs() { return (int)indexToPipeOutput.size(); }
const TObjectReflection& getPipeOutput(int i) const
{
if (i >= 0 && i < (int)indexToPipeOutput.size())
return indexToPipeOutput[i];
else
return badReflection;
}
// for mapping from an atomic counter to the uniform index
int getNumAtomicCounters() const { return (int)atomicCounterUniformIndices.size(); }
const TObjectReflection& getAtomicCounter(int i) const
{
if (i >= 0 && i < (int)atomicCounterUniformIndices.size())
return getUniform(atomicCounterUniformIndices[i]);
else
return badReflection;
}
// for mapping a buffer variable index to a buffer variable object's description
int getNumBufferVariables() { return (int)indexToBufferVariable.size(); }
const TObjectReflection& getBufferVariable(int i) const
{
if (i >= 0 && i < (int)indexToBufferVariable.size())
return indexToBufferVariable[i];
else
return badReflection;
}
// for mapping a storage block index to the storage block's description
int getNumStorageBuffers() const { return (int)indexToBufferBlock.size(); }
const TObjectReflection& getStorageBufferBlock(int i) const
{
if (i >= 0 && i < (int)indexToBufferBlock.size())
return indexToBufferBlock[i];
else
return badReflection;
}
// for mapping any name to its index (block names, uniform names and input/output names)
int getIndex(const char* name) const
{
TNameToIndex::const_iterator it = nameToIndex.find(name);
@ -150,6 +152,20 @@ public:
// see getIndex(const char*)
int getIndex(const TString& name) const { return getIndex(name.c_str()); }
// for mapping any name to its index (only pipe input/output names)
int getPipeIOIndex(const char* name, const bool inOrOut) const
{
TNameToIndex::const_iterator it = inOrOut ? pipeInNameToIndex.find(name) : pipeOutNameToIndex.find(name);
if (it == (inOrOut ? pipeInNameToIndex.end() : pipeOutNameToIndex.end()))
return -1;
else
return it->second;
}
// see getPipeIOIndex(const char*, const bool)
int getPipeIOIndex(const TString& name, const bool inOrOut) const { return getPipeIOIndex(name.c_str(), inOrOut); }
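A small usage sketch of the pipe-I/O lookup above; 'reflection' and the variable names are assumptions for illustration only:
// Illustrative only: 'reflection' is a populated TReflection.
int inIdx  = reflection.getPipeIOIndex("inPosition", true);   // true  -> pipeline inputs
int outIdx = reflection.getPipeIOIndex("outColor", false);    // false -> pipeline outputs
if (inIdx >= 0)
    reflection.getPipeInput(inIdx).dump();                    // prints offset/type/binding info
if (outIdx >= 0)
    reflection.getPipeOutput(outIdx).dump();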
// Thread local size
unsigned getLocalSize(int dim) const { return dim <= 2 ? localSize[dim] : 0; }
@ -165,12 +181,37 @@ protected:
// Need a TString hash: typedef std::unordered_map<TString, int> TNameToIndex;
typedef std::map<std::string, int> TNameToIndex;
typedef std::vector<TObjectReflection> TMapIndexToReflection;
typedef std::vector<int> TIndices;
TMapIndexToReflection& GetBlockMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferBlock;
return indexToUniformBlock;
}
TMapIndexToReflection& GetVariableMapForStorage(TStorageQualifier storage)
{
if ((options & EShReflectionSeparateBuffers) && storage == EvqBuffer)
return indexToBufferVariable;
return indexToUniform;
}
EShReflectionOptions options;
EShLanguage firstStage;
EShLanguage lastStage;
TObjectReflection badReflection; // returned for queries of -1 or generally out-of-range values; has the expected descriptions within it for this
TNameToIndex nameToIndex; // maps names to indexes; can hold all types of data: uniform/buffer and which function names have been processed
TNameToIndex pipeInNameToIndex; // maps pipe in names to indexes; kept separate so pipe I/O does not collide with uniforms and buffers.
TNameToIndex pipeOutNameToIndex; // maps pipe out names to indexes; kept separate so pipe I/O does not collide with uniforms and buffers.
TMapIndexToReflection indexToUniform;
TMapIndexToReflection indexToUniformBlock;
TMapIndexToReflection indexToAttribute;
TMapIndexToReflection indexToBufferVariable;
TMapIndexToReflection indexToBufferBlock;
TMapIndexToReflection indexToPipeInput;
TMapIndexToReflection indexToPipeOutput;
TIndices atomicCounterUniformIndices;
unsigned int localSize[3];
};
@ -178,3 +219,5 @@ protected:
} // end namespace glslang
#endif // _REFLECTION_INCLUDED
#endif // GLSLANG_WEB

View file

@ -20,6 +20,7 @@ else()
endif()
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
install(TARGETS OSDependent EXPORT OSDependentTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT OSDependentTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)

View file

@ -0,0 +1,24 @@
add_executable(glslang.js "glslang.js.cpp")
glslang_set_link_args(glslang.js)
target_link_libraries(glslang.js glslang SPIRV)
if(EMSCRIPTEN)
set_target_properties(glslang.js PROPERTIES
OUTPUT_NAME "glslang"
SUFFIX ".js")
em_link_pre_js(glslang.js "${CMAKE_CURRENT_SOURCE_DIR}/glslang.pre.js")
target_link_options(glslang.js PRIVATE
"SHELL:--bind -s MODULARIZE=1")
if(ENABLE_EMSCRIPTEN_ENVIRONMENT_NODE)
target_link_options(glslang.js PRIVATE
"SHELL:-s ENVIRONMENT=node -s BINARYEN_ASYNC_COMPILATION=0")
else()
target_link_options(glslang.js PRIVATE
"SHELL:-s ENVIRONMENT=web,worker")
endif()
if(NOT ENABLE_EMSCRIPTEN_ENVIRONMENT_NODE)
add_custom_command(TARGET glslang.js POST_BUILD
COMMAND cat ${CMAKE_CURRENT_SOURCE_DIR}/glslang.after.js >> ${CMAKE_CURRENT_BINARY_DIR}/glslang.js)
endif()
endif(EMSCRIPTEN)

View file

@ -0,0 +1,26 @@
export default (() => {
const initialize = () => {
return new Promise(resolve => {
Module({
locateFile() {
const i = import.meta.url.lastIndexOf('/')
return import.meta.url.substring(0, i) + '/glslang.wasm';
},
onRuntimeInitialized() {
resolve({
compileGLSLZeroCopy: this.compileGLSLZeroCopy,
compileGLSL: this.compileGLSL,
});
},
});
});
};
let instance;
return () => {
if (!instance) {
instance = initialize();
}
return instance;
};
})();

View file

@ -0,0 +1,269 @@
//
// Copyright (C) 2019 Google, Inc.
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
//
// Neither the name of 3Dlabs Inc. Ltd. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
#include <cstdio>
#include <cstdint>
#include <memory>
#include <cassert> // assert() is used by the non-Emscripten test main() below
#ifdef __EMSCRIPTEN__
#include <emscripten.h>
#endif
#include "../../../SPIRV/GlslangToSpv.h"
#include "../../../glslang/Public/ShaderLang.h"
#ifndef __EMSCRIPTEN__
#define EMSCRIPTEN_KEEPALIVE
#endif
const TBuiltInResource DefaultTBuiltInResource = {
/* .MaxLights = */ 32,
/* .MaxClipPlanes = */ 6,
/* .MaxTextureUnits = */ 32,
/* .MaxTextureCoords = */ 32,
/* .MaxVertexAttribs = */ 64,
/* .MaxVertexUniformComponents = */ 4096,
/* .MaxVaryingFloats = */ 64,
/* .MaxVertexTextureImageUnits = */ 32,
/* .MaxCombinedTextureImageUnits = */ 80,
/* .MaxTextureImageUnits = */ 32,
/* .MaxFragmentUniformComponents = */ 4096,
/* .MaxDrawBuffers = */ 32,
/* .MaxVertexUniformVectors = */ 128,
/* .MaxVaryingVectors = */ 8,
/* .MaxFragmentUniformVectors = */ 16,
/* .MaxVertexOutputVectors = */ 16,
/* .MaxFragmentInputVectors = */ 15,
/* .MinProgramTexelOffset = */ -8,
/* .MaxProgramTexelOffset = */ 7,
/* .MaxClipDistances = */ 8,
/* .MaxComputeWorkGroupCountX = */ 65535,
/* .MaxComputeWorkGroupCountY = */ 65535,
/* .MaxComputeWorkGroupCountZ = */ 65535,
/* .MaxComputeWorkGroupSizeX = */ 1024,
/* .MaxComputeWorkGroupSizeY = */ 1024,
/* .MaxComputeWorkGroupSizeZ = */ 64,
/* .MaxComputeUniformComponents = */ 1024,
/* .MaxComputeTextureImageUnits = */ 16,
/* .MaxComputeImageUniforms = */ 8,
/* .MaxComputeAtomicCounters = */ 8,
/* .MaxComputeAtomicCounterBuffers = */ 1,
/* .MaxVaryingComponents = */ 60,
/* .MaxVertexOutputComponents = */ 64,
/* .MaxGeometryInputComponents = */ 64,
/* .MaxGeometryOutputComponents = */ 128,
/* .MaxFragmentInputComponents = */ 128,
/* .MaxImageUnits = */ 8,
/* .MaxCombinedImageUnitsAndFragmentOutputs = */ 8,
/* .MaxCombinedShaderOutputResources = */ 8,
/* .MaxImageSamples = */ 0,
/* .MaxVertexImageUniforms = */ 0,
/* .MaxTessControlImageUniforms = */ 0,
/* .MaxTessEvaluationImageUniforms = */ 0,
/* .MaxGeometryImageUniforms = */ 0,
/* .MaxFragmentImageUniforms = */ 8,
/* .MaxCombinedImageUniforms = */ 8,
/* .MaxGeometryTextureImageUnits = */ 16,
/* .MaxGeometryOutputVertices = */ 256,
/* .MaxGeometryTotalOutputComponents = */ 1024,
/* .MaxGeometryUniformComponents = */ 1024,
/* .MaxGeometryVaryingComponents = */ 64,
/* .MaxTessControlInputComponents = */ 128,
/* .MaxTessControlOutputComponents = */ 128,
/* .MaxTessControlTextureImageUnits = */ 16,
/* .MaxTessControlUniformComponents = */ 1024,
/* .MaxTessControlTotalOutputComponents = */ 4096,
/* .MaxTessEvaluationInputComponents = */ 128,
/* .MaxTessEvaluationOutputComponents = */ 128,
/* .MaxTessEvaluationTextureImageUnits = */ 16,
/* .MaxTessEvaluationUniformComponents = */ 1024,
/* .MaxTessPatchComponents = */ 120,
/* .MaxPatchVertices = */ 32,
/* .MaxTessGenLevel = */ 64,
/* .MaxViewports = */ 16,
/* .MaxVertexAtomicCounters = */ 0,
/* .MaxTessControlAtomicCounters = */ 0,
/* .MaxTessEvaluationAtomicCounters = */ 0,
/* .MaxGeometryAtomicCounters = */ 0,
/* .MaxFragmentAtomicCounters = */ 8,
/* .MaxCombinedAtomicCounters = */ 8,
/* .MaxAtomicCounterBindings = */ 1,
/* .MaxVertexAtomicCounterBuffers = */ 0,
/* .MaxTessControlAtomicCounterBuffers = */ 0,
/* .MaxTessEvaluationAtomicCounterBuffers = */ 0,
/* .MaxGeometryAtomicCounterBuffers = */ 0,
/* .MaxFragmentAtomicCounterBuffers = */ 1,
/* .MaxCombinedAtomicCounterBuffers = */ 1,
/* .MaxAtomicCounterBufferSize = */ 16384,
/* .MaxTransformFeedbackBuffers = */ 4,
/* .MaxTransformFeedbackInterleavedComponents = */ 64,
/* .MaxCullDistances = */ 8,
/* .MaxCombinedClipAndCullDistances = */ 8,
/* .MaxSamples = */ 4,
/* .maxMeshOutputVerticesNV = */ 256,
/* .maxMeshOutputPrimitivesNV = */ 512,
/* .maxMeshWorkGroupSizeX_NV = */ 32,
/* .maxMeshWorkGroupSizeY_NV = */ 1,
/* .maxMeshWorkGroupSizeZ_NV = */ 1,
/* .maxTaskWorkGroupSizeX_NV = */ 32,
/* .maxTaskWorkGroupSizeY_NV = */ 1,
/* .maxTaskWorkGroupSizeZ_NV = */ 1,
/* .maxMeshViewCountNV = */ 4,
/* .limits = */ {
/* .nonInductiveForLoops = */ 1,
/* .whileLoops = */ 1,
/* .doWhileLoops = */ 1,
/* .generalUniformIndexing = */ 1,
/* .generalAttributeMatrixVectorIndexing = */ 1,
/* .generalVaryingIndexing = */ 1,
/* .generalSamplerIndexing = */ 1,
/* .generalVariableIndexing = */ 1,
/* .generalConstantMatrixVectorIndexing = */ 1,
}};
static bool initialized = false;
extern "C" {
/*
* Takes in a GLSL shader as a string and converts it to SPIR-V in binary form.
*
* |glsl| Null-terminated string containing the shader to be converted.
* |stage_int| Magic number indicating the type of shader being processed.
* Legal values are as follows:
* Vertex = 0
* Fragment = 4
* Compute = 5
* |gen_debug| Flag to indicate if debug information should be generated.
* |spirv| Output parameter for a pointer to the resulting SPIR-V data.
* |spirv_len| Output parameter for the length of the output binary buffer.
*
* Returns a void* pointer which, if not null, must be destroyed by
* destroy_output_buffer(). (This is not the same pointer returned in |spirv|.)
* If null, the compilation failed.
*/
EMSCRIPTEN_KEEPALIVE
void* convert_glsl_to_spirv(const char* glsl, int stage_int, bool gen_debug, uint32_t** spirv, size_t* spirv_len)
{
if (glsl == nullptr) {
fprintf(stderr, "Input pointer null\n");
return nullptr;
}
if (spirv == nullptr || spirv_len == nullptr) {
fprintf(stderr, "Output pointer null\n");
return nullptr;
}
*spirv = nullptr;
*spirv_len = 0;
if (stage_int != 0 && stage_int != 4 && stage_int != 5) {
fprintf(stderr, "Invalid shader stage\n");
return nullptr;
}
EShLanguage stage = static_cast<EShLanguage>(stage_int);
if (!initialized) {
glslang::InitializeProcess();
initialized = true;
}
glslang::TShader shader(stage);
shader.setStrings(&glsl, 1);
shader.setEnvInput(glslang::EShSourceGlsl, stage, glslang::EShClientVulkan, 100);
shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);
if (!shader.parse(&DefaultTBuiltInResource, 100, true, EShMsgDefault)) {
fprintf(stderr, "Parse failed\n");
fprintf(stderr, "%s\n", shader.getInfoLog());
return nullptr;
}
glslang::TProgram program;
program.addShader(&shader);
if (!program.link(EShMsgDefault)) {
fprintf(stderr, "Link failed\n");
fprintf(stderr, "%s\n", program.getInfoLog());
return nullptr;
}
glslang::SpvOptions spvOptions;
spvOptions.generateDebugInfo = gen_debug;
spvOptions.optimizeSize = false;
spvOptions.disassemble = false;
spvOptions.validate = false;
std::vector<uint32_t>* output = new std::vector<uint32_t>;
glslang::GlslangToSpv(*program.getIntermediate(stage), *output, nullptr, &spvOptions);
*spirv_len = output->size();
*spirv = output->data();
return output;
}
/*
* Destroys a buffer created by convert_glsl_to_spirv
*/
EMSCRIPTEN_KEEPALIVE
void destroy_output_buffer(void* p)
{
delete static_cast<std::vector<uint32_t>*>(p);
}
} // extern "C"
/*
* For non-Emscripten builds we supply a generic main, so that the glslang.js
* build target can generate an executable with a trivial use case instead of
* generating a WASM binary. This is done so that there is a target that can be
* built and output analyzed using desktop tools, since WASM binaries are
* specific to the Emscripten toolchain.
*/
#ifndef __EMSCRIPTEN__
int main() {
const char* input = R"(#version 310 es
void main() { })";
uint32_t* output;
size_t output_len;
void* id = convert_glsl_to_spirv(input, 4, false, &output, &output_len);
assert(output != nullptr);
assert(output_len != 0);
destroy_output_buffer(id);
return 0;
}
#endif // ifndef __EMSCRIPTEN__

View file

@ -0,0 +1,45 @@
Module['compileGLSLZeroCopy'] = function(glsl, shader_stage, gen_debug) {
gen_debug = !!gen_debug;
var shader_stage_int;
if (shader_stage === 'vertex') {
shader_stage_int = 0;
} else if (shader_stage === 'fragment') {
shader_stage_int = 4;
} else if (shader_stage === 'compute') {
shader_stage_int = 5;
} else {
throw new Error("shader_stage must be 'vertex', 'fragment', or 'compute'");
}
var p_output = Module['_malloc'](4);
var p_output_len = Module['_malloc'](4);
var id = ccall('convert_glsl_to_spirv',
'number',
['string', 'number', 'boolean', 'number', 'number'],
[glsl, shader_stage_int, gen_debug, p_output, p_output_len]);
var output = getValue(p_output, 'i32');
var output_len = getValue(p_output_len, 'i32');
Module['_free'](p_output);
Module['_free'](p_output_len);
if (id === 0) {
throw new Error('GLSL compilation failed');
}
var ret = {};
var outputIndexU32 = output / 4;
ret['data'] = Module['HEAPU32'].subarray(outputIndexU32, outputIndexU32 + output_len);
ret['free'] = function() {
Module['_destroy_output_buffer'](id);
};
return ret;
};
Module['compileGLSL'] = function(glsl, shader_stage, gen_debug) {
var compiled = Module['compileGLSLZeroCopy'](glsl, shader_stage, gen_debug);
var ret = compiled['data'].slice()
compiled['free']();
return ret;
};

View file

@ -15,6 +15,7 @@ if(WIN32)
endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
install(TARGETS OSDependent
install(TARGETS OSDependent EXPORT OSDependentTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
install(EXPORT OSDependentTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif(ENABLE_GLSLANG_INSTALL)

View file

@ -68,7 +68,7 @@
// This should always increase, as some paths do not consume
// a more major number.
// It should increment by one when new functionality is added.
#define GLSLANG_MINOR_VERSION 11
#define GLSLANG_MINOR_VERSION 13
//
// Call before doing any other compiler/linker operations.
@ -126,36 +126,37 @@ class TType;
typedef enum {
EShSourceNone,
EShSourceGlsl,
EShSourceHlsl,
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
EShSourceGlsl, // GLSL, includes ESSL (OpenGL ES GLSL)
EShSourceHlsl, // HLSL
} EShSource; // if EShLanguage were EShStage, this could be EShLanguage instead
typedef enum {
EShClientNone,
EShClientNone, // use when there is no client, e.g. for validation
EShClientVulkan,
EShClientOpenGL,
} EShClient;
typedef enum {
EShTargetNone,
EShTargetSpv, // preferred spelling
EShTargetSpv, // SPIR-V (preferred spelling)
EshTargetSpv = EShTargetSpv, // legacy spelling
} EShTargetLanguage;
typedef enum {
EShTargetVulkan_1_0 = (1 << 22),
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12),
EShTargetOpenGL_450 = 450,
EShTargetVulkan_1_0 = (1 << 22), // Vulkan 1.0
EShTargetVulkan_1_1 = (1 << 22) | (1 << 12), // Vulkan 1.1
EShTargetOpenGL_450 = 450, // OpenGL
} EShTargetClientVersion;
typedef EShTargetClientVersion EshTargetClientVersion;
typedef enum {
EShTargetSpv_1_0 = (1 << 16),
EShTargetSpv_1_1 = (1 << 16) | (1 << 8),
EShTargetSpv_1_2 = (1 << 16) | (2 << 8),
EShTargetSpv_1_3 = (1 << 16) | (3 << 8),
EShTargetSpv_1_4 = (1 << 16) | (4 << 8),
EShTargetSpv_1_0 = (1 << 16), // SPIR-V 1.0
EShTargetSpv_1_1 = (1 << 16) | (1 << 8), // SPIR-V 1.1
EShTargetSpv_1_2 = (1 << 16) | (2 << 8), // SPIR-V 1.2
EShTargetSpv_1_3 = (1 << 16) | (3 << 8), // SPIR-V 1.3
EShTargetSpv_1_4 = (1 << 16) | (4 << 8), // SPIR-V 1.4
EShTargetSpv_1_5 = (1 << 16) | (5 << 8), // SPIR-V 1.5
} EShTargetLanguageVersion;
struct TInputLanguage {
@ -234,8 +235,22 @@ enum EShMessages {
EShMsgHlslEnable16BitTypes = (1 << 11), // enable use of 16-bit types in SPIR-V for HLSL
EShMsgHlslLegalization = (1 << 12), // enable HLSL Legalization messages
EShMsgHlslDX9Compatible = (1 << 13), // enable HLSL DX9 compatible mode (right now only for samplers)
EShMsgBuiltinSymbolTable = (1 << 14), // print the builtin symbol table
};
//
// Options for building reflection
//
typedef enum {
EShReflectionDefault = 0, // default is original behaviour before options were added
EShReflectionStrictArraySuffix = (1 << 0), // reflection will follow stricter rules for array-of-structs suffixes
EShReflectionBasicArraySuffix = (1 << 1), // arrays of basic types will be appended with [0] as in GL reflection
EShReflectionIntermediateIO = (1 << 2), // reflect inputs and outputs to program, even with no vertex shader
EShReflectionSeparateBuffers = (1 << 3), // buffer variables and buffer blocks are reflected separately
EShReflectionAllBlockVariables = (1 << 4), // reflect all variables in blocks, even if they are inactive
EShReflectionUnwrapIOBlocks = (1 << 5), // unwrap input/output blocks the same as with uniform blocks
} EShReflectionOptions;
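These values are bit flags, so they can be OR'd together and passed to TProgram::buildReflection (declared further down); the particular combination here is chosen purely for illustration:
// Illustrative only: request separate buffer reflection plus all block variables.
int reflectionOptions = EShReflectionSeparateBuffers | EShReflectionAllBlockVariables;
// program.buildReflection(reflectionOptions);  // 'program' would be a linked glslang::TProgram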
//
// Build a table for bindings. This can be used for locating
// attributes, uniforms, globals, etc., as needed.
@ -418,15 +433,42 @@ public:
void addUniformLocationOverride(const char* name, int loc);
void setUniformLocationBase(int base);
void setInvertY(bool invert);
#ifdef ENABLE_HLSL
void setHlslIoMapping(bool hlslIoMap);
void setFlattenUniformArrays(bool flatten);
#endif
void setNoStorageFormat(bool useUnknownFormat);
void setNanMinMaxClamp(bool nanMinMaxClamp);
void setTextureSamplerTransformMode(EShTextureSamplerTransformMode mode);
// For setting up the environment (cleared to nothingness in the constructor).
// These must be called so that parsing is done for the right source language and
// target environment, either indirectly through TranslateEnvironment() based on
// EShMessages et al., or directly by the user.
//
// setEnvInput: The input source language and stage. If generating code for a
// specific client, the input client semantics to use and the
// version of that client's input semantics to use, otherwise
// use EShClientNone and version of 0, e.g. for validation mode.
// Note 'version' does not describe the target environment,
// just the version of the source dialect to compile under.
//
// See the definitions of TEnvironment, EShSource, EShLanguage,
// and EShClient for choices and more detail.
//
// setEnvClient: The client that will be hosting the execution, and its version.
// Note 'version' is not the version of the languages involved, but
// the version of the client environment.
// Use EShClientNone and version of 0 if there is no client, e.g.
// for validation mode.
//
// See EShTargetClientVersion for choices.
//
// setEnvTarget: The language to translate to when generating code, and that
// language's version.
// Use EShTargetNone and version of 0 if there is no client, e.g.
// for validation mode.
//
void setEnvInput(EShSource lang, EShLanguage envStage, EShClient client, int version)
{
environment.input.languageFamily = lang;
@ -444,8 +486,15 @@ public:
environment.target.language = lang;
environment.target.version = version;
}
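A hedged sketch of the call sequence the comment above describes, targeting Vulkan 1.1 and SPIR-V 1.3 (the same values the web binding added elsewhere in this commit uses); the fragment stage is an arbitrary example:
// Sketch only: Vulkan-flavored GLSL fragment shader compiled to SPIR-V 1.3.
glslang::TShader shader(EShLangFragment);
shader.setEnvInput(glslang::EShSourceGlsl, EShLangFragment, glslang::EShClientVulkan, 100);
shader.setEnvClient(glslang::EShClientVulkan, glslang::EShTargetVulkan_1_1);
shader.setEnvTarget(glslang::EShTargetSpv, glslang::EShTargetSpv_1_3);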
void getStrings(const char* const* &s, int& n) { s = strings; n = numStrings; }
#ifdef ENABLE_HLSL
void setEnvTargetHlslFunctionality1() { environment.target.hlslFunctionality1 = true; }
bool getEnvTargetHlslFunctionality1() const { return environment.target.hlslFunctionality1; }
#else
bool getEnvTargetHlslFunctionality1() const { return false; }
#endif
// Interface to #include handlers.
//
@ -596,8 +645,46 @@ private:
TShader& operator=(TShader&);
};
class TReflection;
class TIoMapper;
#ifndef GLSLANG_WEB
//
// A reflection database and its interface, consistent with the OpenGL API reflection queries.
//
// Data needed for just a single object at the granularity exchanged by the reflection API
class TObjectReflection {
public:
TObjectReflection(const std::string& pName, const TType& pType, int pOffset, int pGLDefineType, int pSize, int pIndex);
const TType* getType() const { return type; }
int getBinding() const;
void dump() const;
static TObjectReflection badReflection() { return TObjectReflection(); }
std::string name;
int offset;
int glDefineType;
int size; // data size in bytes for a block, array size for a (non-block) object that's an array
int index;
int counterIndex;
int numMembers;
int arrayStride; // stride of an array variable
int topLevelArrayStride; // stride of the top-level variable in a storage buffer member
EShLanguageMask stages;
protected:
TObjectReflection()
: offset(-1), glDefineType(-1), size(-1), index(-1), counterIndex(-1), numMembers(-1), arrayStride(0),
topLevelArrayStride(0), stages(EShLanguageMask(0)), type(nullptr)
{
}
const TType* type;
};
class TReflection;
class TIoMapper;
struct TVarEntryInfo;
// Allows customizing the binding layout after linking.
// All used uniform variables will invoke at least validateBinding.
@ -618,53 +705,65 @@ class TIoMapper;
// notify callbacks; this phase ends with a call to endNotifications.
// Phase two starts directly after the call to endNotifications
// and calls all other callbacks to validate and to get the
// bindings, sets, locations, component and color indices.
// bindings, sets, locations, component and color indices.
//
// NOTE: limit checks are still applied to bindings and sets
// and may result in an error.
class TIoMapResolver
{
public:
virtual ~TIoMapResolver() {}
virtual ~TIoMapResolver() {}
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, const char* name, const TType& type, bool is_live) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Should return true if the resulting/current binding would be okay.
// Basic idea is to do aliasing binding checks with this.
virtual bool validateBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current binding should be overridden.
// Return -1 if the current binding (including no binding) should be kept.
virtual int resolveBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current set should be overridden.
// Return -1 if the current set (including no set) should be kept.
virtual int resolveSet(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveUniformLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return true if the resulting/current setup would be okay.
// Basic idea is to do aliasing checks and reject invalid semantic names.
virtual bool validateInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current location should be overridden.
// Return -1 if the current location (including no location) should be kept.
virtual int resolveInOutLocation(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current component index should be overridden.
// Return -1 if the current component index (including no index) should be kept.
virtual int resolveInOutComponent(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Should return a value >= 0 if the current color index should be overridden.
// Return -1 if the current color index (including no index) should be kept.
virtual int resolveInOutIndex(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of a uniform variable
virtual void notifyBinding(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Notification of an in or out variable
virtual void notifyInOut(EShLanguage stage, TVarEntryInfo& ent) = 0;
// Called by mapIO when it starts its notify pass for the given stage
virtual void beginNotifications(EShLanguage stage) = 0;
// Called by mapIO when it has finished the notify pass
virtual void endNotifications(EShLanguage stage) = 0;
// Called by mapIO when it starts its resolve pass for the given stage
virtual void beginResolve(EShLanguage stage) = 0;
// Called by mapIO when it has finished the resolve pass
virtual void endResolve(EShLanguage stage) = 0;
// Called by mapIO when it starts its symbol collect for the given stage
virtual void beginCollect(EShLanguage stage) = 0;
// Called by mapIO when it has finished the symbol collect
virtual void endCollect(EShLanguage stage) = 0;
// Called by TSlotCollector to resolve storage locations or bindings
virtual void reserverStorageSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by TSlotCollector to resolve resource locations or bindings
virtual void reserverResourceSlot(TVarEntryInfo& ent, TInfoSink& infoSink) = 0;
// Called by mapIO.addStage to set the shader stage mask, marking a stage as added to this pipeline
virtual void addStage(EShLanguage stage) = 0;
};
#endif // GLSLANG_WEB
// Make one TProgram per set of shaders that will get linked together. Add all
// the shaders that are to be linked together. After calling shader.parse()
// for all shaders, call link().
@ -676,7 +775,7 @@ public:
TProgram();
virtual ~TProgram();
void addShader(TShader* shader) { stages[shader->stage].push_back(shader); }
std::list<TShader*>& getShaders(EShLanguage stage) { return stages[stage]; }
// Link Validation interface
bool link(EShMessages);
const char* getInfoLog();
@ -684,36 +783,101 @@ public:
TIntermediate* getIntermediate(EShLanguage stage) const { return intermediate[stage]; }
#ifndef GLSLANG_WEB
// Reflection Interface
bool buildReflection(); // call first, to do liveness analysis, index mapping, etc.; returns false on failure
int getNumLiveUniformVariables() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformBlocks() const; // can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
const char* getUniformName(int index) const; // can be used for "name" part of glGetActiveUniform()
const char* getUniformBlockName(int blockIndex) const; // can be used for glGetActiveUniformBlockName()
int getUniformBlockSize(int blockIndex) const; // can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformIndex(const char* name) const; // can be used for glGetUniformIndices()
int getUniformBinding(int index) const; // returns the binding number
EShLanguageMask getUniformStages(int index) const; // returns the shader stages where a uniform is present
int getUniformBlockBinding(int index) const; // returns the block binding number
int getUniformBlockIndex(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockCounterIndex(int index) const; // returns block index of associated counter.
int getUniformType(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformBufferOffset(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformArraySize(int index) const; // can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getNumLiveAttributes() const; // can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
// call first, to do liveness analysis, index mapping, etc.; returns false on failure
bool buildReflection(int opts = EShReflectionDefault);
unsigned getLocalSize(int dim) const; // return dim'th local size
const char *getAttributeName(int index) const; // can be used for glGetActiveAttrib()
int getAttributeType(int index) const; // can be used for glGetActiveAttrib()
const TType* getUniformTType(int index) const; // returns a TType*
const TType* getUniformBlockTType(int index) const; // returns a TType*
const TType* getAttributeTType(int index) const; // returns a TType*
int getReflectionIndex(const char *name) const;
int getReflectionPipeIOIndex(const char* name, const bool inOrOut) const;
int getNumUniformVariables() const;
const TObjectReflection& getUniform(int index) const;
int getNumUniformBlocks() const;
const TObjectReflection& getUniformBlock(int index) const;
int getNumPipeInputs() const;
const TObjectReflection& getPipeInput(int index) const;
int getNumPipeOutputs() const;
const TObjectReflection& getPipeOutput(int index) const;
int getNumBufferVariables() const;
const TObjectReflection& getBufferVariable(int index) const;
int getNumBufferBlocks() const;
const TObjectReflection& getBufferBlock(int index) const;
int getNumAtomicCounters() const;
const TObjectReflection& getAtomicCounter(int index) const;
// Legacy Reflection Interface - expressed in terms of the above interface
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORMS)
int getNumLiveUniformVariables() const { return getNumUniformVariables(); }
// can be used for glGetProgramiv(GL_ACTIVE_UNIFORM_BLOCKS)
int getNumLiveUniformBlocks() const { return getNumUniformBlocks(); }
// can be used for glGetProgramiv(GL_ACTIVE_ATTRIBUTES)
int getNumLiveAttributes() const { return getNumPipeInputs(); }
// can be used for glGetUniformIndices()
int getUniformIndex(const char *name) const { return getReflectionIndex(name); }
int getPipeIOIndex(const char *name, const bool inOrOut) const
{ return getReflectionPipeIOIndex(name, inOrOut); }
// can be used for "name" part of glGetActiveUniform()
const char *getUniformName(int index) const { return getUniform(index).name.c_str(); }
// returns the binding number
int getUniformBinding(int index) const { return getUniform(index).getBinding(); }
// returns the shader stages where a uniform is present
EShLanguageMask getUniformStages(int index) const { return getUniform(index).stages; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_BLOCK_INDEX)
int getUniformBlockIndex(int index) const { return getUniform(index).index; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_TYPE)
int getUniformType(int index) const { return getUniform(index).glDefineType; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_OFFSET)
int getUniformBufferOffset(int index) const { return getUniform(index).offset; }
// can be used for glGetActiveUniformsiv(GL_UNIFORM_SIZE)
int getUniformArraySize(int index) const { return getUniform(index).size; }
// returns a TType*
const TType *getUniformTType(int index) const { return getUniform(index).getType(); }
// can be used for glGetActiveUniformBlockName()
const char *getUniformBlockName(int index) const { return getUniformBlock(index).name.c_str(); }
// can be used for glGetActiveUniformBlockiv(UNIFORM_BLOCK_DATA_SIZE)
int getUniformBlockSize(int index) const { return getUniformBlock(index).size; }
// returns the block binding number
int getUniformBlockBinding(int index) const { return getUniformBlock(index).getBinding(); }
// returns block index of associated counter.
int getUniformBlockCounterIndex(int index) const { return getUniformBlock(index).counterIndex; }
// returns a TType*
const TType *getUniformBlockTType(int index) const { return getUniformBlock(index).getType(); }
// can be used for glGetActiveAttrib()
const char *getAttributeName(int index) const { return getPipeInput(index).name.c_str(); }
// can be used for glGetActiveAttrib()
int getAttributeType(int index) const { return getPipeInput(index).glDefineType; }
// returns a TType*
const TType *getAttributeTType(int index) const { return getPipeInput(index).getType(); }
void dumpReflection();
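A minimal usage sketch of the reflection interface above; 'program' is assumed to be a successfully linked glslang::TProgram, and the output format is just for illustration:
// Sketch only: enumerate live uniforms after linking.
if (program.buildReflection(EShReflectionDefault)) {
    for (int i = 0; i < program.getNumUniformVariables(); ++i) {
        const glslang::TObjectReflection& u = program.getUniform(i);
        printf("%s: binding %d, offset %d\n", u.name.c_str(), u.getBinding(), u.offset);
    }
}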
// I/O mapping: apply base offsets and map live unbound variables
// If resolver is not provided it uses the previous approach
// and respects auto assignment and offsets.
bool mapIO(TIoMapResolver* resolver = NULL);
bool mapIO(TIoMapResolver* pResolver = nullptr, TIoMapper* pIoMapper = nullptr);
#endif
protected:
bool linkStage(EShLanguage, EShMessages);
@ -723,8 +887,9 @@ protected:
TIntermediate* intermediate[EShLangCount];
bool newedIntermediate[EShLangCount]; // track which intermediates were "new" versus reusing a singleton unit in a stage
TInfoSink* infoSink;
#ifndef GLSLANG_WEB
TReflection* reflection;
TIoMapper* ioMapper;
#endif
bool linked;
private:

View file

@ -1,3 +1,16 @@
#!/usr/bin/env bash
#!/bin/bash
if [ "$1" = 'web' ]
then
m4 -P -DGLSLANG_WEB MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
elif [ "$#" -eq 0 ]
then
m4 -P MachineIndependent/glslang.m4 > MachineIndependent/glslang.y
else
echo usage:
echo $0 web
echo $0
exit
fi
bison --defines=MachineIndependent/glslang_tab.cpp.h -t MachineIndependent/glslang.y -o MachineIndependent/glslang_tab.cpp

View file

@ -41,28 +41,20 @@ set(HEADERS
spvIR.h
doc.h
SpvTools.h
disassemble.h)
disassemble.h
GLSL.ext.AMD.h
GLSL.ext.NV.h)
set(SPVREMAP_HEADERS
SPVRemapper.h
doc.h)
if(ENABLE_AMD_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.AMD.h)
endif(ENABLE_AMD_EXTENSIONS)
if(ENABLE_NV_EXTENSIONS)
list(APPEND
HEADERS
GLSL.ext.NV.h)
endif(ENABLE_NV_EXTENSIONS)
add_library(SPIRV STATIC ${LIB_TYPE} ${SOURCES} ${HEADERS})
add_library(SPIRV ${LIB_TYPE} ${SOURCES} ${HEADERS})
set_property(TARGET SPIRV PROPERTY FOLDER glslang)
set_property(TARGET SPIRV PROPERTY POSITION_INDEPENDENT_CODE ON)
target_include_directories(SPIRV PUBLIC ..)
target_include_directories(SPIRV PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/..>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
if (ENABLE_SPVREMAPPER)
add_library(SPVRemapper ${LIB_TYPE} ${SPVREMAP_SOURCES} ${SPVREMAP_HEADERS})
@ -83,7 +75,9 @@ if(ENABLE_OPT)
PRIVATE ${spirv-tools_SOURCE_DIR}/source
)
target_link_libraries(SPIRV glslang SPIRV-Tools-opt)
target_include_directories(SPIRV PUBLIC ../External)
target_include_directories(SPIRV PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../External>
$<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}/External>)
else()
target_link_libraries(SPIRV glslang)
endif(ENABLE_OPT)
@ -96,21 +90,29 @@ endif(WIN32)
if(ENABLE_GLSLANG_INSTALL)
if(BUILD_SHARED_LIBS)
if (ENABLE_SPVREMAPPER)
install(TARGETS SPVRemapper
install(TARGETS SPVRemapper EXPORT SPVRemapperTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(TARGETS SPIRV
install(TARGETS SPIRV EXPORT SPIRVTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR})
else()
if (ENABLE_SPVREMAPPER)
install(TARGETS SPVRemapper
install(TARGETS SPVRemapper EXPORT SPVRemapperTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
install(TARGETS SPIRV
install(TARGETS SPIRV EXPORT SPIRVTargets
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR})
endif()
if (ENABLE_SPVREMAPPER)
install(EXPORT SPVRemapperTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
endif()
install(EXPORT SPIRVTargets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake)
install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/SPIRV/)
install(FILES ${HEADERS} ${SPVREMAP_HEADERS} DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/glslang/SPIRV/)
endif(ENABLE_GLSLANG_INSTALL)

View file

@ -34,5 +34,6 @@ static const char* const E_SPV_EXT_shader_stencil_export = "SPV_EXT_shade
static const char* const E_SPV_EXT_shader_viewport_index_layer = "SPV_EXT_shader_viewport_index_layer";
static const char* const E_SPV_EXT_fragment_fully_covered = "SPV_EXT_fragment_fully_covered";
static const char* const E_SPV_EXT_fragment_invocation_density = "SPV_EXT_fragment_invocation_density";
static const char* const E_SPV_EXT_demote_to_helper_invocation = "SPV_EXT_demote_to_helper_invocation";
#endif // #ifndef GLSLextEXT_H

View file

@ -41,5 +41,8 @@ static const char* const E_SPV_KHR_storage_buffer_storage_class = "SPV_KHR_stora
static const char* const E_SPV_KHR_post_depth_coverage = "SPV_KHR_post_depth_coverage";
static const char* const E_SPV_KHR_vulkan_memory_model = "SPV_KHR_vulkan_memory_model";
static const char* const E_SPV_EXT_physical_storage_buffer = "SPV_EXT_physical_storage_buffer";
static const char* const E_SPV_KHR_physical_storage_buffer = "SPV_KHR_physical_storage_buffer";
static const char* const E_SPV_EXT_fragment_shader_interlock = "SPV_EXT_fragment_shader_interlock";
static const char* const E_SPV_KHR_shader_clock = "SPV_KHR_shader_clock";
#endif // #ifndef GLSLextKHR_H

View file

@ -72,4 +72,10 @@ const char* const E_SPV_NV_ray_tracing = "SPV_NV_ray_tracing";
//SPV_NV_shading_rate
const char* const E_SPV_NV_shading_rate = "SPV_NV_shading_rate";
//SPV_NV_cooperative_matrix
const char* const E_SPV_NV_cooperative_matrix = "SPV_NV_cooperative_matrix";
//SPV_NV_shader_sm_builtins
const char* const E_SPV_NV_shader_sm_builtins = "SPV_NV_shader_sm_builtins";
#endif // #ifndef GLSLextNV_H

File diff suppressed because it is too large

View file

@ -40,7 +40,7 @@
#endif
#include "SpvTools.h"
#include "../glslang/Include/intermediate.h"
#include "glslang/Include/intermediate.h"
#include <string>
#include <vector>

View file

@ -61,17 +61,22 @@ namespace {
// Use by calling visit() on the root block.
class ReadableOrderTraverser {
public:
explicit ReadableOrderTraverser(std::function<void(Block*)> callback) : callback_(callback) {}
ReadableOrderTraverser(std::function<void(Block*, spv::ReachReason, Block*)> callback)
: callback_(callback) {}
// Visits the block if it hasn't been visited already and isn't currently
// being delayed. Invokes callback(block), then descends into its
// being delayed. Invokes callback(block, why, header), then descends into its
// successors. Delays merge-block and continue-block processing until all
// the branches have been completed.
void visit(Block* block)
// the branches have been completed. If |block| is an unreachable merge block or
// an unreachable continue target, then |header| is the corresponding header block.
void visit(Block* block, spv::ReachReason why, Block* header)
{
assert(block);
if (why == spv::ReachViaControlFlow) {
reachableViaControlFlow_.insert(block);
}
if (visited_.count(block) || delayed_.count(block))
return;
callback_(block);
callback_(block, why, header);
visited_.insert(block);
Block* mergeBlock = nullptr;
Block* continueBlock = nullptr;
@ -87,27 +92,40 @@ public:
delayed_.insert(continueBlock);
}
}
const auto successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it);
if (why == spv::ReachViaControlFlow) {
const auto& successors = block->getSuccessors();
for (auto it = successors.cbegin(); it != successors.cend(); ++it)
visit(*it, why, nullptr);
}
if (continueBlock) {
const spv::ReachReason continueWhy =
(reachableViaControlFlow_.count(continueBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadContinue;
delayed_.erase(continueBlock);
visit(continueBlock);
visit(continueBlock, continueWhy, block);
}
if (mergeBlock) {
const spv::ReachReason mergeWhy =
(reachableViaControlFlow_.count(mergeBlock) > 0)
? spv::ReachViaControlFlow
: spv::ReachDeadMerge;
delayed_.erase(mergeBlock);
visit(mergeBlock);
visit(mergeBlock, mergeWhy, block);
}
}
private:
std::function<void(Block*)> callback_;
std::function<void(Block*, spv::ReachReason, Block*)> callback_;
// Whether a block has already been visited or is being delayed.
std::unordered_set<Block *> visited_, delayed_;
// The set of blocks that actually are reached via control flow.
std::unordered_set<Block *> reachableViaControlFlow_;
};
}
void spv::inReadableOrder(Block* root, std::function<void(Block*)> callback)
void spv::inReadableOrder(Block* root, std::function<void(Block*, spv::ReachReason, Block*)> callback)
{
ReadableOrderTraverser(callback).visit(root);
ReadableOrderTraverser(callback).visit(root, spv::ReachViaControlFlow, nullptr);
}
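A hedged sketch of a caller adapting to the new three-argument callback; emitBlock and emitDeadBlock are hypothetical consumers, not functions from this codebase:
// Sketch only: visit all blocks of 'root', distinguishing dead merge/continue targets.
spv::inReadableOrder(root, [&](spv::Block* b, spv::ReachReason why, spv::Block* header) {
    if (why == spv::ReachViaControlFlow)
        emitBlock(b);                 // hypothetical: block reached by normal control flow
    else
        emitDeadBlock(b, header);     // hypothetical: unreachable merge/continue target of 'header'
});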

View file

@ -32,6 +32,8 @@
// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#ifndef GLSLANG_WEB
#include "Logger.h"
#include <algorithm>
@ -66,3 +68,5 @@ std::string SpvBuildLogger::getAllMessages() const {
}
} // end spv namespace
#endif

View file

@ -46,6 +46,14 @@ class SpvBuildLogger {
public:
SpvBuildLogger() {}
#ifdef GLSLANG_WEB
void tbdFunctionality(const std::string& f) { }
void missingFunctionality(const std::string& f) { }
void warning(const std::string& w) { }
void error(const std::string& e) { errors.push_back(e); }
std::string getAllMessages() { return ""; }
#else
// Registers a TBD functionality.
void tbdFunctionality(const std::string& f);
// Registers a missing functionality.
@ -59,6 +67,7 @@ public:
// Returns all messages accumulated in the order of:
// TBD functionalities, missing functionalities, warnings, errors.
std::string getAllMessages() const;
#endif
private:
SpvBuildLogger(const SpvBuildLogger&);

View file

@ -45,7 +45,7 @@ namespace spv {
// MSVC defines __cplusplus as an older value, even when it supports almost all of 11.
// We handle that here by making our own symbol.
#if __cplusplus >= 201103L || _MSC_VER >= 1700
#if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1700)
# define use_cpp11 1
#endif
@ -195,7 +195,7 @@ private:
// Header access & set methods
spirword_t magic() const { return spv[0]; } // return magic number
spirword_t bound() const { return spv[3]; } // return Id bound from header
spirword_t bound(spirword_t b) { return spv[3] = b; };
spirword_t bound(spirword_t b) { return spv[3] = b; }
spirword_t genmagic() const { return spv[2]; } // generator magic
spirword_t genmagic(spirword_t m) { return spv[2] = m; }
spirword_t schemaNum() const { return spv[4]; } // schema number from header

View file

@ -46,7 +46,9 @@
#include "SpvBuilder.h"
#ifndef GLSLANG_WEB
#include "hex_float.h"
#endif
#ifndef _WIN32
#include <cstdio>
@ -230,6 +232,11 @@ Id Builder::makePointerFromForwardPointer(StorageClass storageClass, Id forwardP
Id Builder::makeIntegerType(int width, bool hasSign)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeInt].size(); ++t) {
@ -265,6 +272,11 @@ Id Builder::makeIntegerType(int width, bool hasSign)
Id Builder::makeFloatType(int width)
{
#ifdef GLSLANG_WEB
assert(width == 32);
width = 32;
#endif
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeFloat].size(); ++t) {
@ -388,6 +400,33 @@ Id Builder::makeMatrixType(Id component, int cols, int rows)
return type->getResultId();
}
Id Builder::makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols)
{
// try to find it
Instruction* type;
for (int t = 0; t < (int)groupedTypes[OpTypeCooperativeMatrixNV].size(); ++t) {
type = groupedTypes[OpTypeCooperativeMatrixNV][t];
if (type->getIdOperand(0) == component &&
type->getIdOperand(1) == scope &&
type->getIdOperand(2) == rows &&
type->getIdOperand(3) == cols)
return type->getResultId();
}
// not found, make it
type = new Instruction(getUniqueId(), NoType, OpTypeCooperativeMatrixNV);
type->addIdOperand(component);
type->addIdOperand(scope);
type->addIdOperand(rows);
type->addIdOperand(cols);
groupedTypes[OpTypeCooperativeMatrixNV].push_back(type);
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
return type->getResultId();
}
// TODO: performance: track arrays per stride
// If a stride is supplied (non-zero) make an array.
// If no stride (0), reuse previous array types.
@ -489,6 +528,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
constantsTypesGlobals.push_back(std::unique_ptr<Instruction>(type));
module.mapInstruction(type);
#ifndef GLSLANG_WEB
// deal with capabilities
switch (dim) {
case DimBuffer:
@ -534,6 +574,7 @@ Id Builder::makeImageType(Id sampledType, Dim dim, bool depth, bool arrayed, boo
addCapability(CapabilityImageMSArray);
}
}
#endif
return type->getResultId();
}
@ -559,7 +600,7 @@ Id Builder::makeSampledImageType(Id imageType)
return type->getResultId();
}
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
Id Builder::makeAccelerationStructureNVType()
{
Instruction *type;
@ -575,6 +616,7 @@ Id Builder::makeAccelerationStructureNVType()
return type->getResultId();
}
#endif
Id Builder::getDerefTypeId(Id resultId) const
{
Id typeId = getTypeId(resultId);
@ -623,6 +665,9 @@ int Builder::getNumTypeConstituents(Id typeId) const
}
case OpTypeStruct:
return instr->getNumOperands();
case OpTypeCooperativeMatrixNV:
// has only one constituent when used with OpCompositeConstruct.
return 1;
default:
assert(0);
return 1;
@ -669,6 +714,7 @@ Id Builder::getContainedTypeId(Id typeId, int member) const
case OpTypeMatrix:
case OpTypeArray:
case OpTypeRuntimeArray:
case OpTypeCooperativeMatrixNV:
return instr->getIdOperand(0);
case OpTypePointer:
return instr->getIdOperand(1);
@ -908,6 +954,10 @@ Id Builder::makeFloatConstant(float f, bool specConstant)
Id Builder::makeDoubleConstant(double d, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(64);
union { double db; unsigned long long ull; } u;
@ -932,10 +982,15 @@ Id Builder::makeDoubleConstant(double d, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFloat16Constant(float f16, bool specConstant)
{
#ifdef GLSLANG_WEB
assert(0);
return NoResult;
#else
Op opcode = specConstant ? OpSpecConstant : OpConstant;
Id typeId = makeFloatType(16);
@ -960,36 +1015,43 @@ Id Builder::makeFloat16Constant(float f16, bool specConstant)
module.mapInstruction(c);
return c->getResultId();
#endif
}
Id Builder::makeFpConstant(Id type, double d, bool specConstant)
{
assert(isFloatType(type));
#ifdef GLSLANG_WEB
const int width = 32;
assert(width == getScalarTypeWidth(type));
#else
const int width = getScalarTypeWidth(type);
#endif
switch (getScalarTypeWidth(type)) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(isFloatType(type));
assert(false);
return NoResult;
switch (width) {
case 16:
return makeFloat16Constant((float)d, specConstant);
case 32:
return makeFloatConstant((float)d, specConstant);
case 64:
return makeDoubleConstant(d, specConstant);
default:
break;
}
assert(false);
return NoResult;
}
Id Builder::findCompositeConstant(Op typeClass, const std::vector<Id>& comps)
Id Builder::findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps)
{
Instruction* constant = 0;
bool found = false;
for (int i = 0; i < (int)groupedConstants[typeClass].size(); ++i) {
constant = groupedConstants[typeClass][i];
// same shape?
if (constant->getNumOperands() != (int)comps.size())
if (constant->getTypeId() != typeId)
continue;
// same contents?
@ -1044,8 +1106,9 @@ Id Builder::makeCompositeConstant(Id typeId, const std::vector<Id>& members, boo
case OpTypeVector:
case OpTypeArray:
case OpTypeMatrix:
case OpTypeCooperativeMatrixNV:
if (! specConstant) {
Id existing = findCompositeConstant(typeClass, members);
Id existing = findCompositeConstant(typeClass, typeId, members);
if (existing)
return existing;
}
@ -1275,11 +1338,13 @@ void Builder::makeDiscard()
}
// Comments in header
Id Builder::createVariable(StorageClass storageClass, Id type, const char* name)
Id Builder::createVariable(StorageClass storageClass, Id type, const char* name, Id initializer)
{
Id pointerType = makePointer(storageClass, type);
Instruction* inst = new Instruction(getUniqueId(), pointerType, OpVariable);
inst->addImmediateOperand(storageClass);
if (initializer != NoResult)
inst->addIdOperand(initializer);
switch (storageClass) {
case StorageClassFunction:
@ -1408,6 +1473,23 @@ Id Builder::createArrayLength(Id base, unsigned int member)
return length->getResultId();
}
Id Builder::createCooperativeMatrixLength(Id type)
{
spv::Id intType = makeUintType(32);
// Generate code for spec constants if in spec constant operation
// generation mode.
if (generatingOpCodeForSpecConst) {
return createSpecConstantOp(OpCooperativeMatrixLengthNV, intType, std::vector<Id>(1, type), std::vector<Id>());
}
Instruction* length = new Instruction(getUniqueId(), intType, OpCooperativeMatrixLengthNV);
length->addIdOperand(type);
buildPoint->addInstruction(std::unique_ptr<Instruction>(length));
return length->getResultId();
}
Id Builder::createCompositeExtract(Id composite, Id typeId, unsigned index)
{
// Generate code for spec constants if in spec constant operation
@ -1758,7 +1840,7 @@ Id Builder::createBuiltinCall(Id resultType, Id builtins, int entryPoint, const
// Accept all parameters needed to create a texture instruction.
// Create the correct instruction based on the inputs, and make the call.
Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
bool noImplicitLod, const TextureParameters& parameters)
bool noImplicitLod, const TextureParameters& parameters, ImageOperandsMask signExtensionMask)
{
static const int maxTextureArgs = 10;
Id texArgs[maxTextureArgs] = {};
@ -1775,7 +1857,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
if (parameters.component != NoResult)
texArgs[numArgs++] = parameters.component;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
if (parameters.granularity != NoResult)
texArgs[numArgs++] = parameters.granularity;
if (parameters.coarse != NoResult)
@ -1785,8 +1867,8 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
//
// Set up the optional arguments
//
int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
++numArgs; // speculatively make room for the mask operand
int optArgNum = numArgs; // track which operand, if it exists, is the mask of optional arguments
++numArgs; // speculatively make room for the mask operand
ImageOperandsMask mask = ImageOperandsMaskNone; // the mask operand
if (parameters.bias) {
mask = (ImageOperandsMask)(mask | ImageOperandsBiasMask);
@ -1822,6 +1904,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
mask = (ImageOperandsMask)(mask | ImageOperandsConstOffsetsMask);
texArgs[numArgs++] = parameters.offsets;
}
#ifndef GLSLANG_WEB
if (parameters.sample) {
mask = (ImageOperandsMask)(mask | ImageOperandsSampleMask);
texArgs[numArgs++] = parameters.sample;
@ -1839,6 +1922,8 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
if (parameters.volatil) {
mask = mask | ImageOperandsVolatileTexelKHRMask;
}
#endif
mask = mask | signExtensionMask;
if (mask == ImageOperandsMaskNone)
--numArgs; // undo speculative reservation for the mask argument
else
@ -1853,10 +1938,9 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseFetch;
else
opCode = OpImageFetch;
#ifdef NV_EXTENSIONS
#ifndef GLSLANG_WEB
} else if (parameters.granularity && parameters.coarse) {
opCode = OpImageSampleFootprintNV;
#endif
} else if (gather) {
if (parameters.Dref)
if (sparse)
@ -1868,6 +1952,7 @@ Id Builder::createTextureCall(Decoration precision, Id resultType, bool sparse,
opCode = OpImageSparseGather;
else
opCode = OpImageGather;
#endif
} else if (explicitLod) {
if (parameters.Dref) {
if (proj)
@ -2016,11 +2101,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
break;
}
case OpImageQueryLod:
#ifdef AMD_EXTENSIONS
resultType = makeVectorType(getScalarTypeId(getTypeId(parameters.coords)), 2);
#else
resultType = makeVectorType(makeFloatType(32), 2);
#endif
break;
case OpImageQueryLevels:
case OpImageQuerySamples:
@ -2038,6 +2119,7 @@ Id Builder::createTextureQueryCall(Op opCode, const TextureParameters& parameter
if (parameters.lod)
query->addIdOperand(parameters.lod);
buildPoint->addInstruction(std::unique_ptr<Instruction>(query));
addCapability(CapabilityImageQuery);
return query->getResultId();
}
@ -2231,7 +2313,12 @@ Id Builder::createMatrixConstructor(Decoration precision, const std::vector<Id>&
int numRows = getTypeNumRows(resultTypeId);
Instruction* instr = module.getInstruction(componentTypeId);
unsigned bitCount = instr->getImmediateOperand(0);
#ifdef GLSLANG_WEB
const unsigned bitCount = 32;
assert(bitCount == instr->getImmediateOperand(0));
#else
const unsigned bitCount = instr->getImmediateOperand(0);
#endif
// Optimize matrix constructed from a bigger matrix
if (isMatrix(sources[0]) && getNumColumns(sources[0]) >= numCols && getNumRows(sources[0]) >= numRows) {
@ -2598,15 +2685,22 @@ Id Builder::accessChainLoad(Decoration precision, Decoration nonUniform, Id resu
}
}
if (constant)
if (constant) {
id = createCompositeExtract(accessChain.base, swizzleBase, indexes);
else {
// make a new function variable for this r-value
Id lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
// store into it
createStore(accessChain.base, lValue);
} else {
Id lValue = NoResult;
if (spvVersion >= Spv_1_4) {
// make a new function variable for this r-value, using an initializer,
// and mark it as NonWritable so that downstream it can be detected as a lookup
// table
lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable",
accessChain.base);
addDecoration(lValue, DecorationNonWritable);
} else {
lValue = createVariable(StorageClassFunction, getTypeId(accessChain.base), "indexable");
// store into it
createStore(accessChain.base, lValue);
}
// move base to the new variable
accessChain.base = lValue;
accessChain.isRValue = false;
@ -2908,14 +3002,14 @@ void Builder::createSelectionMerge(Block* mergeBlock, unsigned int control)
}
void Builder::createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control,
unsigned int dependencyLength)
const std::vector<unsigned int>& operands)
{
Instruction* merge = new Instruction(OpLoopMerge);
merge->addIdOperand(mergeBlock->getId());
merge->addIdOperand(continueBlock->getId());
merge->addImmediateOperand(control);
if ((control & LoopControlDependencyLengthMask) != 0)
merge->addImmediateOperand(dependencyLength);
for (int op = 0; op < (int)operands.size(); ++op)
merge->addImmediateOperand(operands[op]);
buildPoint->addInstruction(std::unique_ptr<Instruction>(merge));
}
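With the dependency-length-only parameter replaced by a generic operand list, callers can forward any loop-control literals (MinIterations, PeelCount, PartialCount, ...) alongside the control mask. A hedged sketch of the new call shape; the builder and block pointers are placeholders supplied by the caller:

    // Request partial unrolling by a factor of 4: set the PartialCount bit and
    // pass its single literal operand (the unroll factor) in the operand vector.
    void emitPartiallyUnrolledLoopMerge(spv::Builder& builder,
                                        spv::Block* mergeBlock, spv::Block* continueBlock)
    {
        std::vector<unsigned int> operands = { 4 };
        builder.createLoopMerge(mergeBlock, continueBlock,
                                spv::LoopControlPartialCountMask, operands);
    }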

View file

@ -61,6 +61,15 @@
namespace spv {
typedef enum {
Spv_1_0 = (1 << 16),
Spv_1_1 = (1 << 16) | (1 << 8),
Spv_1_2 = (1 << 16) | (2 << 8),
Spv_1_3 = (1 << 16) | (3 << 8),
Spv_1_4 = (1 << 16) | (4 << 8),
Spv_1_5 = (1 << 16) | (5 << 8),
} SpvVersion;
class Builder {
public:
Builder(unsigned int spvVersion, unsigned int userNumber, SpvBuildLogger* logger);
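These enumerators pack the version the same way as the SPIR-V module header word (major in bits 16..23, minor in bits 8..15), so plain integer comparisons such as getSpvVersion() < Spv_1_4 work directly. A small decoding sketch, illustrative only:

    constexpr unsigned spvVersionMajor(unsigned packed) { return (packed >> 16) & 0xff; }
    constexpr unsigned spvVersionMinor(unsigned packed) { return (packed >>  8) & 0xff; }
    // e.g. spvVersionMajor(spv::Spv_1_4) == 1 and spvVersionMinor(spv::Spv_1_4) == 4.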
@ -97,6 +106,20 @@ public:
void addModuleProcessed(const std::string& p) { moduleProcesses.push_back(p.c_str()); }
void setEmitOpLines() { emitOpLines = true; }
void addExtension(const char* ext) { extensions.insert(ext); }
void removeExtension(const char* ext)
{
extensions.erase(ext);
}
void addIncorporatedExtension(const char* ext, SpvVersion incorporatedVersion)
{
if (getSpvVersion() < static_cast<unsigned>(incorporatedVersion))
addExtension(ext);
}
void promoteIncorporatedExtension(const char* baseExt, const char* promoExt, SpvVersion incorporatedVersion)
{
removeExtension(baseExt);
addIncorporatedExtension(promoExt, incorporatedVersion);
}
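addIncorporatedExtension declares an extension only when the target SPIR-V version predates the core version that absorbed it, and promoteIncorporatedExtension swaps an already-declared name for its promoted form under the same rule. A sketch mirroring the 8-bit-storage call added later in this change (the wrapper is illustrative, not part of the diff):

    // Emits OpExtension "SPV_KHR_8bit_storage" only when targeting SPIR-V < 1.5,
    // where it is not yet part of core.
    void declare8BitStorage(spv::Builder& builder)
    {
        builder.addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5);
    }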
void addInclude(const std::string& name, const std::string& text)
{
spv::Id incId = getStringId(name);
@ -155,6 +178,7 @@ public:
Id makeImageType(Id sampledType, Dim, bool depth, bool arrayed, bool ms, unsigned sampled, ImageFormat format);
Id makeSamplerType();
Id makeSampledImageType(Id imageType);
Id makeCooperativeMatrixType(Id component, Id scope, Id rows, Id cols);
// accelerationStructureNV type
Id makeAccelerationStructureNVType();
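For the new makeCooperativeMatrixType above, scope, rows and columns are passed as constant ids rather than literals, matching OpTypeCooperativeMatrixNV. A hedged construction sketch with arbitrary example dimensions:

    spv::Id makeSubgroupCoopMat16x16(spv::Builder& builder)
    {
        return builder.makeCooperativeMatrixType(
            builder.makeFloatType(16),
            builder.makeUintConstant(spv::ScopeSubgroup),  // scope, as a constant id
            builder.makeUintConstant(16),                  // rows
            builder.makeUintConstant(16));                 // columns
    }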
@ -178,6 +202,7 @@ public:
bool isScalar(Id resultId) const { return isScalarType(getTypeId(resultId)); }
bool isVector(Id resultId) const { return isVectorType(getTypeId(resultId)); }
bool isMatrix(Id resultId) const { return isMatrixType(getTypeId(resultId)); }
bool isCooperativeMatrix(Id resultId)const { return isCooperativeMatrixType(getTypeId(resultId)); }
bool isAggregate(Id resultId) const { return isAggregateType(getTypeId(resultId)); }
bool isSampledImage(Id resultId) const { return isSampledImageType(getTypeId(resultId)); }
@ -191,7 +216,12 @@ public:
bool isMatrixType(Id typeId) const { return getTypeClass(typeId) == OpTypeMatrix; }
bool isStructType(Id typeId) const { return getTypeClass(typeId) == OpTypeStruct; }
bool isArrayType(Id typeId) const { return getTypeClass(typeId) == OpTypeArray; }
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId); }
#ifdef GLSLANG_WEB
bool isCooperativeMatrixType(Id typeId)const { return false; }
#else
bool isCooperativeMatrixType(Id typeId)const { return getTypeClass(typeId) == OpTypeCooperativeMatrixNV; }
#endif
bool isAggregateType(Id typeId) const { return isArrayType(typeId) || isStructType(typeId) || isCooperativeMatrixType(typeId); }
bool isImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeImage; }
bool isSamplerType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampler; }
bool isSampledImageType(Id typeId) const { return getTypeClass(typeId) == OpTypeSampledImage; }
@ -297,7 +327,7 @@ public:
void makeDiscard();
// Create a global or function local or IO variable.
Id createVariable(StorageClass, Id type, const char* name = 0);
Id createVariable(StorageClass, Id type, const char* name = 0, Id initializer = NoResult);
// Create an intermediate with an undefined value.
Id createUndefined(Id type);
@ -314,6 +344,9 @@ public:
// Create an OpArrayLength instruction
Id createArrayLength(Id base, unsigned int member);
// Create an OpCooperativeMatrixLengthNV instruction
Id createCooperativeMatrixLength(Id type);
// Create an OpCompositeExtract instruction
Id createCompositeExtract(Id composite, Id typeId, unsigned index);
Id createCompositeExtract(Id composite, Id typeId, const std::vector<unsigned>& indexes);
@ -402,7 +435,8 @@ public:
};
// Select the correct texture operation based on all inputs, and emit the correct instruction
Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather, bool noImplicit, const TextureParameters&);
Id createTextureCall(Decoration precision, Id resultType, bool sparse, bool fetch, bool proj, bool gather,
bool noImplicit, const TextureParameters&, ImageOperandsMask);
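The trailing ImageOperandsMask is OR'd into the image-operands word of the generated texture instruction, which is how the SignExtend/ZeroExtend operands added to the SPIR-V headers below reach texture ops. A hedged caller sketch; the builder, result type and parameter block are placeholders set up by the caller:

    spv::Id sampleSignedInt(spv::Builder& builder, spv::Id resultType,
                            const spv::Builder::TextureParameters& params)
    {
        return builder.createTextureCall(spv::NoPrecision, resultType,
                                         /*sparse*/ false, /*fetch*/ false,
                                         /*proj*/ false, /*gather*/ false,
                                         /*noImplicitLod*/ false, params,
                                         spv::ImageOperandsSignExtendMask);
    }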
// Emit the OpTextureQuery* instruction that was passed in.
// Figure out the right return value and type, and return it.
@ -493,7 +527,7 @@ public:
// Add a branch to the continue_target of the current (innermost) loop.
void createLoopContinue();
// Add an exit(e.g. "break") from the innermost loop that we're currently
// Add an exit (e.g. "break") from the innermost loop that we're currently
// in.
void createLoopExit();
@ -542,6 +576,14 @@ public:
// Accumulate whether anything in the chain of structures has coherent decorations.
struct CoherentFlags {
CoherentFlags() { clear(); }
#ifdef GLSLANG_WEB
void clear() { }
bool isVolatile() const { return false; }
CoherentFlags operator |=(const CoherentFlags &other) { return *this; }
#else
bool isVolatile() const { return volatil; }
unsigned coherent : 1;
unsigned devicecoherent : 1;
unsigned queuefamilycoherent : 1;
@ -562,7 +604,6 @@ public:
isImage = 0;
}
CoherentFlags() { clear(); }
CoherentFlags operator |=(const CoherentFlags &other) {
coherent |= other.coherent;
devicecoherent |= other.devicecoherent;
@ -574,6 +615,7 @@ public:
isImage |= other.isImage;
return *this;
}
#endif
};
CoherentFlags coherentFlags;
};
@ -641,22 +683,27 @@ public:
// based on the type of the base and the chain of dereferences.
Id accessChainGetInferredType();
// Add capabilities, extensions, remove unneeded decorations, etc.,
// Add capabilities, extensions, remove unneeded decorations, etc.,
// based on the resulting SPIR-V.
void postProcess();
// Prune unreachable blocks in the CFG and remove unneeded decorations.
void postProcessCFG();
#ifndef GLSLANG_WEB
// Add capabilities, extensions based on instructions in the module.
void postProcessFeatures();
// Hook to visit each instruction in a block in a function
void postProcess(Instruction&);
// Hook to visit each instruction in a reachable block in a function.
void postProcessReachable(const Instruction&);
// Hook to visit each non-32-bit sized float/int operation in a block.
void postProcessType(const Instruction&, spv::Id typeId);
#endif
void dump(std::vector<unsigned int>&) const;
void createBranch(Block* block);
void createConditionalBranch(Id condition, Block* thenBlock, Block* elseBlock);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, unsigned int dependencyLength);
void createLoopMerge(Block* mergeBlock, Block* continueBlock, unsigned int control, const std::vector<unsigned int>& operands);
// Sets to generate opcode for specialization constants.
void setToSpecConstCodeGenMode() { generatingOpCodeForSpecConst = true; }
@ -670,7 +717,7 @@ public:
Id makeInt64Constant(Id typeId, unsigned long long value, bool specConstant);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned value);
Id findScalarConstant(Op typeClass, Op opcode, Id typeId, unsigned v1, unsigned v2);
Id findCompositeConstant(Op typeClass, const std::vector<Id>& comps);
Id findCompositeConstant(Op typeClass, Id typeId, const std::vector<Id>& comps);
Id findStructConstant(Id typeId, const std::vector<Id>& comps);
Id collapseAccessChain();
void remapDynamicSwizzle();

View file

@ -39,6 +39,7 @@
#include <cassert>
#include <cstdlib>
#include <unordered_map>
#include <unordered_set>
#include <algorithm>
@ -51,16 +52,13 @@ namespace spv {
#include "GLSL.std.450.h"
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
namespace spv {
#ifndef GLSLANG_WEB
// Hook to visit each operand type and result type of an instruction.
// Will be called multiple times for one instruction, once for each typed
// operand and the result.
@ -118,12 +116,48 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
case OpAccessChain:
case OpPtrAccessChain:
case OpCopyObject:
break;
case OpFConvert:
case OpSConvert:
case OpUConvert:
// Look for any 8/16-bit storage capabilities. If there are none, assume that
// the convert instruction requires the Float16/Int8/16 capability.
if (containsType(typeId, OpTypeFloat, 16) || containsType(typeId, OpTypeInt, 16)) {
bool foundStorage = false;
for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
spv::Capability cap = *it;
if (cap == spv::CapabilityStorageInputOutput16 ||
cap == spv::CapabilityStoragePushConstant16 ||
cap == spv::CapabilityStorageUniformBufferBlock16 ||
cap == spv::CapabilityStorageUniform16) {
foundStorage = true;
break;
}
}
if (!foundStorage) {
if (containsType(typeId, OpTypeFloat, 16))
addCapability(CapabilityFloat16);
if (containsType(typeId, OpTypeInt, 16))
addCapability(CapabilityInt16);
}
}
if (containsType(typeId, OpTypeInt, 8)) {
bool foundStorage = false;
for (auto it = capabilities.begin(); it != capabilities.end(); ++it) {
spv::Capability cap = *it;
if (cap == spv::CapabilityStoragePushConstant8 ||
cap == spv::CapabilityUniformAndStorageBuffer8BitAccess ||
cap == spv::CapabilityStorageBuffer8BitAccess) {
foundStorage = true;
break;
}
}
if (!foundStorage) {
addCapability(CapabilityInt8);
}
}
break;
case OpExtInst:
#if AMD_EXTENSIONS
switch (inst.getImmediateOperand(1)) {
case GLSLstd450Frexp:
case GLSLstd450FrexpStruct:
@ -139,7 +173,6 @@ void Builder::postProcessType(const Instruction& inst, Id typeId)
default:
break;
}
#endif
break;
default:
if (basicTypeOp == OpTypeFloat && width == 16)
@ -185,12 +218,10 @@ void Builder::postProcess(Instruction& inst)
addCapability(CapabilityImageQuery);
break;
#ifdef NV_EXTENSIONS
case OpGroupNonUniformPartitionNV:
addExtension(E_SPV_NV_shader_subgroup_partitioned);
addCapability(CapabilityGroupNonUniformPartitionedNV);
break;
#endif
case OpLoad:
case OpStore:
@ -258,10 +289,9 @@ void Builder::postProcess(Instruction& inst)
assert(inst.getNumOperands() >= 3);
unsigned int memoryAccess = inst.getImmediateOperand((inst.getOpCode() == OpStore) ? 2 : 1);
assert(memoryAccess & MemoryAccessAlignedMask);
static_cast<void>(memoryAccess);
// Compute the index of the alignment operand.
int alignmentIdx = 2;
if (memoryAccess & MemoryAccessVolatileMask)
alignmentIdx++;
if (inst.getOpCode() == OpStore)
alignmentIdx++;
// Merge new and old (mis)alignment
@ -290,17 +320,16 @@ void Builder::postProcess(Instruction& inst)
}
}
}
// Called for each instruction in a reachable block.
void Builder::postProcessReachable(const Instruction&)
{
// did have code here, but questionable to do so without deleting the instructions
}
#endif
// comment in header
void Builder::postProcess()
void Builder::postProcessCFG()
{
// reachableBlocks is the set of blocks reached via control flow, or which are
// unreachable continue targets or unreachable merges.
std::unordered_set<const Block*> reachableBlocks;
std::unordered_map<Block*, Block*> headerForUnreachableContinue;
std::unordered_set<Block*> unreachableMerges;
std::unordered_set<Id> unreachableDefinitions;
// Collect IDs defined in unreachable blocks. For each function, label the
// reachable blocks first. Then for each unreachable block, collect the
@ -308,16 +337,41 @@ void Builder::postProcess()
for (auto fi = module.getFunctions().cbegin(); fi != module.getFunctions().cend(); fi++) {
Function* f = *fi;
Block* entry = f->getEntryBlock();
inReadableOrder(entry, [&reachableBlocks](const Block* b) { reachableBlocks.insert(b); });
inReadableOrder(entry,
[&reachableBlocks, &unreachableMerges, &headerForUnreachableContinue]
(Block* b, ReachReason why, Block* header) {
reachableBlocks.insert(b);
if (why == ReachDeadContinue) headerForUnreachableContinue[b] = header;
if (why == ReachDeadMerge) unreachableMerges.insert(b);
});
for (auto bi = f->getBlocks().cbegin(); bi != f->getBlocks().cend(); bi++) {
Block* b = *bi;
if (reachableBlocks.count(b) == 0) {
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ii++)
if (unreachableMerges.count(b) != 0 || headerForUnreachableContinue.count(b) != 0) {
auto ii = b->getInstructions().cbegin();
++ii; // Keep potential decorations on the label.
for (; ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
} else if (reachableBlocks.count(b) == 0) {
// The normal case for unreachable code. All definitions are considered dead.
for (auto ii = b->getInstructions().cbegin(); ii != b->getInstructions().cend(); ++ii)
unreachableDefinitions.insert(ii->get()->getResultId());
}
}
}
// Modify unreachable merge blocks and unreachable continue targets.
// Delete their contents.
for (auto mergeIter = unreachableMerges.begin(); mergeIter != unreachableMerges.end(); ++mergeIter) {
(*mergeIter)->rewriteAsCanonicalUnreachableMerge();
}
for (auto continueIter = headerForUnreachableContinue.begin();
continueIter != headerForUnreachableContinue.end();
++continueIter) {
Block* continue_target = continueIter->first;
Block* header = continueIter->second;
continue_target->rewriteAsCanonicalUnreachableContinue(header);
}
// Remove unneeded decorations for unreachable instructions
decorations.erase(std::remove_if(decorations.begin(), decorations.end(),
[&unreachableDefinitions](std::unique_ptr<Instruction>& I) -> bool {
@ -325,14 +379,29 @@ void Builder::postProcess()
return unreachableDefinitions.count(decoration_id) != 0;
}),
decorations.end());
}
#ifndef GLSLANG_WEB
// comment in header
void Builder::postProcessFeatures() {
// Add per-instruction capabilities, extensions, etc.,
// process all reachable instructions...
for (auto bi = reachableBlocks.cbegin(); bi != reachableBlocks.cend(); ++bi) {
const Block* block = *bi;
const auto function = [this](const std::unique_ptr<Instruction>& inst) { postProcessReachable(*inst.get()); };
std::for_each(block->getInstructions().begin(), block->getInstructions().end(), function);
// Look for any 8/16 bit type in physical storage buffer class, and set the
// appropriate capability. This happens in createSpvVariable for other storage
// classes, but there isn't always a variable for physical storage buffer.
for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
Instruction* type = groupedTypes[OpTypePointer][t];
if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
addIncorporatedExtension(spv::E_SPV_KHR_8bit_storage, spv::Spv_1_5);
addCapability(spv::CapabilityStorageBuffer8BitAccess);
}
if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
addIncorporatedExtension(spv::E_SPV_KHR_16bit_storage, spv::Spv_1_3);
addCapability(spv::CapabilityStorageBuffer16BitAccess);
}
}
}
// process all block-contained instructions
@ -367,24 +436,15 @@ void Builder::postProcess()
}
}
}
}
#endif
// Look for any 8/16 bit type in physical storage buffer class, and set the
// appropriate capability. This happens in createSpvVariable for other storage
// classes, but there isn't always a variable for physical storage buffer.
for (int t = 0; t < (int)groupedTypes[OpTypePointer].size(); ++t) {
Instruction* type = groupedTypes[OpTypePointer][t];
if (type->getImmediateOperand(0) == (unsigned)StorageClassPhysicalStorageBufferEXT) {
if (containsType(type->getIdOperand(1), OpTypeInt, 8)) {
addExtension(spv::E_SPV_KHR_8bit_storage);
addCapability(spv::CapabilityStorageBuffer8BitAccess);
}
if (containsType(type->getIdOperand(1), OpTypeInt, 16) ||
containsType(type->getIdOperand(1), OpTypeFloat, 16)) {
addExtension(spv::E_SPV_KHR_16bit_storage);
addCapability(spv::CapabilityStorageBuffer16BitAccess);
}
}
}
// comment in header
void Builder::postProcess() {
postProcessCFG();
#ifndef GLSLANG_WEB
postProcessFeatures();
#endif
}
}; // end spv namespace

View file

@ -52,8 +52,21 @@ namespace glslang {
spv_target_env MapToSpirvToolsEnv(const SpvVersion& spvVersion, spv::SpvBuildLogger* logger)
{
switch (spvVersion.vulkan) {
case glslang::EShTargetVulkan_1_0: return spv_target_env::SPV_ENV_VULKAN_1_0;
case glslang::EShTargetVulkan_1_1: return spv_target_env::SPV_ENV_VULKAN_1_1;
case glslang::EShTargetVulkan_1_0:
return spv_target_env::SPV_ENV_VULKAN_1_0;
case glslang::EShTargetVulkan_1_1:
switch (spvVersion.spv) {
case EShTargetSpv_1_0:
case EShTargetSpv_1_1:
case EShTargetSpv_1_2:
case EShTargetSpv_1_3:
return spv_target_env::SPV_ENV_VULKAN_1_1;
case EShTargetSpv_1_4:
return spv_target_env::SPV_ENV_VULKAN_1_1_SPIRV_1_4;
default:
logger->missingFunctionality("Target version for SPIRV-Tools validator");
return spv_target_env::SPV_ENV_VULKAN_1_1;
}
default:
break;
}
@ -90,7 +103,7 @@ void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& s
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger* logger)
spv::SpvBuildLogger* logger, bool prelegalization)
{
// validate
spv_context context = spvContextCreate(MapToSpirvToolsEnv(intermediate.getSpv(), logger));
@ -98,6 +111,7 @@ void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<
spv_diagnostic diagnostic = nullptr;
spv_validator_options options = spvValidatorOptionsCreate();
spvValidatorOptionsSetRelaxBlockLayout(options, intermediate.usingHlslOffsets());
spvValidatorOptionsSetBeforeHlslLegalization(options, prelegalization);
spvValidateWithOptions(context, options, &binary, &diagnostic);
// report
@ -159,6 +173,7 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
if (options->generateDebugInfo) {
optimizer.RegisterPass(spvtools::CreatePropagateLineInfoPass());
}
optimizer.RegisterPass(spvtools::CreateWrapOpKillPass());
optimizer.RegisterPass(spvtools::CreateDeadBranchElimPass());
optimizer.RegisterPass(spvtools::CreateMergeReturnPass());
optimizer.RegisterPass(spvtools::CreateInlineExhaustivePass());
@ -182,8 +197,6 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
optimizer.RegisterPass(spvtools::CreateDeadInsertElimPass());
if (options->optimizeSize) {
optimizer.RegisterPass(spvtools::CreateRedundancyEliminationPass());
// TODO(greg-lunarg): Add this when AMD driver issues are resolved
// optimizer.RegisterPass(CreateCommonUniformElimPass());
}
optimizer.RegisterPass(spvtools::CreateAggressiveDCEPass());
optimizer.RegisterPass(spvtools::CreateCFGCleanupPass());
@ -191,7 +204,9 @@ void SpirvToolsLegalize(const glslang::TIntermediate&, std::vector<unsigned int>
optimizer.RegisterPass(spvtools::CreateRedundantLineInfoElimPass());
}
optimizer.Run(spirv.data(), spirv.size(), &spirv);
spvtools::OptimizerOptions spvOptOptions;
spvOptOptions.set_run_validator(false); // The validator may run as a separate step later on
optimizer.Run(spirv.data(), spirv.size(), &spirv, spvOptOptions);
}
}; // end namespace glslang
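With the validator disabled inside the optimizer run, validation becomes an explicit, separate step through the updated SpirvToolsValidate signature. One plausible call order, sketched with placeholder variable names that mirror the surrounding glslang code:

    void legalizeThenValidate(const glslang::TIntermediate& intermediate,
                              std::vector<unsigned int>& spirv,
                              spv::SpvBuildLogger* logger,
                              const glslang::SpvOptions* options)
    {
        // Legalize without running SPIRV-Tools validation inside the optimizer...
        glslang::SpirvToolsLegalize(intermediate, spirv, logger, options);
        // ...then validate the final binary explicitly, after legalization.
        glslang::SpirvToolsValidate(intermediate, spirv, logger, /*prelegalization*/ false);
    }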

View file

@ -41,10 +41,12 @@
#ifndef GLSLANG_SPV_TOOLS_H
#define GLSLANG_SPV_TOOLS_H
#ifdef ENABLE_OPT
#include <vector>
#include <ostream>
#endif
#include "../glslang/MachineIndependent/localintermediate.h"
#include "glslang/MachineIndependent/localintermediate.h"
#include "Logger.h"
namespace glslang {
@ -59,14 +61,14 @@ struct SpvOptions {
bool validate;
};
#if ENABLE_OPT
#ifdef ENABLE_OPT
// Use the SPIRV-Tools disassembler to print SPIR-V.
void SpirvToolsDisassemble(std::ostream& out, const std::vector<unsigned int>& spirv);
// Apply the SPIRV-Tools validator to generated SPIR-V.
void SpirvToolsValidate(const glslang::TIntermediate& intermediate, std::vector<unsigned int>& spirv,
spv::SpvBuildLogger*);
spv::SpvBuildLogger*, bool prelegalization);
// Apply the SPIRV-Tools optimizer to generated SPIR-V, for the purpose of
// legalizing HLSL SPIR-V.
@ -75,6 +77,6 @@ void SpirvToolsLegalize(const glslang::TIntermediate& intermediate, std::vector<
#endif
}; // end namespace glslang
} // end namespace glslang
#endif // GLSLANG_SPV_TOOLS_H
#endif // GLSLANG_SPV_TOOLS_H

View file

@ -52,26 +52,16 @@ namespace spv {
extern "C" {
// Include C-based headers that don't have a namespace
#include "GLSL.std.450.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
const char* GlslStd450DebugNames[spv::GLSLstd450Count];
namespace spv {
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char*, unsigned);
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char*, unsigned);
#endif
static void Kill(std::ostream& out, const char* message)
{
@ -82,15 +72,8 @@ static void Kill(std::ostream& out, const char* message)
// used to identify the extended instruction library imported when printing
enum ExtInstSet {
GLSL450Inst,
#ifdef AMD_EXTENSIONS
GLSLextAMDInst,
#endif
#ifdef NV_EXTENSIONS
GLSLextNVInst,
#endif
OpenCLExtInst,
};
@ -499,37 +482,29 @@ void SpirvStream::disassembleInstruction(Id resultId, Id /*typeId*/, Op opCode,
const char* name = idDescriptor[stream[word - 2]].c_str();
if (0 == memcmp("OpenCL", name, 6)) {
extInstSet = OpenCLExtInst;
#ifdef AMD_EXTENSIONS
} else if (strcmp(spv::E_SPV_AMD_shader_ballot, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_trinary_minmax, name) == 0 ||
strcmp(spv::E_SPV_AMD_shader_explicit_vertex_parameter, name) == 0 ||
strcmp(spv::E_SPV_AMD_gcn_shader, name) == 0) {
extInstSet = GLSLextAMDInst;
#endif
#ifdef NV_EXTENSIONS
}else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
} else if (strcmp(spv::E_SPV_NV_sample_mask_override_coverage, name) == 0 ||
strcmp(spv::E_SPV_NV_geometry_shader_passthrough, name) == 0 ||
strcmp(spv::E_SPV_NV_viewport_array2, name) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
strcmp(spv::E_SPV_NV_mesh_shader, name) == 0) {
extInstSet = GLSLextNVInst;
#endif
}
unsigned entrypoint = stream[word - 1];
if (extInstSet == GLSL450Inst) {
if (entrypoint < GLSLstd450Count) {
out << "(" << GlslStd450DebugNames[entrypoint] << ")";
}
#ifdef AMD_EXTENSIONS
} else if (extInstSet == GLSLextAMDInst) {
out << "(" << GLSLextAMDGetDebugNames(name, entrypoint) << ")";
#endif
#ifdef NV_EXTENSIONS
}
else if (extInstSet == GLSLextNVInst) {
out << "(" << GLSLextNVGetDebugNames(name, entrypoint) << ")";
#endif
}
}
break;
@ -648,9 +623,11 @@ static void GLSLstd450GetDebugNames(const char** names)
names[GLSLstd450InterpolateAtCentroid] = "InterpolateAtCentroid";
names[GLSLstd450InterpolateAtSample] = "InterpolateAtSample";
names[GLSLstd450InterpolateAtOffset] = "InterpolateAtOffset";
names[GLSLstd450NMin] = "NMin";
names[GLSLstd450NMax] = "NMax";
names[GLSLstd450NClamp] = "NClamp";
}
#ifdef AMD_EXTENSIONS
static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_AMD_shader_ballot) == 0) {
@ -692,18 +669,17 @@ static const char* GLSLextAMDGetDebugNames(const char* name, unsigned entrypoint
return "Bad";
}
#endif
#ifdef NV_EXTENSIONS
static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
{
if (strcmp(name, spv::E_SPV_NV_sample_mask_override_coverage) == 0 ||
strcmp(name, spv::E_SPV_NV_geometry_shader_passthrough) == 0 ||
strcmp(name, spv::E_ARB_shader_viewport_layer_array) == 0 ||
strcmp(name, spv::E_SPV_NV_viewport_array2) == 0 ||
strcmp(spv::E_SPV_NVX_multiview_per_view_attributes, name) == 0 ||
strcmp(spv::E_SPV_NV_fragment_shader_barycentric, name) == 0 ||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0) {
strcmp(name, spv::E_SPV_NVX_multiview_per_view_attributes) == 0 ||
strcmp(name, spv::E_SPV_NV_fragment_shader_barycentric) == 0 ||
strcmp(name, spv::E_SPV_NV_mesh_shader) == 0 ||
strcmp(name, spv::E_SPV_NV_shader_image_footprint) == 0) {
switch (entrypoint) {
// NV builtins
case BuiltInViewportMaskNV: return "ViewportMaskNV";
@ -729,6 +705,8 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
case CapabilityPerViewAttributesNV: return "PerViewAttributesNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
case CapabilityMeshShadingNV: return "MeshShadingNV";
case CapabilityImageFootprintNV: return "ImageFootprintNV";
case CapabilitySampleMaskOverrideCoverageNV:return "SampleMaskOverrideCoverageNV";
// NV Decorations
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
@ -745,7 +723,6 @@ static const char* GLSLextNVGetDebugNames(const char* name, unsigned entrypoint)
}
return "Bad";
}
#endif
void Disassemble(std::ostream& out, const std::vector<unsigned int>& stream)
{

View file

@ -48,6 +48,6 @@ namespace spv {
// disassemble with glslang custom disassembler
void Disassemble(std::ostream& out, const std::vector<unsigned int>&);
}; // end namespace spv
} // end namespace spv
#endif // disassembler_H

View file

@ -50,12 +50,8 @@ namespace spv {
// Include C-based headers that don't have a namespace
#include "GLSL.ext.KHR.h"
#include "GLSL.ext.EXT.h"
#ifdef AMD_EXTENSIONS
#include "GLSL.ext.AMD.h"
#endif
#ifdef NV_EXTENSIONS
#include "GLSL.ext.NV.h"
#endif
}
}
@ -98,22 +94,17 @@ const char* ExecutionModelString(int model)
case 4: return "Fragment";
case 5: return "GLCompute";
case 6: return "Kernel";
#ifdef NV_EXTENSIONS
case ExecutionModelTaskNV: return "TaskNV";
case ExecutionModelMeshNV: return "MeshNV";
#endif
default: return "Bad";
#ifdef NV_EXTENSIONS
case ExecutionModelRayGenerationNV: return "RayGenerationNV";
case ExecutionModelIntersectionNV: return "IntersectionNV";
case ExecutionModelAnyHitNV: return "AnyHitNV";
case ExecutionModelClosestHitNV: return "ClosestHitNV";
case ExecutionModelMissNV: return "MissNV";
case ExecutionModelCallableNV: return "CallableNV";
#endif
}
}
@ -183,13 +174,18 @@ const char* ExecutionModeString(int mode)
case 4446: return "PostDepthCoverage";
#ifdef NV_EXTENSIONS
case ExecutionModeOutputLinesNV: return "OutputLinesNV";
case ExecutionModeOutputPrimitivesNV: return "OutputPrimitivesNV";
case ExecutionModeOutputTrianglesNV: return "OutputTrianglesNV";
case ExecutionModeDerivativeGroupQuadsNV: return "DerivativeGroupQuadsNV";
case ExecutionModeDerivativeGroupLinearNV: return "DerivativeGroupLinearNV";
#endif
case ExecutionModePixelInterlockOrderedEXT: return "PixelInterlockOrderedEXT";
case ExecutionModePixelInterlockUnorderedEXT: return "PixelInterlockUnorderedEXT";
case ExecutionModeSampleInterlockOrderedEXT: return "SampleInterlockOrderedEXT";
case ExecutionModeSampleInterlockUnorderedEXT: return "SampleInterlockUnorderedEXT";
case ExecutionModeShadingRateInterlockOrderedEXT: return "ShadingRateInterlockOrderedEXT";
case ExecutionModeShadingRateInterlockUnorderedEXT: return "ShadingRateInterlockUnorderedEXT";
case ExecutionModeCeiling:
default: return "Bad";
@ -213,14 +209,12 @@ const char* StorageClassString(int StorageClass)
case 11: return "Image";
case 12: return "StorageBuffer";
#ifdef NV_EXTENSIONS
case StorageClassRayPayloadNV: return "RayPayloadNV";
case StorageClassHitAttributeNV: return "HitAttributeNV";
case StorageClassIncomingRayPayloadNV: return "IncomingRayPayloadNV";
case StorageClassShaderRecordBufferNV: return "ShaderRecordBufferNV";
case StorageClassCallableDataNV: return "CallableDataNV";
case StorageClassIncomingCallableDataNV: return "IncomingCallableDataNV";
#endif
case StorageClassPhysicalStorageBufferEXT: return "PhysicalStorageBufferEXT";
@ -282,10 +276,7 @@ const char* DecorationString(int decoration)
case DecorationCeiling:
default: return "Bad";
#ifdef AMD_EXTENSIONS
case DecorationExplicitInterpAMD: return "ExplicitInterpAMD";
#endif
#ifdef NV_EXTENSIONS
case DecorationOverrideCoverageNV: return "OverrideCoverageNV";
case DecorationPassthroughNV: return "PassthroughNV";
case DecorationViewportRelativeNV: return "ViewportRelativeNV";
@ -294,7 +285,6 @@ const char* DecorationString(int decoration)
case DecorationPerViewNV: return "PerViewNV";
case DecorationPerTaskNV: return "PerTaskNV";
case DecorationPerVertexNV: return "PerVertexNV";
#endif
case DecorationNonUniformEXT: return "DecorationNonUniformEXT";
case DecorationHlslCounterBufferGOOGLE: return "DecorationHlslCounterBufferGOOGLE";
@ -364,7 +354,6 @@ const char* BuiltInString(int builtIn)
case 4426: return "DrawIndex";
case 5014: return "FragStencilRefEXT";
#ifdef AMD_EXTENSIONS
case 4992: return "BaryCoordNoPerspAMD";
case 4993: return "BaryCoordNoPerspCentroidAMD";
case 4994: return "BaryCoordNoPerspSampleAMD";
@ -372,9 +361,6 @@ const char* BuiltInString(int builtIn)
case 4996: return "BaryCoordSmoothCentroidAMD";
case 4997: return "BaryCoordSmoothSampleAMD";
case 4998: return "BaryCoordPullModelAMD";
#endif
#ifdef NV_EXTENSIONS
case BuiltInLaunchIdNV: return "LaunchIdNV";
case BuiltInLaunchSizeNV: return "LaunchSizeNV";
case BuiltInWorldRayOriginNV: return "WorldRayOriginNV";
@ -398,15 +384,12 @@ const char* BuiltInString(int builtIn)
// case BuiltInInvocationsPerPixelNV: return "InvocationsPerPixelNV"; // superseded by BuiltInFragInvocationCountEXT
case BuiltInBaryCoordNV: return "BaryCoordNV";
case BuiltInBaryCoordNoPerspNV: return "BaryCoordNoPerspNV";
#endif
case BuiltInFragSizeEXT: return "FragSizeEXT";
case BuiltInFragInvocationCountEXT: return "FragInvocationCountEXT";
case 5264: return "FullyCoveredEXT";
#ifdef NV_EXTENSIONS
case BuiltInTaskCountNV: return "TaskCountNV";
case BuiltInPrimitiveCountNV: return "PrimitiveCountNV";
case BuiltInPrimitiveIndicesNV: return "PrimitiveIndicesNV";
@ -415,7 +398,10 @@ const char* BuiltInString(int builtIn)
case BuiltInLayerPerViewNV: return "LayerPerViewNV";
case BuiltInMeshViewCountNV: return "MeshViewCountNV";
case BuiltInMeshViewIndicesNV: return "MeshViewIndicesNV";
#endif
case BuiltInWarpsPerSMNV: return "WarpsPerSMNV";
case BuiltInSMCountNV: return "SMCountNV";
case BuiltInWarpIDNV: return "WarpIDNV";
case BuiltInSMIDNV: return "SMIDNV";
default: return "Bad";
}
@ -575,7 +561,7 @@ const char* ImageChannelDataTypeString(int type)
}
}
const int ImageOperandsCeiling = 12;
const int ImageOperandsCeiling = 14;
const char* ImageOperandsString(int format)
{
@ -592,6 +578,8 @@ const char* ImageOperandsString(int format)
case ImageOperandsMakeTexelVisibleKHRShift: return "MakeTexelVisibleKHR";
case ImageOperandsNonPrivateTexelKHRShift: return "NonPrivateTexelKHR";
case ImageOperandsVolatileTexelKHRShift: return "VolatileTexelKHR";
case ImageOperandsSignExtendShift: return "SignExtend";
case ImageOperandsZeroExtendShift: return "ZeroExtend";
case ImageOperandsCeiling:
default:
@ -674,15 +662,20 @@ const char* SelectControlString(int cont)
}
}
const int LoopControlCeiling = 4;
const int LoopControlCeiling = LoopControlPartialCountShift + 1;
const char* LoopControlString(int cont)
{
switch (cont) {
case 0: return "Unroll";
case 1: return "DontUnroll";
case 2: return "DependencyInfinite";
case 3: return "DependencyLength";
case LoopControlUnrollShift: return "Unroll";
case LoopControlDontUnrollShift: return "DontUnroll";
case LoopControlDependencyInfiniteShift: return "DependencyInfinite";
case LoopControlDependencyLengthShift: return "DependencyLength";
case LoopControlMinIterationsShift: return "MinIterations";
case LoopControlMaxIterationsShift: return "MaxIterations";
case LoopControlIterationMultipleShift: return "IterationMultiple";
case LoopControlPeelCountShift: return "PeelCount";
case LoopControlPartialCountShift: return "PartialCount";
case LoopControlCeiling:
default: return "Bad";
@ -763,11 +756,9 @@ const char* GroupOperationString(int gop)
case GroupOperationInclusiveScan: return "InclusiveScan";
case GroupOperationExclusiveScan: return "ExclusiveScan";
case GroupOperationClusteredReduce: return "ClusteredReduce";
#ifdef NV_EXTENSIONS
case GroupOperationPartitionedReduceNV: return "PartitionedReduceNV";
case GroupOperationPartitionedInclusiveScanNV: return "PartitionedInclusiveScanNV";
case GroupOperationPartitionedExclusiveScanNV: return "PartitionedExclusiveScanNV";
#endif
default: return "Bad";
}
@ -875,26 +866,23 @@ const char* CapabilityString(int info)
case CapabilityStoragePushConstant16: return "StoragePushConstant16";
case CapabilityStorageInputOutput16: return "StorageInputOutput16";
case CapabilityStorageBuffer8BitAccess: return "CapabilityStorageBuffer8BitAccess";
case CapabilityUniformAndStorageBuffer8BitAccess: return "CapabilityUniformAndStorageBuffer8BitAccess";
case CapabilityStoragePushConstant8: return "CapabilityStoragePushConstant8";
case CapabilityStorageBuffer8BitAccess: return "StorageBuffer8BitAccess";
case CapabilityUniformAndStorageBuffer8BitAccess: return "UniformAndStorageBuffer8BitAccess";
case CapabilityStoragePushConstant8: return "StoragePushConstant8";
case CapabilityDeviceGroup: return "DeviceGroup";
case CapabilityMultiView: return "MultiView";
case CapabilityStencilExportEXT: return "StencilExportEXT";
#ifdef AMD_EXTENSIONS
case CapabilityFloat16ImageAMD: return "Float16ImageAMD";
case CapabilityImageGatherBiasLodAMD: return "ImageGatherBiasLodAMD";
case CapabilityFragmentMaskAMD: return "FragmentMaskAMD";
case CapabilityImageReadWriteLodAMD: return "ImageReadWriteLodAMD";
#endif
case CapabilityAtomicStorageOps: return "AtomicStorageOps";
case CapabilitySampleMaskPostDepthCoverage: return "SampleMaskPostDepthCoverage";
#ifdef NV_EXTENSIONS
case CapabilityGeometryShaderPassthroughNV: return "GeometryShaderPassthroughNV";
case CapabilityShaderViewportIndexLayerNV: return "ShaderViewportIndexLayerNV";
case CapabilityShaderViewportMaskNV: return "ShaderViewportMaskNV";
@ -906,29 +894,44 @@ const char* CapabilityString(int info)
case CapabilityComputeDerivativeGroupLinearNV: return "ComputeDerivativeGroupLinearNV";
case CapabilityFragmentBarycentricNV: return "FragmentBarycentricNV";
case CapabilityMeshShadingNV: return "MeshShadingNV";
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by CapabilityFragmentDensityEXT
#endif
case CapabilityImageFootprintNV: return "ImageFootprintNV";
// case CapabilityShadingRateNV: return "ShadingRateNV"; // superseded by FragmentDensityEXT
case CapabilitySampleMaskOverrideCoverageNV: return "SampleMaskOverrideCoverageNV";
case CapabilityFragmentDensityEXT: return "FragmentDensityEXT";
case CapabilityFragmentFullyCoveredEXT: return "FragmentFullyCoveredEXT";
case CapabilityShaderNonUniformEXT: return "CapabilityShaderNonUniformEXT";
case CapabilityRuntimeDescriptorArrayEXT: return "CapabilityRuntimeDescriptorArrayEXT";
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "CapabilityInputAttachmentArrayDynamicIndexingEXT";
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "CapabilityUniformTexelBufferArrayDynamicIndexingEXT";
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "CapabilityStorageTexelBufferArrayDynamicIndexingEXT";
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "CapabilityUniformBufferArrayNonUniformIndexingEXT";
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "CapabilitySampledImageArrayNonUniformIndexingEXT";
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "CapabilityStorageBufferArrayNonUniformIndexingEXT";
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "CapabilityStorageImageArrayNonUniformIndexingEXT";
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "CapabilityInputAttachmentArrayNonUniformIndexingEXT";
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "CapabilityUniformTexelBufferArrayNonUniformIndexingEXT";
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "CapabilityStorageTexelBufferArrayNonUniformIndexingEXT";
case CapabilityShaderNonUniformEXT: return "ShaderNonUniformEXT";
case CapabilityRuntimeDescriptorArrayEXT: return "RuntimeDescriptorArrayEXT";
case CapabilityInputAttachmentArrayDynamicIndexingEXT: return "InputAttachmentArrayDynamicIndexingEXT";
case CapabilityUniformTexelBufferArrayDynamicIndexingEXT: return "UniformTexelBufferArrayDynamicIndexingEXT";
case CapabilityStorageTexelBufferArrayDynamicIndexingEXT: return "StorageTexelBufferArrayDynamicIndexingEXT";
case CapabilityUniformBufferArrayNonUniformIndexingEXT: return "UniformBufferArrayNonUniformIndexingEXT";
case CapabilitySampledImageArrayNonUniformIndexingEXT: return "SampledImageArrayNonUniformIndexingEXT";
case CapabilityStorageBufferArrayNonUniformIndexingEXT: return "StorageBufferArrayNonUniformIndexingEXT";
case CapabilityStorageImageArrayNonUniformIndexingEXT: return "StorageImageArrayNonUniformIndexingEXT";
case CapabilityInputAttachmentArrayNonUniformIndexingEXT: return "InputAttachmentArrayNonUniformIndexingEXT";
case CapabilityUniformTexelBufferArrayNonUniformIndexingEXT: return "UniformTexelBufferArrayNonUniformIndexingEXT";
case CapabilityStorageTexelBufferArrayNonUniformIndexingEXT: return "StorageTexelBufferArrayNonUniformIndexingEXT";
case CapabilityVulkanMemoryModelKHR: return "CapabilityVulkanMemoryModelKHR";
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "CapabilityVulkanMemoryModelDeviceScopeKHR";
case CapabilityVulkanMemoryModelKHR: return "VulkanMemoryModelKHR";
case CapabilityVulkanMemoryModelDeviceScopeKHR: return "VulkanMemoryModelDeviceScopeKHR";
case CapabilityPhysicalStorageBufferAddressesEXT: return "CapabilityPhysicalStorageBufferAddressesEXT";
case CapabilityPhysicalStorageBufferAddressesEXT: return "PhysicalStorageBufferAddressesEXT";
case CapabilityVariablePointers: return "VariablePointers";
case CapabilityCooperativeMatrixNV: return "CooperativeMatrixNV";
case CapabilityShaderSMBuiltinsNV: return "ShaderSMBuiltinsNV";
case CapabilityFragmentShaderSampleInterlockEXT: return "CapabilityFragmentShaderSampleInterlockEXT";
case CapabilityFragmentShaderPixelInterlockEXT: return "CapabilityFragmentShaderPixelInterlockEXT";
case CapabilityFragmentShaderShadingRateInterlockEXT: return "CapabilityFragmentShaderShadingRateInterlockEXT";
case CapabilityDemoteToHelperInvocationEXT: return "DemoteToHelperInvocationEXT";
case CapabilityShaderClockKHR: return "ShaderClockKHR";
case CapabilityIntegerFunctions2INTEL: return "CapabilityIntegerFunctions2INTEL";
default: return "Bad";
}
@ -1022,6 +1025,7 @@ const char* OpcodeString(int op)
case 82: return "OpCompositeInsert";
case 83: return "OpCopyObject";
case 84: return "OpTranspose";
case OpCopyLogical: return "OpCopyLogical";
case 85: return "Bad";
case 86: return "OpSampledImage";
case 87: return "OpImageSampleImplicitLod";
@ -1304,7 +1308,6 @@ const char* OpcodeString(int op)
case 4430: return "OpSubgroupAllEqualKHR";
case 4432: return "OpSubgroupReadInvocationKHR";
#ifdef AMD_EXTENSIONS
case 5000: return "OpGroupIAddNonUniformAMD";
case 5001: return "OpGroupFAddNonUniformAMD";
case 5002: return "OpGroupFMinNonUniformAMD";
@ -1316,12 +1319,12 @@ const char* OpcodeString(int op)
case 5011: return "OpFragmentMaskFetchAMD";
case 5012: return "OpFragmentFetchAMD";
#endif
case OpReadClockKHR: return "OpReadClockKHR";
case OpDecorateStringGOOGLE: return "OpDecorateStringGOOGLE";
case OpMemberDecorateStringGOOGLE: return "OpMemberDecorateStringGOOGLE";
#ifdef NV_EXTENSIONS
case OpGroupNonUniformPartitionNV: return "OpGroupNonUniformPartitionNV";
case OpReportIntersectionNV: return "OpReportIntersectionNV";
case OpIgnoreIntersectionNV: return "OpIgnoreIntersectionNV";
@ -1331,7 +1334,17 @@ const char* OpcodeString(int op)
case OpExecuteCallableNV: return "OpExecuteCallableNV";
case OpImageSampleFootprintNV: return "OpImageSampleFootprintNV";
case OpWritePackedPrimitiveIndices4x8NV: return "OpWritePackedPrimitiveIndices4x8NV";
#endif
case OpTypeCooperativeMatrixNV: return "OpTypeCooperativeMatrixNV";
case OpCooperativeMatrixLoadNV: return "OpCooperativeMatrixLoadNV";
case OpCooperativeMatrixStoreNV: return "OpCooperativeMatrixStoreNV";
case OpCooperativeMatrixMulAddNV: return "OpCooperativeMatrixMulAddNV";
case OpCooperativeMatrixLengthNV: return "OpCooperativeMatrixLengthNV";
case OpDemoteToHelperInvocationEXT: return "OpDemoteToHelperInvocationEXT";
case OpIsHelperInvocationEXT: return "OpIsHelperInvocationEXT";
case OpBeginInvocationInterlockEXT: return "OpBeginInvocationInterlockEXT";
case OpEndInvocationInterlockEXT: return "OpEndInvocationInterlockEXT";
default:
return "Bad";
@ -1444,6 +1457,10 @@ void Parameterize()
InstructionDesc[OpGroupWaitEvents].setResultAndType(false, false);
InstructionDesc[OpAtomicFlagClear].setResultAndType(false, false);
InstructionDesc[OpModuleProcessed].setResultAndType(false, false);
InstructionDesc[OpTypeCooperativeMatrixNV].setResultAndType(true, false);
InstructionDesc[OpCooperativeMatrixStoreNV].setResultAndType(false, false);
InstructionDesc[OpBeginInvocationInterlockEXT].setResultAndType(false, false);
InstructionDesc[OpEndInvocationInterlockEXT].setResultAndType(false, false);
// Specific additional context-dependent operands
@ -1921,6 +1938,8 @@ void Parameterize()
InstructionDesc[OpTranspose].operands.push(OperandId, "'Matrix'");
InstructionDesc[OpCopyLogical].operands.push(OperandId, "'Operand'");
InstructionDesc[OpIsNan].operands.push(OperandId, "'x'");
InstructionDesc[OpIsInf].operands.push(OperandId, "'x'");
@ -2634,7 +2653,6 @@ void Parameterize()
InstructionDesc[OpModuleProcessed].operands.push(OperandLiteralString, "'process'");
#ifdef AMD_EXTENSIONS
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandScope, "'Execution'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandGroupOperation, "'Operation'");
InstructionDesc[OpGroupIAddNonUniformAMD].operands.push(OperandId, "'X'");
@ -2673,9 +2691,7 @@ void Parameterize()
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Image'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Coordinate'");
InstructionDesc[OpFragmentFetchAMD].operands.push(OperandId, "'Fragment Index'");
#endif
#ifdef NV_EXTENSIONS
InstructionDesc[OpGroupNonUniformPartitionNV].operands.push(OperandId, "X");
InstructionDesc[OpTypeAccelerationStructureNV].setResultAndType(true, false);
@ -2713,7 +2729,36 @@ void Parameterize()
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Index Offset'");
InstructionDesc[OpWritePackedPrimitiveIndices4x8NV].operands.push(OperandId, "'Packed Indices'");
#endif
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Component Type'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Scope'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Rows'");
InstructionDesc[OpTypeCooperativeMatrixNV].operands.push(OperandId, "'Columns'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Stride'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "'Column Major'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandMemoryAccess, "'Memory Access'");
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpCooperativeMatrixLoadNV].operands.push(OperandId, "", true);
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Pointer'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Object'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Stride'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "'Column Major'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandMemoryAccess, "'Memory Access'");
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandLiteralNumber, "", true);
InstructionDesc[OpCooperativeMatrixStoreNV].operands.push(OperandId, "", true);
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'A'");
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'B'");
InstructionDesc[OpCooperativeMatrixMulAddNV].operands.push(OperandId, "'C'");
InstructionDesc[OpCooperativeMatrixLengthNV].operands.push(OperandId, "'Type'");
InstructionDesc[OpDemoteToHelperInvocationEXT].setResultAndType(false, false);
InstructionDesc[OpReadClockKHR].operands.push(OperandScope, "'Scope'");
}
}; // end spv namespace

View file

@ -255,4 +255,4 @@ const char* AccessQualifierString(int attr);
void PrintOperands(const OperandParameters& operands, int reservedOperands);
}; // end namespace spv
} // end namespace spv

View file

@ -49,12 +49,12 @@ namespace spv {
typedef unsigned int Id;
#define SPV_VERSION 0x10300
#define SPV_REVISION 6
#define SPV_VERSION 0x10400
#define SPV_REVISION 1
static const unsigned int MagicNumber = 0x07230203;
static const unsigned int Version = 0x00010300;
static const unsigned int Revision = 6;
static const unsigned int Version = 0x00010400;
static const unsigned int Revision = 1;
static const unsigned int OpCodeMask = 0xffff;
static const unsigned int WordCountShift = 16;
@ -91,6 +91,7 @@ enum AddressingModel {
AddressingModelLogical = 0,
AddressingModelPhysical32 = 1,
AddressingModelPhysical64 = 2,
AddressingModelPhysicalStorageBuffer64 = 5348,
AddressingModelPhysicalStorageBuffer64EXT = 5348,
AddressingModelMax = 0x7fffffff,
};
@ -99,6 +100,7 @@ enum MemoryModel {
MemoryModelSimple = 0,
MemoryModelGLSL450 = 1,
MemoryModelOpenCL = 2,
MemoryModelVulkan = 3,
MemoryModelVulkanKHR = 3,
MemoryModelMax = 0x7fffffff,
};
@ -154,6 +156,12 @@ enum ExecutionMode {
ExecutionModeDerivativeGroupQuadsNV = 5289,
ExecutionModeDerivativeGroupLinearNV = 5290,
ExecutionModeOutputTrianglesNV = 5298,
ExecutionModePixelInterlockOrderedEXT = 5366,
ExecutionModePixelInterlockUnorderedEXT = 5367,
ExecutionModeSampleInterlockOrderedEXT = 5368,
ExecutionModeSampleInterlockUnorderedEXT = 5369,
ExecutionModeShadingRateInterlockOrderedEXT = 5370,
ExecutionModeShadingRateInterlockUnorderedEXT = 5371,
ExecutionModeMax = 0x7fffffff,
};
@ -177,6 +185,7 @@ enum StorageClass {
StorageClassHitAttributeNV = 5339,
StorageClassIncomingRayPayloadNV = 5342,
StorageClassShaderRecordBufferNV = 5343,
StorageClassPhysicalStorageBuffer = 5349,
StorageClassPhysicalStorageBufferEXT = 5349,
StorageClassMax = 0x7fffffff,
};
@ -305,10 +314,16 @@ enum ImageOperandsShift {
ImageOperandsConstOffsetsShift = 5,
ImageOperandsSampleShift = 6,
ImageOperandsMinLodShift = 7,
ImageOperandsMakeTexelAvailableShift = 8,
ImageOperandsMakeTexelAvailableKHRShift = 8,
ImageOperandsMakeTexelVisibleShift = 9,
ImageOperandsMakeTexelVisibleKHRShift = 9,
ImageOperandsNonPrivateTexelShift = 10,
ImageOperandsNonPrivateTexelKHRShift = 10,
ImageOperandsVolatileTexelShift = 11,
ImageOperandsVolatileTexelKHRShift = 11,
ImageOperandsSignExtendShift = 12,
ImageOperandsZeroExtendShift = 13,
ImageOperandsMax = 0x7fffffff,
};
@ -322,10 +337,16 @@ enum ImageOperandsMask {
ImageOperandsConstOffsetsMask = 0x00000020,
ImageOperandsSampleMask = 0x00000040,
ImageOperandsMinLodMask = 0x00000080,
ImageOperandsMakeTexelAvailableMask = 0x00000100,
ImageOperandsMakeTexelAvailableKHRMask = 0x00000100,
ImageOperandsMakeTexelVisibleMask = 0x00000200,
ImageOperandsMakeTexelVisibleKHRMask = 0x00000200,
ImageOperandsNonPrivateTexelMask = 0x00000400,
ImageOperandsNonPrivateTexelKHRMask = 0x00000400,
ImageOperandsVolatileTexelMask = 0x00000800,
ImageOperandsVolatileTexelKHRMask = 0x00000800,
ImageOperandsSignExtendMask = 0x00001000,
ImageOperandsZeroExtendMask = 0x00002000,
};
enum FPFastMathModeShift {
@ -406,6 +427,7 @@ enum Decoration {
DecorationNonWritable = 24,
DecorationNonReadable = 25,
DecorationUniform = 26,
DecorationUniformId = 27,
DecorationSaturatedConversion = 28,
DecorationStream = 29,
DecorationLocation = 30,
@ -437,11 +459,17 @@ enum Decoration {
DecorationPerViewNV = 5272,
DecorationPerTaskNV = 5273,
DecorationPerVertexNV = 5285,
DecorationNonUniform = 5300,
DecorationNonUniformEXT = 5300,
DecorationRestrictPointer = 5355,
DecorationRestrictPointerEXT = 5355,
DecorationAliasedPointer = 5356,
DecorationAliasedPointerEXT = 5356,
DecorationCounterBuffer = 5634,
DecorationHlslCounterBufferGOOGLE = 5634,
DecorationHlslSemanticGOOGLE = 5635,
DecorationUserSemantic = 5635,
DecorationUserTypeGOOGLE = 5636,
DecorationMax = 0x7fffffff,
};
@ -544,6 +572,10 @@ enum BuiltIn {
BuiltInHitTNV = 5332,
BuiltInHitKindNV = 5333,
BuiltInIncomingRayFlagsNV = 5351,
BuiltInWarpsPerSMNV = 5374,
BuiltInSMCountNV = 5375,
BuiltInWarpIDNV = 5376,
BuiltInSMIDNV = 5377,
BuiltInMax = 0x7fffffff,
};
@ -564,6 +596,11 @@ enum LoopControlShift {
LoopControlDontUnrollShift = 1,
LoopControlDependencyInfiniteShift = 2,
LoopControlDependencyLengthShift = 3,
LoopControlMinIterationsShift = 4,
LoopControlMaxIterationsShift = 5,
LoopControlIterationMultipleShift = 6,
LoopControlPeelCountShift = 7,
LoopControlPartialCountShift = 8,
LoopControlMax = 0x7fffffff,
};
@ -573,6 +610,11 @@ enum LoopControlMask {
LoopControlDontUnrollMask = 0x00000002,
LoopControlDependencyInfiniteMask = 0x00000004,
LoopControlDependencyLengthMask = 0x00000008,
LoopControlMinIterationsMask = 0x00000010,
LoopControlMaxIterationsMask = 0x00000020,
LoopControlIterationMultipleMask = 0x00000040,
LoopControlPeelCountMask = 0x00000080,
LoopControlPartialCountMask = 0x00000100,
};
enum FunctionControlShift {
@ -602,9 +644,13 @@ enum MemorySemanticsShift {
MemorySemanticsCrossWorkgroupMemoryShift = 9,
MemorySemanticsAtomicCounterMemoryShift = 10,
MemorySemanticsImageMemoryShift = 11,
MemorySemanticsOutputMemoryShift = 12,
MemorySemanticsOutputMemoryKHRShift = 12,
MemorySemanticsMakeAvailableShift = 13,
MemorySemanticsMakeAvailableKHRShift = 13,
MemorySemanticsMakeVisibleShift = 14,
MemorySemanticsMakeVisibleKHRShift = 14,
MemorySemanticsVolatileShift = 15,
MemorySemanticsMax = 0x7fffffff,
};
@ -620,17 +666,24 @@ enum MemorySemanticsMask {
MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200,
MemorySemanticsAtomicCounterMemoryMask = 0x00000400,
MemorySemanticsImageMemoryMask = 0x00000800,
MemorySemanticsOutputMemoryMask = 0x00001000,
MemorySemanticsOutputMemoryKHRMask = 0x00001000,
MemorySemanticsMakeAvailableMask = 0x00002000,
MemorySemanticsMakeAvailableKHRMask = 0x00002000,
MemorySemanticsMakeVisibleMask = 0x00004000,
MemorySemanticsMakeVisibleKHRMask = 0x00004000,
MemorySemanticsVolatileMask = 0x00008000,
};
enum MemoryAccessShift {
MemoryAccessVolatileShift = 0,
MemoryAccessAlignedShift = 1,
MemoryAccessNontemporalShift = 2,
MemoryAccessMakePointerAvailableShift = 3,
MemoryAccessMakePointerAvailableKHRShift = 3,
MemoryAccessMakePointerVisibleShift = 4,
MemoryAccessMakePointerVisibleKHRShift = 4,
MemoryAccessNonPrivatePointerShift = 5,
MemoryAccessNonPrivatePointerKHRShift = 5,
MemoryAccessMax = 0x7fffffff,
};
@ -640,8 +693,11 @@ enum MemoryAccessMask {
MemoryAccessVolatileMask = 0x00000001,
MemoryAccessAlignedMask = 0x00000002,
MemoryAccessNontemporalMask = 0x00000004,
MemoryAccessMakePointerAvailableMask = 0x00000008,
MemoryAccessMakePointerAvailableKHRMask = 0x00000008,
MemoryAccessMakePointerVisibleMask = 0x00000010,
MemoryAccessMakePointerVisibleKHRMask = 0x00000010,
MemoryAccessNonPrivatePointerMask = 0x00000020,
MemoryAccessNonPrivatePointerKHRMask = 0x00000020,
};
@ -651,6 +707,7 @@ enum Scope {
ScopeWorkgroup = 2,
ScopeSubgroup = 3,
ScopeInvocation = 4,
ScopeQueueFamily = 5,
ScopeQueueFamilyKHR = 5,
ScopeMax = 0x7fffffff,
};
@ -751,6 +808,8 @@ enum Capability {
CapabilityGroupNonUniformShuffleRelative = 66,
CapabilityGroupNonUniformClustered = 67,
CapabilityGroupNonUniformQuad = 68,
CapabilityShaderLayer = 69,
CapabilityShaderViewportIndex = 70,
CapabilitySubgroupBallotKHR = 4423,
CapabilityDrawParameters = 4427,
CapabilitySubgroupVoteKHR = 4431,
@ -779,6 +838,7 @@ enum Capability {
CapabilityFragmentMaskAMD = 5010,
CapabilityStencilExportEXT = 5013,
CapabilityImageReadWriteLodAMD = 5015,
CapabilityShaderClockKHR = 5055,
CapabilitySampleMaskOverrideCoverageNV = 5249,
CapabilityGeometryShaderPassthroughNV = 5251,
CapabilityShaderViewportIndexLayerEXT = 5254,
@ -794,26 +854,52 @@ enum Capability {
CapabilityFragmentDensityEXT = 5291,
CapabilityShadingRateNV = 5291,
CapabilityGroupNonUniformPartitionedNV = 5297,
CapabilityShaderNonUniform = 5301,
CapabilityShaderNonUniformEXT = 5301,
CapabilityRuntimeDescriptorArray = 5302,
CapabilityRuntimeDescriptorArrayEXT = 5302,
CapabilityInputAttachmentArrayDynamicIndexing = 5303,
CapabilityInputAttachmentArrayDynamicIndexingEXT = 5303,
CapabilityUniformTexelBufferArrayDynamicIndexing = 5304,
CapabilityUniformTexelBufferArrayDynamicIndexingEXT = 5304,
CapabilityStorageTexelBufferArrayDynamicIndexing = 5305,
CapabilityStorageTexelBufferArrayDynamicIndexingEXT = 5305,
CapabilityUniformBufferArrayNonUniformIndexing = 5306,
CapabilityUniformBufferArrayNonUniformIndexingEXT = 5306,
CapabilitySampledImageArrayNonUniformIndexing = 5307,
CapabilitySampledImageArrayNonUniformIndexingEXT = 5307,
CapabilityStorageBufferArrayNonUniformIndexing = 5308,
CapabilityStorageBufferArrayNonUniformIndexingEXT = 5308,
CapabilityStorageImageArrayNonUniformIndexing = 5309,
CapabilityStorageImageArrayNonUniformIndexingEXT = 5309,
CapabilityInputAttachmentArrayNonUniformIndexing = 5310,
CapabilityInputAttachmentArrayNonUniformIndexingEXT = 5310,
CapabilityUniformTexelBufferArrayNonUniformIndexing = 5311,
CapabilityUniformTexelBufferArrayNonUniformIndexingEXT = 5311,
CapabilityStorageTexelBufferArrayNonUniformIndexing = 5312,
CapabilityStorageTexelBufferArrayNonUniformIndexingEXT = 5312,
CapabilityRayTracingNV = 5340,
CapabilityVulkanMemoryModel = 5345,
CapabilityVulkanMemoryModelKHR = 5345,
CapabilityVulkanMemoryModelDeviceScope = 5346,
CapabilityVulkanMemoryModelDeviceScopeKHR = 5346,
CapabilityPhysicalStorageBufferAddresses = 5347,
CapabilityPhysicalStorageBufferAddressesEXT = 5347,
CapabilityComputeDerivativeGroupLinearNV = 5350,
CapabilityCooperativeMatrixNV = 5357,
CapabilityFragmentShaderSampleInterlockEXT = 5363,
CapabilityFragmentShaderShadingRateInterlockEXT = 5372,
CapabilityShaderSMBuiltinsNV = 5373,
CapabilityFragmentShaderPixelInterlockEXT = 5378,
CapabilityDemoteToHelperInvocationEXT = 5379,
CapabilitySubgroupShuffleINTEL = 5568,
CapabilitySubgroupBufferBlockIOINTEL = 5569,
CapabilitySubgroupImageBlockIOINTEL = 5570,
CapabilitySubgroupImageMediaBlockIOINTEL = 5579,
CapabilityIntegerFunctions2INTEL = 5584,
CapabilitySubgroupAvcMotionEstimationINTEL = 5696,
CapabilitySubgroupAvcMotionEstimationIntraINTEL = 5697,
CapabilitySubgroupAvcMotionEstimationChromaINTEL = 5698,
CapabilityMax = 0x7fffffff,
};
@ -1158,6 +1244,10 @@ enum Op {
OpGroupNonUniformLogicalXor = 364,
OpGroupNonUniformQuadBroadcast = 365,
OpGroupNonUniformQuadSwap = 366,
OpCopyLogical = 400,
OpPtrEqual = 401,
OpPtrNotEqual = 402,
OpPtrDiff = 403,
OpSubgroupBallotKHR = 4421,
OpSubgroupFirstInvocationKHR = 4422,
OpSubgroupAllKHR = 4428,
@ -1174,6 +1264,7 @@ enum Op {
OpGroupSMaxNonUniformAMD = 5007,
OpFragmentMaskFetchAMD = 5011,
OpFragmentFetchAMD = 5012,
OpReadClockKHR = 5056,
OpImageSampleFootprintNV = 5283,
OpGroupNonUniformPartitionNV = 5296,
OpWritePackedPrimitiveIndices4x8NV = 5299,
@ -1183,6 +1274,15 @@ enum Op {
OpTraceNV = 5337,
OpTypeAccelerationStructureNV = 5341,
OpExecuteCallableNV = 5344,
OpTypeCooperativeMatrixNV = 5358,
OpCooperativeMatrixLoadNV = 5359,
OpCooperativeMatrixStoreNV = 5360,
OpCooperativeMatrixMulAddNV = 5361,
OpCooperativeMatrixLengthNV = 5362,
OpBeginInvocationInterlockEXT = 5364,
OpEndInvocationInterlockEXT = 5365,
OpDemoteToHelperInvocationEXT = 5380,
OpIsHelperInvocationEXT = 5381,
OpSubgroupShuffleINTEL = 5571,
OpSubgroupShuffleDownINTEL = 5572,
OpSubgroupShuffleUpINTEL = 5573,
@ -1191,11 +1291,679 @@ enum Op {
OpSubgroupBlockWriteINTEL = 5576,
OpSubgroupImageBlockReadINTEL = 5577,
OpSubgroupImageBlockWriteINTEL = 5578,
OpSubgroupImageMediaBlockReadINTEL = 5580,
OpSubgroupImageMediaBlockWriteINTEL = 5581,
OpUCountLeadingZerosINTEL = 5585,
OpUCountTrailingZerosINTEL = 5586,
OpAbsISubINTEL = 5587,
OpAbsUSubINTEL = 5588,
OpIAddSatINTEL = 5589,
OpUAddSatINTEL = 5590,
OpIAverageINTEL = 5591,
OpUAverageINTEL = 5592,
OpIAverageRoundedINTEL = 5593,
OpUAverageRoundedINTEL = 5594,
OpISubSatINTEL = 5595,
OpUSubSatINTEL = 5596,
OpIMul32x16INTEL = 5597,
OpUMul32x16INTEL = 5598,
OpDecorateString = 5632,
OpDecorateStringGOOGLE = 5632,
OpMemberDecorateString = 5633,
OpMemberDecorateStringGOOGLE = 5633,
OpVmeImageINTEL = 5699,
OpTypeVmeImageINTEL = 5700,
OpTypeAvcImePayloadINTEL = 5701,
OpTypeAvcRefPayloadINTEL = 5702,
OpTypeAvcSicPayloadINTEL = 5703,
OpTypeAvcMcePayloadINTEL = 5704,
OpTypeAvcMceResultINTEL = 5705,
OpTypeAvcImeResultINTEL = 5706,
OpTypeAvcImeResultSingleReferenceStreamoutINTEL = 5707,
OpTypeAvcImeResultDualReferenceStreamoutINTEL = 5708,
OpTypeAvcImeSingleReferenceStreaminINTEL = 5709,
OpTypeAvcImeDualReferenceStreaminINTEL = 5710,
OpTypeAvcRefResultINTEL = 5711,
OpTypeAvcSicResultINTEL = 5712,
OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL = 5713,
OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL = 5714,
OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL = 5715,
OpSubgroupAvcMceSetInterShapePenaltyINTEL = 5716,
OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL = 5717,
OpSubgroupAvcMceSetInterDirectionPenaltyINTEL = 5718,
OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL = 5719,
OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL = 5720,
OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL = 5721,
OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL = 5722,
OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL = 5723,
OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL = 5724,
OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL = 5725,
OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL = 5726,
OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL = 5727,
OpSubgroupAvcMceSetAcOnlyHaarINTEL = 5728,
OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL = 5729,
OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL = 5730,
OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL = 5731,
OpSubgroupAvcMceConvertToImePayloadINTEL = 5732,
OpSubgroupAvcMceConvertToImeResultINTEL = 5733,
OpSubgroupAvcMceConvertToRefPayloadINTEL = 5734,
OpSubgroupAvcMceConvertToRefResultINTEL = 5735,
OpSubgroupAvcMceConvertToSicPayloadINTEL = 5736,
OpSubgroupAvcMceConvertToSicResultINTEL = 5737,
OpSubgroupAvcMceGetMotionVectorsINTEL = 5738,
OpSubgroupAvcMceGetInterDistortionsINTEL = 5739,
OpSubgroupAvcMceGetBestInterDistortionsINTEL = 5740,
OpSubgroupAvcMceGetInterMajorShapeINTEL = 5741,
OpSubgroupAvcMceGetInterMinorShapeINTEL = 5742,
OpSubgroupAvcMceGetInterDirectionsINTEL = 5743,
OpSubgroupAvcMceGetInterMotionVectorCountINTEL = 5744,
OpSubgroupAvcMceGetInterReferenceIdsINTEL = 5745,
OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL = 5746,
OpSubgroupAvcImeInitializeINTEL = 5747,
OpSubgroupAvcImeSetSingleReferenceINTEL = 5748,
OpSubgroupAvcImeSetDualReferenceINTEL = 5749,
OpSubgroupAvcImeRefWindowSizeINTEL = 5750,
OpSubgroupAvcImeAdjustRefOffsetINTEL = 5751,
OpSubgroupAvcImeConvertToMcePayloadINTEL = 5752,
OpSubgroupAvcImeSetMaxMotionVectorCountINTEL = 5753,
OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL = 5754,
OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL = 5755,
OpSubgroupAvcImeSetWeightedSadINTEL = 5756,
OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL = 5757,
OpSubgroupAvcImeEvaluateWithDualReferenceINTEL = 5758,
OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL = 5759,
OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL = 5760,
OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL = 5761,
OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL = 5762,
OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL = 5763,
OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL = 5764,
OpSubgroupAvcImeConvertToMceResultINTEL = 5765,
OpSubgroupAvcImeGetSingleReferenceStreaminINTEL = 5766,
OpSubgroupAvcImeGetDualReferenceStreaminINTEL = 5767,
OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL = 5768,
OpSubgroupAvcImeStripDualReferenceStreamoutINTEL = 5769,
OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL = 5770,
OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL = 5771,
OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL = 5772,
OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL = 5773,
OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL = 5774,
OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL = 5775,
OpSubgroupAvcImeGetBorderReachedINTEL = 5776,
OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL = 5777,
OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL = 5778,
OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL = 5779,
OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL = 5780,
OpSubgroupAvcFmeInitializeINTEL = 5781,
OpSubgroupAvcBmeInitializeINTEL = 5782,
OpSubgroupAvcRefConvertToMcePayloadINTEL = 5783,
OpSubgroupAvcRefSetBidirectionalMixDisableINTEL = 5784,
OpSubgroupAvcRefSetBilinearFilterEnableINTEL = 5785,
OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL = 5786,
OpSubgroupAvcRefEvaluateWithDualReferenceINTEL = 5787,
OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL = 5788,
OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL = 5789,
OpSubgroupAvcRefConvertToMceResultINTEL = 5790,
OpSubgroupAvcSicInitializeINTEL = 5791,
OpSubgroupAvcSicConfigureSkcINTEL = 5792,
OpSubgroupAvcSicConfigureIpeLumaINTEL = 5793,
OpSubgroupAvcSicConfigureIpeLumaChromaINTEL = 5794,
OpSubgroupAvcSicGetMotionVectorMaskINTEL = 5795,
OpSubgroupAvcSicConvertToMcePayloadINTEL = 5796,
OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL = 5797,
OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL = 5798,
OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL = 5799,
OpSubgroupAvcSicSetBilinearFilterEnableINTEL = 5800,
OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL = 5801,
OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL = 5802,
OpSubgroupAvcSicEvaluateIpeINTEL = 5803,
OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL = 5804,
OpSubgroupAvcSicEvaluateWithDualReferenceINTEL = 5805,
OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL = 5806,
OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL = 5807,
OpSubgroupAvcSicConvertToMceResultINTEL = 5808,
OpSubgroupAvcSicGetIpeLumaShapeINTEL = 5809,
OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL = 5810,
OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL = 5811,
OpSubgroupAvcSicGetPackedIpeLumaModesINTEL = 5812,
OpSubgroupAvcSicGetIpeChromaModeINTEL = 5813,
OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL = 5814,
OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL = 5815,
OpSubgroupAvcSicGetInterRawSadsINTEL = 5816,
OpMax = 0x7fffffff,
};
#ifdef SPV_ENABLE_UTILITY_CODE
inline void HasResultAndType(Op opcode, bool *hasResult, bool *hasResultType) {
*hasResult = *hasResultType = false;
switch (opcode) {
default: /* unknown opcode */ break;
case OpNop: *hasResult = false; *hasResultType = false; break;
case OpUndef: *hasResult = true; *hasResultType = true; break;
case OpSourceContinued: *hasResult = false; *hasResultType = false; break;
case OpSource: *hasResult = false; *hasResultType = false; break;
case OpSourceExtension: *hasResult = false; *hasResultType = false; break;
case OpName: *hasResult = false; *hasResultType = false; break;
case OpMemberName: *hasResult = false; *hasResultType = false; break;
case OpString: *hasResult = true; *hasResultType = false; break;
case OpLine: *hasResult = false; *hasResultType = false; break;
case OpExtension: *hasResult = false; *hasResultType = false; break;
case OpExtInstImport: *hasResult = true; *hasResultType = false; break;
case OpExtInst: *hasResult = true; *hasResultType = true; break;
case OpMemoryModel: *hasResult = false; *hasResultType = false; break;
case OpEntryPoint: *hasResult = false; *hasResultType = false; break;
case OpExecutionMode: *hasResult = false; *hasResultType = false; break;
case OpCapability: *hasResult = false; *hasResultType = false; break;
case OpTypeVoid: *hasResult = true; *hasResultType = false; break;
case OpTypeBool: *hasResult = true; *hasResultType = false; break;
case OpTypeInt: *hasResult = true; *hasResultType = false; break;
case OpTypeFloat: *hasResult = true; *hasResultType = false; break;
case OpTypeVector: *hasResult = true; *hasResultType = false; break;
case OpTypeMatrix: *hasResult = true; *hasResultType = false; break;
case OpTypeImage: *hasResult = true; *hasResultType = false; break;
case OpTypeSampler: *hasResult = true; *hasResultType = false; break;
case OpTypeSampledImage: *hasResult = true; *hasResultType = false; break;
case OpTypeArray: *hasResult = true; *hasResultType = false; break;
case OpTypeRuntimeArray: *hasResult = true; *hasResultType = false; break;
case OpTypeStruct: *hasResult = true; *hasResultType = false; break;
case OpTypeOpaque: *hasResult = true; *hasResultType = false; break;
case OpTypePointer: *hasResult = true; *hasResultType = false; break;
case OpTypeFunction: *hasResult = true; *hasResultType = false; break;
case OpTypeEvent: *hasResult = true; *hasResultType = false; break;
case OpTypeDeviceEvent: *hasResult = true; *hasResultType = false; break;
case OpTypeReserveId: *hasResult = true; *hasResultType = false; break;
case OpTypeQueue: *hasResult = true; *hasResultType = false; break;
case OpTypePipe: *hasResult = true; *hasResultType = false; break;
case OpTypeForwardPointer: *hasResult = false; *hasResultType = false; break;
case OpConstantTrue: *hasResult = true; *hasResultType = true; break;
case OpConstantFalse: *hasResult = true; *hasResultType = true; break;
case OpConstant: *hasResult = true; *hasResultType = true; break;
case OpConstantComposite: *hasResult = true; *hasResultType = true; break;
case OpConstantSampler: *hasResult = true; *hasResultType = true; break;
case OpConstantNull: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantTrue: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantFalse: *hasResult = true; *hasResultType = true; break;
case OpSpecConstant: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantComposite: *hasResult = true; *hasResultType = true; break;
case OpSpecConstantOp: *hasResult = true; *hasResultType = true; break;
case OpFunction: *hasResult = true; *hasResultType = true; break;
case OpFunctionParameter: *hasResult = true; *hasResultType = true; break;
case OpFunctionEnd: *hasResult = false; *hasResultType = false; break;
case OpFunctionCall: *hasResult = true; *hasResultType = true; break;
case OpVariable: *hasResult = true; *hasResultType = true; break;
case OpImageTexelPointer: *hasResult = true; *hasResultType = true; break;
case OpLoad: *hasResult = true; *hasResultType = true; break;
case OpStore: *hasResult = false; *hasResultType = false; break;
case OpCopyMemory: *hasResult = false; *hasResultType = false; break;
case OpCopyMemorySized: *hasResult = false; *hasResultType = false; break;
case OpAccessChain: *hasResult = true; *hasResultType = true; break;
case OpInBoundsAccessChain: *hasResult = true; *hasResultType = true; break;
case OpPtrAccessChain: *hasResult = true; *hasResultType = true; break;
case OpArrayLength: *hasResult = true; *hasResultType = true; break;
case OpGenericPtrMemSemantics: *hasResult = true; *hasResultType = true; break;
case OpInBoundsPtrAccessChain: *hasResult = true; *hasResultType = true; break;
case OpDecorate: *hasResult = false; *hasResultType = false; break;
case OpMemberDecorate: *hasResult = false; *hasResultType = false; break;
case OpDecorationGroup: *hasResult = true; *hasResultType = false; break;
case OpGroupDecorate: *hasResult = false; *hasResultType = false; break;
case OpGroupMemberDecorate: *hasResult = false; *hasResultType = false; break;
case OpVectorExtractDynamic: *hasResult = true; *hasResultType = true; break;
case OpVectorInsertDynamic: *hasResult = true; *hasResultType = true; break;
case OpVectorShuffle: *hasResult = true; *hasResultType = true; break;
case OpCompositeConstruct: *hasResult = true; *hasResultType = true; break;
case OpCompositeExtract: *hasResult = true; *hasResultType = true; break;
case OpCompositeInsert: *hasResult = true; *hasResultType = true; break;
case OpCopyObject: *hasResult = true; *hasResultType = true; break;
case OpTranspose: *hasResult = true; *hasResultType = true; break;
case OpSampledImage: *hasResult = true; *hasResultType = true; break;
case OpImageSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageFetch: *hasResult = true; *hasResultType = true; break;
case OpImageGather: *hasResult = true; *hasResultType = true; break;
case OpImageDrefGather: *hasResult = true; *hasResultType = true; break;
case OpImageRead: *hasResult = true; *hasResultType = true; break;
case OpImageWrite: *hasResult = false; *hasResultType = false; break;
case OpImage: *hasResult = true; *hasResultType = true; break;
case OpImageQueryFormat: *hasResult = true; *hasResultType = true; break;
case OpImageQueryOrder: *hasResult = true; *hasResultType = true; break;
case OpImageQuerySizeLod: *hasResult = true; *hasResultType = true; break;
case OpImageQuerySize: *hasResult = true; *hasResultType = true; break;
case OpImageQueryLod: *hasResult = true; *hasResultType = true; break;
case OpImageQueryLevels: *hasResult = true; *hasResultType = true; break;
case OpImageQuerySamples: *hasResult = true; *hasResultType = true; break;
case OpConvertFToU: *hasResult = true; *hasResultType = true; break;
case OpConvertFToS: *hasResult = true; *hasResultType = true; break;
case OpConvertSToF: *hasResult = true; *hasResultType = true; break;
case OpConvertUToF: *hasResult = true; *hasResultType = true; break;
case OpUConvert: *hasResult = true; *hasResultType = true; break;
case OpSConvert: *hasResult = true; *hasResultType = true; break;
case OpFConvert: *hasResult = true; *hasResultType = true; break;
case OpQuantizeToF16: *hasResult = true; *hasResultType = true; break;
case OpConvertPtrToU: *hasResult = true; *hasResultType = true; break;
case OpSatConvertSToU: *hasResult = true; *hasResultType = true; break;
case OpSatConvertUToS: *hasResult = true; *hasResultType = true; break;
case OpConvertUToPtr: *hasResult = true; *hasResultType = true; break;
case OpPtrCastToGeneric: *hasResult = true; *hasResultType = true; break;
case OpGenericCastToPtr: *hasResult = true; *hasResultType = true; break;
case OpGenericCastToPtrExplicit: *hasResult = true; *hasResultType = true; break;
case OpBitcast: *hasResult = true; *hasResultType = true; break;
case OpSNegate: *hasResult = true; *hasResultType = true; break;
case OpFNegate: *hasResult = true; *hasResultType = true; break;
case OpIAdd: *hasResult = true; *hasResultType = true; break;
case OpFAdd: *hasResult = true; *hasResultType = true; break;
case OpISub: *hasResult = true; *hasResultType = true; break;
case OpFSub: *hasResult = true; *hasResultType = true; break;
case OpIMul: *hasResult = true; *hasResultType = true; break;
case OpFMul: *hasResult = true; *hasResultType = true; break;
case OpUDiv: *hasResult = true; *hasResultType = true; break;
case OpSDiv: *hasResult = true; *hasResultType = true; break;
case OpFDiv: *hasResult = true; *hasResultType = true; break;
case OpUMod: *hasResult = true; *hasResultType = true; break;
case OpSRem: *hasResult = true; *hasResultType = true; break;
case OpSMod: *hasResult = true; *hasResultType = true; break;
case OpFRem: *hasResult = true; *hasResultType = true; break;
case OpFMod: *hasResult = true; *hasResultType = true; break;
case OpVectorTimesScalar: *hasResult = true; *hasResultType = true; break;
case OpMatrixTimesScalar: *hasResult = true; *hasResultType = true; break;
case OpVectorTimesMatrix: *hasResult = true; *hasResultType = true; break;
case OpMatrixTimesVector: *hasResult = true; *hasResultType = true; break;
case OpMatrixTimesMatrix: *hasResult = true; *hasResultType = true; break;
case OpOuterProduct: *hasResult = true; *hasResultType = true; break;
case OpDot: *hasResult = true; *hasResultType = true; break;
case OpIAddCarry: *hasResult = true; *hasResultType = true; break;
case OpISubBorrow: *hasResult = true; *hasResultType = true; break;
case OpUMulExtended: *hasResult = true; *hasResultType = true; break;
case OpSMulExtended: *hasResult = true; *hasResultType = true; break;
case OpAny: *hasResult = true; *hasResultType = true; break;
case OpAll: *hasResult = true; *hasResultType = true; break;
case OpIsNan: *hasResult = true; *hasResultType = true; break;
case OpIsInf: *hasResult = true; *hasResultType = true; break;
case OpIsFinite: *hasResult = true; *hasResultType = true; break;
case OpIsNormal: *hasResult = true; *hasResultType = true; break;
case OpSignBitSet: *hasResult = true; *hasResultType = true; break;
case OpLessOrGreater: *hasResult = true; *hasResultType = true; break;
case OpOrdered: *hasResult = true; *hasResultType = true; break;
case OpUnordered: *hasResult = true; *hasResultType = true; break;
case OpLogicalEqual: *hasResult = true; *hasResultType = true; break;
case OpLogicalNotEqual: *hasResult = true; *hasResultType = true; break;
case OpLogicalOr: *hasResult = true; *hasResultType = true; break;
case OpLogicalAnd: *hasResult = true; *hasResultType = true; break;
case OpLogicalNot: *hasResult = true; *hasResultType = true; break;
case OpSelect: *hasResult = true; *hasResultType = true; break;
case OpIEqual: *hasResult = true; *hasResultType = true; break;
case OpINotEqual: *hasResult = true; *hasResultType = true; break;
case OpUGreaterThan: *hasResult = true; *hasResultType = true; break;
case OpSGreaterThan: *hasResult = true; *hasResultType = true; break;
case OpUGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
case OpSGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
case OpULessThan: *hasResult = true; *hasResultType = true; break;
case OpSLessThan: *hasResult = true; *hasResultType = true; break;
case OpULessThanEqual: *hasResult = true; *hasResultType = true; break;
case OpSLessThanEqual: *hasResult = true; *hasResultType = true; break;
case OpFOrdEqual: *hasResult = true; *hasResultType = true; break;
case OpFUnordEqual: *hasResult = true; *hasResultType = true; break;
case OpFOrdNotEqual: *hasResult = true; *hasResultType = true; break;
case OpFUnordNotEqual: *hasResult = true; *hasResultType = true; break;
case OpFOrdLessThan: *hasResult = true; *hasResultType = true; break;
case OpFUnordLessThan: *hasResult = true; *hasResultType = true; break;
case OpFOrdGreaterThan: *hasResult = true; *hasResultType = true; break;
case OpFUnordGreaterThan: *hasResult = true; *hasResultType = true; break;
case OpFOrdLessThanEqual: *hasResult = true; *hasResultType = true; break;
case OpFUnordLessThanEqual: *hasResult = true; *hasResultType = true; break;
case OpFOrdGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
case OpFUnordGreaterThanEqual: *hasResult = true; *hasResultType = true; break;
case OpShiftRightLogical: *hasResult = true; *hasResultType = true; break;
case OpShiftRightArithmetic: *hasResult = true; *hasResultType = true; break;
case OpShiftLeftLogical: *hasResult = true; *hasResultType = true; break;
case OpBitwiseOr: *hasResult = true; *hasResultType = true; break;
case OpBitwiseXor: *hasResult = true; *hasResultType = true; break;
case OpBitwiseAnd: *hasResult = true; *hasResultType = true; break;
case OpNot: *hasResult = true; *hasResultType = true; break;
case OpBitFieldInsert: *hasResult = true; *hasResultType = true; break;
case OpBitFieldSExtract: *hasResult = true; *hasResultType = true; break;
case OpBitFieldUExtract: *hasResult = true; *hasResultType = true; break;
case OpBitReverse: *hasResult = true; *hasResultType = true; break;
case OpBitCount: *hasResult = true; *hasResultType = true; break;
case OpDPdx: *hasResult = true; *hasResultType = true; break;
case OpDPdy: *hasResult = true; *hasResultType = true; break;
case OpFwidth: *hasResult = true; *hasResultType = true; break;
case OpDPdxFine: *hasResult = true; *hasResultType = true; break;
case OpDPdyFine: *hasResult = true; *hasResultType = true; break;
case OpFwidthFine: *hasResult = true; *hasResultType = true; break;
case OpDPdxCoarse: *hasResult = true; *hasResultType = true; break;
case OpDPdyCoarse: *hasResult = true; *hasResultType = true; break;
case OpFwidthCoarse: *hasResult = true; *hasResultType = true; break;
case OpEmitVertex: *hasResult = false; *hasResultType = false; break;
case OpEndPrimitive: *hasResult = false; *hasResultType = false; break;
case OpEmitStreamVertex: *hasResult = false; *hasResultType = false; break;
case OpEndStreamPrimitive: *hasResult = false; *hasResultType = false; break;
case OpControlBarrier: *hasResult = false; *hasResultType = false; break;
case OpMemoryBarrier: *hasResult = false; *hasResultType = false; break;
case OpAtomicLoad: *hasResult = true; *hasResultType = true; break;
case OpAtomicStore: *hasResult = false; *hasResultType = false; break;
case OpAtomicExchange: *hasResult = true; *hasResultType = true; break;
case OpAtomicCompareExchange: *hasResult = true; *hasResultType = true; break;
case OpAtomicCompareExchangeWeak: *hasResult = true; *hasResultType = true; break;
case OpAtomicIIncrement: *hasResult = true; *hasResultType = true; break;
case OpAtomicIDecrement: *hasResult = true; *hasResultType = true; break;
case OpAtomicIAdd: *hasResult = true; *hasResultType = true; break;
case OpAtomicISub: *hasResult = true; *hasResultType = true; break;
case OpAtomicSMin: *hasResult = true; *hasResultType = true; break;
case OpAtomicUMin: *hasResult = true; *hasResultType = true; break;
case OpAtomicSMax: *hasResult = true; *hasResultType = true; break;
case OpAtomicUMax: *hasResult = true; *hasResultType = true; break;
case OpAtomicAnd: *hasResult = true; *hasResultType = true; break;
case OpAtomicOr: *hasResult = true; *hasResultType = true; break;
case OpAtomicXor: *hasResult = true; *hasResultType = true; break;
case OpPhi: *hasResult = true; *hasResultType = true; break;
case OpLoopMerge: *hasResult = false; *hasResultType = false; break;
case OpSelectionMerge: *hasResult = false; *hasResultType = false; break;
case OpLabel: *hasResult = true; *hasResultType = false; break;
case OpBranch: *hasResult = false; *hasResultType = false; break;
case OpBranchConditional: *hasResult = false; *hasResultType = false; break;
case OpSwitch: *hasResult = false; *hasResultType = false; break;
case OpKill: *hasResult = false; *hasResultType = false; break;
case OpReturn: *hasResult = false; *hasResultType = false; break;
case OpReturnValue: *hasResult = false; *hasResultType = false; break;
case OpUnreachable: *hasResult = false; *hasResultType = false; break;
case OpLifetimeStart: *hasResult = false; *hasResultType = false; break;
case OpLifetimeStop: *hasResult = false; *hasResultType = false; break;
case OpGroupAsyncCopy: *hasResult = true; *hasResultType = true; break;
case OpGroupWaitEvents: *hasResult = false; *hasResultType = false; break;
case OpGroupAll: *hasResult = true; *hasResultType = true; break;
case OpGroupAny: *hasResult = true; *hasResultType = true; break;
case OpGroupBroadcast: *hasResult = true; *hasResultType = true; break;
case OpGroupIAdd: *hasResult = true; *hasResultType = true; break;
case OpGroupFAdd: *hasResult = true; *hasResultType = true; break;
case OpGroupFMin: *hasResult = true; *hasResultType = true; break;
case OpGroupUMin: *hasResult = true; *hasResultType = true; break;
case OpGroupSMin: *hasResult = true; *hasResultType = true; break;
case OpGroupFMax: *hasResult = true; *hasResultType = true; break;
case OpGroupUMax: *hasResult = true; *hasResultType = true; break;
case OpGroupSMax: *hasResult = true; *hasResultType = true; break;
case OpReadPipe: *hasResult = true; *hasResultType = true; break;
case OpWritePipe: *hasResult = true; *hasResultType = true; break;
case OpReservedReadPipe: *hasResult = true; *hasResultType = true; break;
case OpReservedWritePipe: *hasResult = true; *hasResultType = true; break;
case OpReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
case OpReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
case OpCommitReadPipe: *hasResult = false; *hasResultType = false; break;
case OpCommitWritePipe: *hasResult = false; *hasResultType = false; break;
case OpIsValidReserveId: *hasResult = true; *hasResultType = true; break;
case OpGetNumPipePackets: *hasResult = true; *hasResultType = true; break;
case OpGetMaxPipePackets: *hasResult = true; *hasResultType = true; break;
case OpGroupReserveReadPipePackets: *hasResult = true; *hasResultType = true; break;
case OpGroupReserveWritePipePackets: *hasResult = true; *hasResultType = true; break;
case OpGroupCommitReadPipe: *hasResult = false; *hasResultType = false; break;
case OpGroupCommitWritePipe: *hasResult = false; *hasResultType = false; break;
case OpEnqueueMarker: *hasResult = true; *hasResultType = true; break;
case OpEnqueueKernel: *hasResult = true; *hasResultType = true; break;
case OpGetKernelNDrangeSubGroupCount: *hasResult = true; *hasResultType = true; break;
case OpGetKernelNDrangeMaxSubGroupSize: *hasResult = true; *hasResultType = true; break;
case OpGetKernelWorkGroupSize: *hasResult = true; *hasResultType = true; break;
case OpGetKernelPreferredWorkGroupSizeMultiple: *hasResult = true; *hasResultType = true; break;
case OpRetainEvent: *hasResult = false; *hasResultType = false; break;
case OpReleaseEvent: *hasResult = false; *hasResultType = false; break;
case OpCreateUserEvent: *hasResult = true; *hasResultType = true; break;
case OpIsValidEvent: *hasResult = true; *hasResultType = true; break;
case OpSetUserEventStatus: *hasResult = false; *hasResultType = false; break;
case OpCaptureEventProfilingInfo: *hasResult = false; *hasResultType = false; break;
case OpGetDefaultQueue: *hasResult = true; *hasResultType = true; break;
case OpBuildNDRange: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleProjImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleProjExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleProjDrefImplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseSampleProjDrefExplicitLod: *hasResult = true; *hasResultType = true; break;
case OpImageSparseFetch: *hasResult = true; *hasResultType = true; break;
case OpImageSparseGather: *hasResult = true; *hasResultType = true; break;
case OpImageSparseDrefGather: *hasResult = true; *hasResultType = true; break;
case OpImageSparseTexelsResident: *hasResult = true; *hasResultType = true; break;
case OpNoLine: *hasResult = false; *hasResultType = false; break;
case OpAtomicFlagTestAndSet: *hasResult = true; *hasResultType = true; break;
case OpAtomicFlagClear: *hasResult = false; *hasResultType = false; break;
case OpImageSparseRead: *hasResult = true; *hasResultType = true; break;
case OpSizeOf: *hasResult = true; *hasResultType = true; break;
case OpTypePipeStorage: *hasResult = true; *hasResultType = false; break;
case OpConstantPipeStorage: *hasResult = true; *hasResultType = true; break;
case OpCreatePipeFromPipeStorage: *hasResult = true; *hasResultType = true; break;
case OpGetKernelLocalSizeForSubgroupCount: *hasResult = true; *hasResultType = true; break;
case OpGetKernelMaxNumSubgroups: *hasResult = true; *hasResultType = true; break;
case OpTypeNamedBarrier: *hasResult = true; *hasResultType = false; break;
case OpNamedBarrierInitialize: *hasResult = true; *hasResultType = true; break;
case OpMemoryNamedBarrier: *hasResult = false; *hasResultType = false; break;
case OpModuleProcessed: *hasResult = false; *hasResultType = false; break;
case OpExecutionModeId: *hasResult = false; *hasResultType = false; break;
case OpDecorateId: *hasResult = false; *hasResultType = false; break;
case OpGroupNonUniformElect: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformAll: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformAny: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformAllEqual: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBroadcast: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBroadcastFirst: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBallot: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformInverseBallot: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBallotBitExtract: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBallotBitCount: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBallotFindLSB: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBallotFindMSB: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformShuffle: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformShuffleXor: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformShuffleUp: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformShuffleDown: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformIAdd: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformFAdd: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformIMul: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformFMul: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformSMin: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformUMin: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformFMin: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformSMax: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformUMax: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformFMax: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBitwiseAnd: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBitwiseOr: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformBitwiseXor: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformLogicalAnd: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformLogicalOr: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformLogicalXor: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformQuadBroadcast: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformQuadSwap: *hasResult = true; *hasResultType = true; break;
case OpCopyLogical: *hasResult = true; *hasResultType = true; break;
case OpPtrEqual: *hasResult = true; *hasResultType = true; break;
case OpPtrNotEqual: *hasResult = true; *hasResultType = true; break;
case OpPtrDiff: *hasResult = true; *hasResultType = true; break;
case OpSubgroupBallotKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupFirstInvocationKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAnyKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAllEqualKHR: *hasResult = true; *hasResultType = true; break;
case OpSubgroupReadInvocationKHR: *hasResult = true; *hasResultType = true; break;
case OpGroupIAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupFAddNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupFMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupUMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupSMinNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupFMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupUMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpGroupSMaxNonUniformAMD: *hasResult = true; *hasResultType = true; break;
case OpFragmentMaskFetchAMD: *hasResult = true; *hasResultType = true; break;
case OpFragmentFetchAMD: *hasResult = true; *hasResultType = true; break;
case OpReadClockKHR: *hasResult = true; *hasResultType = true; break;
case OpImageSampleFootprintNV: *hasResult = true; *hasResultType = true; break;
case OpGroupNonUniformPartitionNV: *hasResult = true; *hasResultType = true; break;
case OpWritePackedPrimitiveIndices4x8NV: *hasResult = false; *hasResultType = false; break;
case OpReportIntersectionNV: *hasResult = true; *hasResultType = true; break;
case OpIgnoreIntersectionNV: *hasResult = false; *hasResultType = false; break;
case OpTerminateRayNV: *hasResult = false; *hasResultType = false; break;
case OpTraceNV: *hasResult = false; *hasResultType = false; break;
case OpTypeAccelerationStructureNV: *hasResult = true; *hasResultType = false; break;
case OpExecuteCallableNV: *hasResult = false; *hasResultType = false; break;
case OpTypeCooperativeMatrixNV: *hasResult = true; *hasResultType = false; break;
case OpCooperativeMatrixLoadNV: *hasResult = true; *hasResultType = true; break;
case OpCooperativeMatrixStoreNV: *hasResult = false; *hasResultType = false; break;
case OpCooperativeMatrixMulAddNV: *hasResult = true; *hasResultType = true; break;
case OpCooperativeMatrixLengthNV: *hasResult = true; *hasResultType = true; break;
case OpBeginInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
case OpEndInvocationInterlockEXT: *hasResult = false; *hasResultType = false; break;
case OpDemoteToHelperInvocationEXT: *hasResult = false; *hasResultType = false; break;
case OpIsHelperInvocationEXT: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleDownINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleUpINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupShuffleXorINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
case OpSubgroupImageBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupImageBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
case OpSubgroupImageMediaBlockReadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupImageMediaBlockWriteINTEL: *hasResult = false; *hasResultType = false; break;
case OpUCountLeadingZerosINTEL: *hasResult = true; *hasResultType = true; break;
case OpUCountTrailingZerosINTEL: *hasResult = true; *hasResultType = true; break;
case OpAbsISubINTEL: *hasResult = true; *hasResultType = true; break;
case OpAbsUSubINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAddSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAddSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAverageINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAverageINTEL: *hasResult = true; *hasResultType = true; break;
case OpIAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
case OpUAverageRoundedINTEL: *hasResult = true; *hasResultType = true; break;
case OpISubSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpUSubSatINTEL: *hasResult = true; *hasResultType = true; break;
case OpIMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
case OpUMul32x16INTEL: *hasResult = true; *hasResultType = true; break;
case OpDecorateString: *hasResult = false; *hasResultType = false; break;
case OpMemberDecorateString: *hasResult = false; *hasResultType = false; break;
case OpVmeImageINTEL: *hasResult = true; *hasResultType = true; break;
case OpTypeVmeImageINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImePayloadINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcRefPayloadINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcSicPayloadINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcMcePayloadINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcMceResultINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImeResultINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImeResultSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImeResultDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImeSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcImeDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcRefResultINTEL: *hasResult = true; *hasResultType = false; break;
case OpTypeAvcSicResultINTEL: *hasResult = true; *hasResultType = false; break;
case OpSubgroupAvcMceGetDefaultInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetInterBaseMultiReferencePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetInterShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetInterDirectionPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultInterMotionVectorCostTableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultHighPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultMediumPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultLowPenaltyCostTableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetMotionVectorCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultIntraLumaModePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultNonDcLumaIntraPenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetDefaultIntraChromaModeBasePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetAcOnlyHaarINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetSourceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetSingleReferenceInterlacedFieldPolarityINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceSetDualReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToImePayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToImeResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToRefPayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToRefResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToSicPayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceConvertToSicResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetBestInterDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterMajorShapeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterMinorShapeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterDirectionsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcMceGetInterReferenceInterlacedFieldPolaritiesINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeRefWindowSizeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeAdjustRefOffsetINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetMaxMotionVectorCountINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetUnidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetEarlySearchTerminationThresholdINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeSetWeightedSadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithSingleReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeEvaluateWithDualReferenceStreaminoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetSingleReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetDualReferenceStreaminINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeStripSingleReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeStripDualReferenceStreamoutINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutSingleReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeMotionVectorsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeDistortionsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetStreamoutDualReferenceMajorShapeReferenceIdsINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetBorderReachedINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetTruncatedSearchIndicationINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetUnidirectionalEarlySearchTerminationINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetWeightingPatternMinimumMotionVectorINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcImeGetWeightingPatternMinimumDistortionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcFmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcBmeInitializeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefSetBidirectionalMixDisableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcRefConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicInitializeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicConfigureSkcINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicConfigureIpeLumaINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicConfigureIpeLumaChromaINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetMotionVectorMaskINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicConvertToMcePayloadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetIntraLumaShapePenaltyINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetIntraLumaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetIntraChromaModeCostFunctionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetBilinearFilterEnableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetSkcForwardTransformEnableINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicSetBlockBasedRawSkipSadINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicEvaluateIpeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicEvaluateWithSingleReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicEvaluateWithDualReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicEvaluateWithMultiReferenceINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicEvaluateWithMultiReferenceInterlacedINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicConvertToMceResultINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetIpeLumaShapeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetBestIpeLumaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetBestIpeChromaDistortionINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetPackedIpeLumaModesINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetIpeChromaModeINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetPackedSkcLumaCountThresholdINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetPackedSkcLumaSumThresholdINTEL: *hasResult = true; *hasResultType = true; break;
case OpSubgroupAvcSicGetInterRawSadsINTEL: *hasResult = true; *hasResultType = true; break;
}
}
#endif /* SPV_ENABLE_UTILITY_CODE */
// Overload operator| for mask bit combining
inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); }
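Illustration only (not part of the upstream diff): with the overload above, image-operand bits combine into a typed ImageOperandsMask value instead of decaying to int. A small usage sketch, assuming the header is included as "spirv.hpp":
#include "spirv.hpp"   // assumed include path for the generated header
// Request an explicit LOD plus a constant texel offset on an image instruction.
// Both mask values are defined earlier in the same generated header.
inline spv::ImageOperandsMask lodWithConstOffset() {
    return spv::ImageOperandsLodMask | spv::ImageOperandsConstOffsetMask;
}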

View file

@ -83,6 +83,7 @@ const MemorySemanticsMask MemorySemanticsAllMemory =
struct IdImmediate {
bool isId; // true if word is an Id, false if word is an immediate
unsigned word;
IdImmediate(bool i, unsigned w) : isId(i), word(w) {}
};
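Illustration only (not part of the upstream diff): the added constructor lets mixed operand lists be built inline instead of assigning isId and word separately. A brief sketch; the helper below is hypothetical, and the include path is assumed to be "spvIR.h":
#include <vector>
#include "spvIR.h"   // assumed include path; defines spv::IdImmediate and spv::Id
// Build an operand list with one <id> operand and one literal word.
// The consuming builder call is not shown; real call sites live in the SPIR-V builder.
inline std::vector<spv::IdImmediate> makeOperands(spv::Id image, unsigned literalWord) {
    std::vector<spv::IdImmediate> operands;
    operands.push_back(spv::IdImmediate(true, image));        // isId == true: an <id>
    operands.push_back(spv::IdImmediate(false, literalWord)); // isId == false: an immediate
    return operands;
}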
//
@ -225,6 +226,36 @@ public:
return nullptr;
}
// Change this block into a canonical dead merge block. Delete instructions
// as necessary. A canonical dead merge block has only an OpLabel and an
// OpUnreachable.
void rewriteAsCanonicalUnreachableMerge() {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
Instruction* unreachable = new Instruction(OpUnreachable);
addInstruction(std::unique_ptr<Instruction>(unreachable));
}
// Change this block into a canonical dead continue target branching to the
// given header ID. Delete instructions as necessary. A canonical dead continue
// target has only an OpLabel and an unconditional branch back to the corresponding
// header.
void rewriteAsCanonicalUnreachableContinue(Block* header) {
assert(localVariables.empty());
// Delete all instructions except for the label.
assert(instructions.size() > 0);
instructions.resize(1);
successors.clear();
// Add OpBranch back to the header.
assert(header != nullptr);
Instruction* branch = new Instruction(OpBranch);
branch->addIdOperand(header->getId());
addInstruction(std::unique_ptr<Instruction>(branch));
successors.push_back(header);
}
bool isTerminated() const
{
switch (instructions.back()->getOpCode()) {
@ -234,6 +265,7 @@ public:
case OpKill:
case OpReturn:
case OpReturnValue:
case OpUnreachable:
return true;
default:
return false;
@ -267,10 +299,24 @@ protected:
bool unreachable;
};
// The different reasons for reaching a block in the inReadableOrder traversal.
enum ReachReason {
// Reachable from the entry block via transfers of control, i.e. branches.
ReachViaControlFlow = 0,
// A continue target that is not reachable via control flow.
ReachDeadContinue,
// A merge block that is not reachable via control flow.
ReachDeadMerge
};
// Traverses the control-flow graph rooted at root in an order suited for
// readable code generation. Invokes callback at every node in the traversal
// order.
void inReadableOrder(Block* root, std::function<void(Block*)> callback);
// order. The callback arguments are:
// - the block,
// - the reason we reached the block,
// - if the reason is that the block is an unreachable continue target or an unreachable merge block,
// then the last parameter is the corresponding header block.
void inReadableOrder(Block* root, std::function<void(Block*, ReachReason, Block* header)> callback);
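Illustration only (not part of the upstream diff): a minimal callback for the extended signature, distinguishing blocks reached via control flow from canonical dead continue/merge blocks. The function name is hypothetical and the include path is assumed to be "spvIR.h":
#include "spvIR.h"   // assumed include path; declares spv::inReadableOrder
// Walk a function's blocks in readable order, reacting to why each was reached.
inline void visitFunctionBlocks(spv::Block* entry) {
    spv::inReadableOrder(entry,
        [](spv::Block* block, spv::ReachReason why, spv::Block* header) {
            switch (why) {
            case spv::ReachViaControlFlow:
                // normally reachable block; 'header' is not meaningful here
                break;
            case spv::ReachDeadContinue:
            case spv::ReachDeadMerge:
                // kept only to satisfy structured control flow; 'header' is the
                // loop or selection header the dead block belongs to
                break;
            }
            (void)block; (void)header;
        });
}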
//
// SPIR-V IR Function.
@ -320,7 +366,7 @@ public:
parameterInstructions[p]->dump(out);
// Blocks
inReadableOrder(blocks[0], [&out](const Block* b) { b->dump(out); });
inReadableOrder(blocks[0], [&out](const Block* b, ReachReason, Block*) { b->dump(out); });
Instruction end(0, 0, OpFunctionEnd);
end.dump(out);
}
@ -435,6 +481,6 @@ __inline void Block::addInstruction(std::unique_ptr<Instruction> inst)
parent.getParent().mapInstruction(raw_instruction);
}
}; // end spv namespace
} // end spv namespace
#endif // spvIR_H