Deleted old unused OpenGL and Vulkan code that led to confusion

Robert Beckebans 2024-10-24 11:58:56 +02:00
parent d6dabb6869
commit 6f2aace910
18 changed files with 0 additions and 23164 deletions


@@ -1,695 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013 Robert Beckebans
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
#include "../RenderCommon.h"
extern idCVar r_showBuffers;
//static const GLenum bufferUsage = GL_STATIC_DRAW;
static const GLenum bufferUsage = GL_DYNAMIC_DRAW;
/*
================================================================================================
Buffer Objects
================================================================================================
*/
/*
========================
UnbindBufferObjects
========================
*/
void UnbindBufferObjects()
{
glBindBuffer( GL_ARRAY_BUFFER, 0 );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
}
/*
================================================================================================
idVertexBuffer
================================================================================================
*/
/*
========================
idVertexBuffer::idVertexBuffer
========================
*/
idVertexBuffer::idVertexBuffer()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
SetUnmapped();
}
/*
========================
idVertexBuffer::AllocBufferObject
========================
*/
bool idVertexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == 0xFFFF );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idVertexBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
int numBytes = GetAllocedSize();
// clear out any previous error
GL_CheckErrors();
glGenBuffers( 1, ( GLuint* ) &apiObject );
if( apiObject == 0xFFFF )
{
idLib::FatalError( "idVertexBuffer::AllocBufferObject: failed" );
}
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
// these are rewritten every frame
glBufferData( GL_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
GLenum err = glGetError();
if( err == GL_OUT_OF_MEMORY )
{
idLib::Warning( "idVertexBuffer::AllocBufferObject: allocation failed" );
allocationFailed = true;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "vertex buffer alloc %p, api %p (%i bytes)\n", this, ( GLuint* )&apiObject, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
/*
========================
idVertexBuffer::FreeBufferObject
========================
*/
void idVertexBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == 0xFFFF )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "vertex buffer free %p, api %p (%i bytes)\n", this, ( GLuint* )&apiObject, GetSize() );
}
glDeleteBuffers( 1, ( GLuint* )&apiObject );
ClearWithoutFreeing();
}
/*
========================
idVertexBuffer::Update
========================
*/
void idVertexBuffer::Update( const void* data, int updateSize, int offset ) const
{
assert( apiObject != 0xFFFF );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( updateSize > GetSize() )
{
idLib::FatalError( "idVertexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
}
int numBytes = ( updateSize + 15 ) & ~15;
if( usage == BU_DYNAMIC )
{
CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
}
else
{
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
glBufferSubData( GL_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptr )numBytes, data );
}
}
/*
========================
idVertexBuffer::MapBuffer
========================
*/
void* idVertexBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( apiObject != 0xFFFF );
assert( IsMapped() == false );
buffer = NULL;
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
if( mapType == BM_READ )
{
buffer = glMapBufferRange( GL_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
if( buffer != NULL )
{
buffer = ( byte* )buffer + GetOffset();
}
}
else if( mapType == BM_WRITE )
{
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
buffer = glMapBufferRange( GL_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
if( buffer != NULL )
{
buffer = ( byte* )buffer + GetOffset();
}
// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
}
else
{
assert( false );
}
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idVertexBuffer::MapBuffer: failed" );
}
return buffer;
}
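/*
========================
Note on the flags above: GL_MAP_UNSYNCHRONIZED_BIT skips the driver's implicit
wait for the GPU, which is only safe because the engine's frame-buffering scheme
guarantees the mapped range is no longer in flight. A hedged sketch of the common
alternative, buffer "orphaning", which detaches the old storage instead of using
an invalidate flag (vbo and numBytes are illustrative names, not engine state):

	glBindBuffer( GL_ARRAY_BUFFER, vbo );
	glBufferData( GL_ARRAY_BUFFER, numBytes, NULL, GL_DYNAMIC_DRAW );	// orphan the old storage
	void* dst = glMapBufferRange( GL_ARRAY_BUFFER, 0, numBytes, GL_MAP_WRITE_BIT );
========================
*/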
/*
========================
idVertexBuffer::UnmapBuffer
========================
*/
void idVertexBuffer::UnmapBuffer()
{
assert( apiObject != 0xFFFF );
assert( IsMapped() );
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
if( !glUnmapBuffer( GL_ARRAY_BUFFER ) )
{
idLib::Printf( "idVertexBuffer::UnmapBuffer failed\n" );
}
SetUnmapped();
}
/*
========================
idVertexBuffer::ClearWithoutFreeing
========================
*/
void idVertexBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
}
/*
================================================================================================
idIndexBuffer
================================================================================================
*/
/*
========================
idIndexBuffer::idIndexBuffer
========================
*/
idIndexBuffer::idIndexBuffer()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
SetUnmapped();
}
/*
========================
idIndexBuffer::AllocBufferObject
========================
*/
bool idIndexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == 0xFFFF );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idIndexBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
int numBytes = GetAllocedSize();
// clear out any previous error
GL_CheckErrors();
glGenBuffers( 1, ( GLuint* )&apiObject );
if( apiObject == 0xFFFF )
{
GLenum error = glGetError();
idLib::FatalError( "idIndexBuffer::AllocBufferObject: failed - GL_Error %d", error );
}
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
// these are rewritten every frame
glBufferData( GL_ELEMENT_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
GLenum err = glGetError();
if( err == GL_OUT_OF_MEMORY )
{
idLib::Warning( "idIndexBuffer:AllocBufferObject: allocation failed" );
allocationFailed = true;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "index buffer alloc %p, api %p (%i bytes)\n", this, ( GLuint* )&apiObject, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
/*
========================
idIndexBuffer::FreeBufferObject
========================
*/
void idIndexBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == 0xFFFF )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "index buffer free %p, api %p (%i bytes)\n", this, ( GLuint* )&apiObject, GetSize() );
}
glDeleteBuffers( 1, ( GLuint* )&apiObject );
ClearWithoutFreeing();
}
/*
========================
idIndexBuffer::Update
========================
*/
void idIndexBuffer::Update( const void* data, int updateSize, int offset ) const
{
assert( apiObject != 0xFFFF );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( updateSize > GetSize() )
{
idLib::FatalError( "idIndexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
}
int numBytes = ( updateSize + 15 ) & ~15;
if( usage == BU_DYNAMIC )
{
CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
}
else
{
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
glBufferSubData( GL_ELEMENT_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptr )numBytes, data );
}
}
/*
========================
idIndexBuffer::MapBuffer
========================
*/
void* idIndexBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( apiObject != 0xFFFF );
assert( IsMapped() == false );
buffer = NULL;
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
if( mapType == BM_READ )
{
//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_READ_ONLY_ARB );
buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
if( buffer != NULL )
{
buffer = ( byte* )buffer + GetOffset();
}
}
else if( mapType == BM_WRITE )
{
//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB );
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
if( buffer != NULL )
{
buffer = ( byte* )buffer + GetOffset();
}
// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
}
else
{
assert( false );
}
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idIndexBuffer::MapBuffer: failed" );
}
return buffer;
}
/*
========================
idIndexBuffer::UnmapBuffer
========================
*/
void idIndexBuffer::UnmapBuffer()
{
assert( apiObject != 0xFFFF );
assert( IsMapped() );
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
if( !glUnmapBuffer( GL_ELEMENT_ARRAY_BUFFER ) )
{
idLib::Printf( "idIndexBuffer::UnmapBuffer failed\n" );
}
buffer = NULL;
SetUnmapped();
}
/*
========================
idIndexBuffer::ClearWithoutFreeing
========================
*/
void idIndexBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
}
/*
================================================================================================
idUniformBuffer
================================================================================================
*/
/*
========================
idUniformBuffer::idUniformBuffer
========================
*/
idUniformBuffer::idUniformBuffer()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
SetUnmapped();
}
/*
========================
idUniformBuffer::AllocBufferObject
========================
*/
bool idUniformBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == 0xFFFF );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idUniformBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
const int numBytes = GetAllocedSize();
glGenBuffers( 1, ( GLuint* )&apiObject );
glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
glBufferData( GL_UNIFORM_BUFFER, numBytes, NULL, GL_STREAM_DRAW );
glBindBuffer( GL_UNIFORM_BUFFER, 0 );
if( r_showBuffers.GetBool() )
{
idLib::Printf( "joint buffer alloc %p, api %p (%i joints)\n", this, ( GLuint* )&apiObject, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
/*
========================
idUniformBuffer::FreeBufferObject
========================
*/
void idUniformBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == 0xFFFF )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "joint buffer free %p, api %p (%i size)\n", this, ( GLuint* )&apiObject, GetSize() );
}
glBindBuffer( GL_UNIFORM_BUFFER, 0 );
glDeleteBuffers( 1, ( GLuint* )&apiObject );
ClearWithoutFreeing();
}
/*
========================
idUniformBuffer::Update
========================
*/
void idUniformBuffer::Update( const void* data, int updateSize, int offset ) const
{
assert( apiObject != 0xFFFF );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( updateSize > GetSize() )
{
idLib::FatalError( "idUniformBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
}
const int numBytes = ( updateSize + 15 ) & ~15;
if( usage == BU_DYNAMIC )
{
CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
}
else
{
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
glBufferSubData( GL_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptr )numBytes, data );
}
}
/*
========================
idUniformBuffer::MapBuffer
========================
*/
void* idUniformBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( IsMapped() == false );
assert( mapType == BM_WRITE );
assert( apiObject != 0xFFFF );
int numBytes = GetAllocedSize();
buffer = NULL;
glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
assert( GetOffset() == 0 );
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
buffer = glMapBufferRange( GL_UNIFORM_BUFFER, 0, numBytes, GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
if( buffer != NULL )
{
buffer = ( byte* )buffer + GetOffset();
}
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idUniformBuffer::MapBuffer: failed" );
}
return ( float* ) buffer;
}
/*
========================
idUniformBuffer::UnmapBuffer
========================
*/
void idUniformBuffer::UnmapBuffer()
{
assert( apiObject != 0xFFFF );
assert( IsMapped() );
glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
if( !glUnmapBuffer( GL_UNIFORM_BUFFER ) )
{
idLib::Printf( "idUniformBuffer::UnmapBuffer failed\n" );
}
buffer = NULL;
SetUnmapped();
}
/*
========================
idUniformBuffer::ClearWithoutFreeing
========================
*/
void idUniformBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = 0xFFFF;
}
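For orientation, a minimal sketch of the lifecycle these deleted classes implemented. Only the idVertexBuffer methods shown above are taken from this file; verts, numVerts and BU_STATIC are illustrative engine-side stand-ins.

	idVertexBuffer vbo;
	if( vbo.AllocBufferObject( NULL, numVerts * sizeof( idDrawVert ), BU_STATIC ) )
	{
		void* dst = vbo.MapBuffer( BM_WRITE );	// write-only, unsynchronized mapping
		memcpy( dst, verts, numVerts * sizeof( idDrawVert ) );	// fill the GPU-visible range
		vbo.UnmapBuffer();
	}
	vbo.FreeBufferObject();	// no-op for sub-allocations that don't own their buffer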


@@ -1,622 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 2014-2020 Robert Beckebans
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
#include "../RenderCommon.h"
#include "../Framebuffer.h"
#if !defined(USE_VULKAN)
static void R_ListFramebuffers_f( const idCmdArgs& args )
{
if( !glConfig.framebufferObjectAvailable )
{
common->Printf( "GL_EXT_framebuffer_object is not available.\n" );
return;
}
}
Framebuffer::Framebuffer( const char* name, int w, int h )
{
fboName = name;
frameBuffer = 0;
memset( colorBuffers, 0, sizeof( colorBuffers ) );
colorFormat = 0;
depthBuffer = 0;
depthFormat = 0;
stencilBuffer = 0;
stencilFormat = 0;
width = w;
height = h;
msaaSamples = false;
glGenFramebuffers( 1, &frameBuffer );
framebuffers.Append( this );
}
Framebuffer::~Framebuffer()
{
glDeleteFramebuffers( 1, &frameBuffer );
}
void Framebuffer::Init()
{
cmdSystem->AddCommand( "listFramebuffers", R_ListFramebuffers_f, CMD_FL_RENDERER, "lists framebuffers" );
tr.backend.currentFramebuffer = NULL;
// SHADOWMAPS
int width, height;
width = height = r_shadowMapImageSize.GetInteger();
for( int i = 0; i < MAX_SHADOWMAP_RESOLUTIONS; i++ )
{
width = height = shadowMapResolutions[i];
globalFramebuffers.shadowFBO[i] = new Framebuffer( va( "_shadowMap%i", i ) , width, height );
globalFramebuffers.shadowFBO[i]->Bind();
glDrawBuffers( 0, NULL );
}
// HDR
int screenWidth = renderSystem->GetWidth();
int screenHeight = renderSystem->GetHeight();
globalFramebuffers.hdrFBO = new Framebuffer( "_hdr", screenWidth, screenHeight );
globalFramebuffers.hdrFBO->Bind();
#if defined(USE_HDR_MSAA)
if( glConfig.multisamples )
{
globalFramebuffers.hdrFBO->AddColorBuffer( GL_RGBA16F, 0, glConfig.multisamples );
globalFramebuffers.hdrFBO->AddDepthBuffer( GL_DEPTH24_STENCIL8, glConfig.multisamples );
globalFramebuffers.hdrFBO->AttachImage2D( GL_TEXTURE_2D_MULTISAMPLE, globalImages->currentRenderHDRImage, 0 );
globalFramebuffers.hdrFBO->AttachImageDepth( GL_TEXTURE_2D_MULTISAMPLE, globalImages->currentDepthImage );
}
else
#endif
{
globalFramebuffers.hdrFBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.hdrFBO->AddDepthBuffer( GL_DEPTH24_STENCIL8 );
globalFramebuffers.hdrFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImage, 0 );
globalFramebuffers.hdrFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->currentDepthImage );
}
globalFramebuffers.hdrFBO->Check();
// HDR no MSAA
#if defined(USE_HDR_MSAA)
globalFramebuffers.hdrNonMSAAFBO = new Framebuffer( "_hdrNoMSAA", screenWidth, screenHeight );
globalFramebuffers.hdrNonMSAAFBO->Bind();
globalFramebuffers.hdrNonMSAAFBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.hdrNonMSAAFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImageNoMSAA, 0 );
globalFramebuffers.hdrNonMSAAFBO->Check();
#endif
// HDR CUBEMAP CAPTURE
globalFramebuffers.envprobeFBO = new Framebuffer( "_envprobeRender", ENVPROBE_CAPTURE_SIZE, ENVPROBE_CAPTURE_SIZE );
globalFramebuffers.envprobeFBO->Bind();
globalFramebuffers.envprobeFBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.envprobeFBO->AddDepthBuffer( GL_DEPTH24_STENCIL8 );
globalFramebuffers.envprobeFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->envprobeHDRImage, 0 );
globalFramebuffers.envprobeFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->envprobeDepthImage );
globalFramebuffers.envprobeFBO->Check();
// HDR DOWNSCALE
globalFramebuffers.hdr64FBO = new Framebuffer( "_hdr64", 64, 64 );
globalFramebuffers.hdr64FBO->Bind();
globalFramebuffers.hdr64FBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.hdr64FBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImage64, 0 );
globalFramebuffers.hdr64FBO->Check();
// BLOOM
for( int i = 0; i < MAX_BLOOM_BUFFERS; i++ )
{
globalFramebuffers.bloomRenderFBO[i] = new Framebuffer( va( "_bloomRender%i", i ), screenWidth, screenHeight );
globalFramebuffers.bloomRenderFBO[i]->Bind();
globalFramebuffers.bloomRenderFBO[i]->AddColorBuffer( GL_RGBA8, 0 );
globalFramebuffers.bloomRenderFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->bloomRenderImage[i], 0 );
globalFramebuffers.bloomRenderFBO[i]->Check();
}
// TODO(Stephen): change the name from glow to something else. Or make these dynamic.
// General framebuffers
for( int i = 0; i < MAX_GLOW_BUFFERS; i++ )
{
globalFramebuffers.glowFBO[i] = new Framebuffer( va( "_glowImage%i", i ), screenWidth, screenHeight );
globalFramebuffers.glowFBO[i]->Bind();
globalFramebuffers.glowFBO[i]->AddColorBuffer( GL_RGBA8, 0 );
globalFramebuffers.glowFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->glowImage[i], 0 );
globalFramebuffers.glowFBO[i]->AddStencilBuffer( GL_STENCIL_INDEX ); // stencil buffer for gui masks
//globalFramebuffers.glowFBO[i]->AddDepthBuffer( GL_DEPTH24_STENCIL8 ); // probably don't need depth?
//globalFramebuffers.envprobeFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->glowDepthImage[i] );
globalFramebuffers.glowFBO[i]->Check();
}
// TRANSPARENCY OIT
globalFramebuffers.transparencyFBO = new Framebuffer( "_transparencyImage", screenWidth, screenHeight );
globalFramebuffers.transparencyFBO->Bind();
globalFramebuffers.transparencyFBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.transparencyFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->accumTransparencyImage, 0 );
globalFramebuffers.transparencyFBO->AddColorBuffer( GL_R8, 1 );
globalFramebuffers.transparencyFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->revealTransparencyImage, 1 );
globalFramebuffers.transparencyFBO->Check();
// AMBIENT OCCLUSION
for( int i = 0; i < MAX_SSAO_BUFFERS; i++ )
{
globalFramebuffers.ambientOcclusionFBO[i] = new Framebuffer( va( "_aoRender%i", i ), screenWidth, screenHeight );
globalFramebuffers.ambientOcclusionFBO[i]->Bind();
globalFramebuffers.ambientOcclusionFBO[i]->AddColorBuffer( GL_RGBA8, 0 );
globalFramebuffers.ambientOcclusionFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->ambientOcclusionImage[i], 0 );
globalFramebuffers.ambientOcclusionFBO[i]->Check();
}
// HIERARCHICAL Z BUFFER
for( int i = 0; i < MAX_HIERARCHICAL_ZBUFFERS; i++ )
{
globalFramebuffers.csDepthFBO[i] = new Framebuffer( va( "_csz%i", i ), screenWidth / ( 1 << i ), screenHeight / ( 1 << i ) );
globalFramebuffers.csDepthFBO[i]->Bind();
globalFramebuffers.csDepthFBO[i]->AddColorBuffer( GL_R32F, 0 );
globalFramebuffers.csDepthFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->hierarchicalZbufferImage, 0, i );
globalFramebuffers.csDepthFBO[i]->Check();
}
// GEOMETRY BUFFER
globalFramebuffers.geometryBufferFBO = new Framebuffer( "_gbuffer", screenWidth, screenHeight );
globalFramebuffers.geometryBufferFBO->Bind();
globalFramebuffers.geometryBufferFBO->AddColorBuffer( GL_RGBA16F, 0 );
globalFramebuffers.geometryBufferFBO->AddDepthBuffer( GL_DEPTH24_STENCIL8 );
// it is ideal to share the depth buffer between the HDR main context and the geometry render target
globalFramebuffers.geometryBufferFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentNormalsImage, 0 );
globalFramebuffers.geometryBufferFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->currentDepthImage );
globalFramebuffers.geometryBufferFBO->Check();
// SMAA
globalFramebuffers.smaaEdgesFBO = new Framebuffer( "_smaaEdges", screenWidth, screenHeight );
globalFramebuffers.smaaEdgesFBO->Bind();
globalFramebuffers.smaaEdgesFBO->AddColorBuffer( GL_RGBA8, 0 );
globalFramebuffers.smaaEdgesFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->smaaEdgesImage, 0 );
globalFramebuffers.smaaEdgesFBO->Check();
globalFramebuffers.smaaBlendFBO = new Framebuffer( "_smaaBlend", screenWidth, screenHeight );
globalFramebuffers.smaaBlendFBO->Bind();
globalFramebuffers.smaaBlendFBO->AddColorBuffer( GL_RGBA8, 0 );
globalFramebuffers.smaaBlendFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->smaaBlendImage, 0 );
globalFramebuffers.smaaBlendFBO->Check();
Unbind();
}
void Framebuffer::CheckFramebuffers()
{
int screenWidth = renderSystem->GetWidth();
int screenHeight = renderSystem->GetHeight();
if( globalFramebuffers.hdrFBO->GetWidth() != screenWidth || globalFramebuffers.hdrFBO->GetHeight() != screenHeight )
{
Unbind();
// HDR
globalImages->currentRenderHDRImage->Resize( screenWidth, screenHeight );
globalImages->currentDepthImage->Resize( screenWidth, screenHeight );
#if defined(USE_HDR_MSAA)
if( glConfig.multisamples )
{
globalImages->currentRenderHDRImageNoMSAA->Resize( screenWidth, screenHeight );
globalFramebuffers.hdrNonMSAAFBO->Bind();
globalFramebuffers.hdrNonMSAAFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImageNoMSAA, 0 );
globalFramebuffers.hdrNonMSAAFBO->Check();
globalFramebuffers.hdrNonMSAAFBO->width = screenWidth;
globalFramebuffers.hdrNonMSAAFBO->height = screenHeight;
globalFramebuffers.hdrFBO->Bind();
globalFramebuffers.hdrFBO->AttachImage2D( GL_TEXTURE_2D_MULTISAMPLE, globalImages->currentRenderHDRImage, 0 );
globalFramebuffers.hdrFBO->AttachImageDepth( GL_TEXTURE_2D_MULTISAMPLE, globalImages->currentDepthImage );
globalFramebuffers.hdrFBO->Check();
}
else
#endif
{
globalFramebuffers.hdrFBO->Bind();
globalFramebuffers.hdrFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImage, 0 );
globalFramebuffers.hdrFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->currentDepthImage );
globalFramebuffers.hdrFBO->Check();
}
globalFramebuffers.hdrFBO->width = screenWidth;
globalFramebuffers.hdrFBO->height = screenHeight;
// HDR quarter
/*
globalImages->currentRenderHDRImageQuarter->Resize( screenWidth / 4, screenHeight / 4 );
globalFramebuffers.hdrQuarterFBO->Bind();
globalFramebuffers.hdrQuarterFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentRenderHDRImageQuarter, 0 );
globalFramebuffers.hdrQuarterFBO->Check();
*/
// BLOOM
for( int i = 0; i < MAX_BLOOM_BUFFERS; i++ )
{
globalImages->bloomRenderImage[i]->Resize( screenWidth / 4, screenHeight / 4 );
globalFramebuffers.bloomRenderFBO[i]->width = screenWidth / 4;
globalFramebuffers.bloomRenderFBO[i]->height = screenHeight / 4;
globalFramebuffers.bloomRenderFBO[i]->Bind();
globalFramebuffers.bloomRenderFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->bloomRenderImage[i], 0 );
globalFramebuffers.bloomRenderFBO[i]->Check();
}
// GLOW
for( int i = 0; i < MAX_GLOW_BUFFERS; i++ )
{
globalFramebuffers.glowFBO[i]->Bind();
globalFramebuffers.glowFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->glowImage[i], 0 );
globalFramebuffers.glowFBO[i]->AttachImageDepth( GL_TEXTURE_2D, globalImages->glowDepthImage[i] );
globalFramebuffers.glowFBO[i]->Check();
}
// AMBIENT OCCLUSION
for( int i = 0; i < MAX_SSAO_BUFFERS; i++ )
{
globalImages->ambientOcclusionImage[i]->Resize( screenWidth, screenHeight );
globalFramebuffers.ambientOcclusionFBO[i]->width = screenWidth;
globalFramebuffers.ambientOcclusionFBO[i]->height = screenHeight;
globalFramebuffers.ambientOcclusionFBO[i]->Bind();
globalFramebuffers.ambientOcclusionFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->ambientOcclusionImage[i], 0 );
globalFramebuffers.ambientOcclusionFBO[i]->Check();
}
// HIERARCHICAL Z BUFFER
globalImages->hierarchicalZbufferImage->Resize( screenWidth, screenHeight );
for( int i = 0; i < MAX_HIERARCHICAL_ZBUFFERS; i++ )
{
globalFramebuffers.csDepthFBO[i]->width = screenWidth / ( 1 << i );
globalFramebuffers.csDepthFBO[i]->height = screenHeight / ( 1 << i );
globalFramebuffers.csDepthFBO[i]->Bind();
globalFramebuffers.csDepthFBO[i]->AttachImage2D( GL_TEXTURE_2D, globalImages->hierarchicalZbufferImage, 0, i );
globalFramebuffers.csDepthFBO[i]->Check();
}
// GEOMETRY BUFFER
globalImages->currentNormalsImage->Resize( screenWidth, screenHeight );
globalFramebuffers.geometryBufferFBO->width = screenWidth;
globalFramebuffers.geometryBufferFBO->height = screenHeight;
globalFramebuffers.geometryBufferFBO->Bind();
globalFramebuffers.geometryBufferFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->currentNormalsImage, 0 );
globalFramebuffers.geometryBufferFBO->AttachImageDepth( GL_TEXTURE_2D, globalImages->currentDepthImage );
globalFramebuffers.geometryBufferFBO->Check();
// SMAA
globalImages->smaaEdgesImage->Resize( screenWidth, screenHeight );
globalFramebuffers.smaaEdgesFBO->width = screenWidth;
globalFramebuffers.smaaEdgesFBO->height = screenHeight;
globalFramebuffers.smaaEdgesFBO->Bind();
globalFramebuffers.smaaEdgesFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->smaaEdgesImage, 0 );
globalFramebuffers.smaaEdgesFBO->Check();
globalImages->smaaBlendImage->Resize( screenWidth, screenHeight );
globalFramebuffers.smaaBlendFBO->width = screenWidth;
globalFramebuffers.smaaBlendFBO->height = screenHeight;
globalFramebuffers.smaaBlendFBO->Bind();
globalFramebuffers.smaaBlendFBO->AttachImage2D( GL_TEXTURE_2D, globalImages->smaaBlendImage, 0 );
globalFramebuffers.smaaBlendFBO->Check();
Unbind();
}
}
void Framebuffer::Shutdown()
{
framebuffers.DeleteContents( true );
}
void Framebuffer::Bind()
{
RENDERLOG_PRINTF( "Framebuffer::Bind( %s )\n", fboName.c_str() );
if( tr.backend.currentFramebuffer != this )
{
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer );
tr.backend.currentFramebuffer = this;
}
}
bool Framebuffer::IsBound()
{
return ( tr.backend.currentFramebuffer == this );
}
void Framebuffer::Unbind()
{
RENDERLOG_PRINTF( "Framebuffer::Unbind()\n" );
//if(tr.backend.framebuffer != NULL)
{
glBindFramebuffer( GL_FRAMEBUFFER, 0 );
glBindRenderbuffer( GL_RENDERBUFFER, 0 );
tr.backend.currentFramebuffer = NULL;
}
}
bool Framebuffer::IsDefaultFramebufferActive()
{
return ( tr.backend.currentFramebuffer == NULL );
}
Framebuffer* Framebuffer::GetActiveFramebuffer()
{
return tr.backend.currentFramebuffer;
}
void Framebuffer::AddColorBuffer( int format, int index, int multiSamples )
{
if( index < 0 || index >= glConfig.maxColorAttachments )
{
common->Warning( "Framebuffer::AddColorBuffer( %s ): bad index = %i", fboName.c_str(), index );
return;
}
colorFormat = format;
bool notCreatedYet = colorBuffers[index] == 0;
if( notCreatedYet )
{
glGenRenderbuffers( 1, &colorBuffers[index] );
}
glBindRenderbuffer( GL_RENDERBUFFER, colorBuffers[index] );
if( multiSamples > 0 )
{
glRenderbufferStorageMultisample( GL_RENDERBUFFER, multiSamples, format, width, height );
msaaSamples = true;
}
else
{
glRenderbufferStorage( GL_RENDERBUFFER, format, width, height );
}
if( notCreatedYet )
{
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + index, GL_RENDERBUFFER, colorBuffers[index] );
}
GL_CheckErrors();
}
void Framebuffer::AddDepthBuffer( int format, int multiSamples )
{
depthFormat = format;
bool notCreatedYet = depthBuffer == 0;
if( notCreatedYet )
{
glGenRenderbuffers( 1, &depthBuffer );
}
glBindRenderbuffer( GL_RENDERBUFFER, depthBuffer );
if( multiSamples > 0 )
{
glRenderbufferStorageMultisample( GL_RENDERBUFFER, multiSamples, format, width, height );
msaaSamples = true;
}
else
{
glRenderbufferStorage( GL_RENDERBUFFER, format, width, height );
}
if( notCreatedYet )
{
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, depthBuffer );
}
GL_CheckErrors();
}
void Framebuffer::AddStencilBuffer( int format, int multiSamples )
{
stencilFormat = format;
bool notCreatedYet = stencilBuffer == 0;
if( notCreatedYet )
{
glGenRenderbuffers( 1, &stencilBuffer );
}
glBindRenderbuffer( GL_RENDERBUFFER, stencilBuffer );
if( multiSamples > 0 )
{
glRenderbufferStorageMultisample( GL_RENDERBUFFER, multiSamples, format, width, height );
msaaSamples = true;
}
else
{
glRenderbufferStorage( GL_RENDERBUFFER, format, width, height );
}
if( notCreatedYet )
{
glFramebufferRenderbuffer( GL_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_RENDERBUFFER, stencilBuffer );
}
GL_CheckErrors();
}
void Framebuffer::AttachImage2D( int target, idImage* image, int index, int mipmapLod )
{
if( ( target != GL_TEXTURE_2D ) && ( target != GL_TEXTURE_2D_MULTISAMPLE ) && ( target < GL_TEXTURE_CUBE_MAP_POSITIVE_X || target > GL_TEXTURE_CUBE_MAP_NEGATIVE_Z ) )
{
common->Warning( "Framebuffer::AttachImage2D( %s ): invalid target", fboName.c_str() );
return;
}
if( index < 0 || index >= glConfig.maxColorAttachments )
{
common->Warning( "Framebuffer::AttachImage2D( %s ): bad index = %i", fboName.c_str(), index );
return;
}
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0 + index, target, image->texnum, mipmapLod );
image->opts.isRenderTarget = true;
}
void Framebuffer::AttachImageDepth( int target, idImage* image )
{
if( ( target != GL_TEXTURE_2D ) && ( target != GL_TEXTURE_2D_MULTISAMPLE ) )
{
common->Warning( "Framebuffer::AttachImageDepth( %s ): invalid target", fboName.c_str() );
return;
}
glFramebufferTexture2D( GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, target, image->texnum, 0 );
image->opts.isRenderTarget = true;
}
void Framebuffer::AttachImageDepthLayer( idImage* image, int layer )
{
glFramebufferTextureLayer( GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, image->texnum, 0, layer );
image->opts.isRenderTarget = true;
}
void Framebuffer::Check()
{
int prev;
glGetIntegerv( GL_FRAMEBUFFER_BINDING, &prev );
glBindFramebuffer( GL_FRAMEBUFFER, frameBuffer );
int status = glCheckFramebufferStatus( GL_FRAMEBUFFER );
if( status == GL_FRAMEBUFFER_COMPLETE )
{
glBindFramebuffer( GL_FRAMEBUFFER, prev );
return;
}
// something went wrong
switch( status )
{
case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, incomplete attachment", fboName.c_str() );
break;
case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, missing attachment", fboName.c_str() );
break;
case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, missing draw buffer", fboName.c_str() );
break;
case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, missing read buffer", fboName.c_str() );
break;
case GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, missing layer targets", fboName.c_str() );
break;
case GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE:
common->Error( "Framebuffer::Check( %s ): Framebuffer incomplete, missing multisample", fboName.c_str() );
break;
case GL_FRAMEBUFFER_UNSUPPORTED:
common->Error( "Framebuffer::Check( %s ): Unsupported framebuffer format", fboName.c_str() );
break;
default:
common->Error( "Framebuffer::Check( %s ): Unknown error 0x%X", fboName.c_str(), status );
break;
}
glBindFramebuffer( GL_FRAMEBUFFER, prev );
}
#endif // #if !defined(USE_VULKAN)
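Every render target created in Init() above follows the same construction pattern; here is a compact sketch for reference. The name "_example", the dimensions, and the image pointer are placeholders, the methods are the ones deleted in this file, and Framebuffer::Unbind() is taken to be callable without an instance, matching the bare call at the end of Init().

	Framebuffer* fbo = new Framebuffer( "_example", width, height );	// also registers itself in the framebuffers list
	fbo->Bind();
	fbo->AddColorBuffer( GL_RGBA8, 0 );	// renderbuffer storage for color attachment 0
	fbo->AttachImage2D( GL_TEXTURE_2D, image, 0 );	// or render into a sampleable texture instead
	fbo->Check();	// common->Error() if the framebuffer is incomplete
	Framebuffer::Unbind();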


@@ -1,900 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013-2016 Robert Beckebans
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
/*
================================================================================================
Contains the Image implementation for OpenGL.
================================================================================================
*/
#include "../RenderCommon.h"
/*
====================
idImage::idImage
====================
*/
idImage::idImage( const char* name ) : imgName( name )
{
texnum = TEXTURE_NOT_LOADED;
internalFormat = 0;
dataFormat = 0;
dataType = 0;
generatorFunction = NULL;
filter = TF_DEFAULT;
repeat = TR_REPEAT;
usage = TD_DEFAULT;
cubeFiles = CF_2D;
cubeMapSize = 0;
referencedOutsideLevelLoad = false;
levelLoadReferenced = false;
defaulted = false;
sourceFileTime = FILE_NOT_FOUND_TIMESTAMP;
binaryFileTime = FILE_NOT_FOUND_TIMESTAMP;
refCount = 0;
}
/*
====================
idImage::~idImage
====================
*/
idImage::~idImage()
{
PurgeImage();
}
/*
====================
idImage::IsLoaded
====================
*/
bool idImage::IsLoaded() const
{
return texnum != TEXTURE_NOT_LOADED;
}
/*
==============
Bind
Automatically enables 2D mapping or cube mapping if needed
==============
*/
void idImage::Bind()
{
RENDERLOG_PRINTF( "idImage::Bind( %s )\n", GetName() );
// load the image if necessary (FIXME: not SMP safe!)
// RB: don't try again if last time failed
if( !IsLoaded() && !defaulted )
{
// load the image on demand here, which isn't our normal game operating mode
ActuallyLoadImage( true );
}
const int texUnit = tr.backend.GetCurrentTextureUnit();
// RB: added support for more types
tmu_t* tmu = &glcontext.tmu[texUnit];
// bind the texture
if( opts.textureType == TT_2D )
{
if( tmu->current2DMap != texnum )
{
tmu->current2DMap = texnum;
#if !defined(USE_GLES2) && !defined(USE_GLES3)
if( glConfig.directStateAccess )
{
glBindMultiTextureEXT( GL_TEXTURE0 + texUnit, GL_TEXTURE_2D, texnum );
}
else
#endif
{
glActiveTexture( GL_TEXTURE0 + texUnit );
glBindTexture( GL_TEXTURE_2D, texnum );
}
}
}
else if( opts.textureType == TT_CUBIC )
{
if( tmu->currentCubeMap != texnum )
{
tmu->currentCubeMap = texnum;
#if !defined(USE_GLES2) && !defined(USE_GLES3)
if( glConfig.directStateAccess )
{
glBindMultiTextureEXT( GL_TEXTURE0 + texUnit, GL_TEXTURE_CUBE_MAP, texnum );
}
else
#endif
{
glActiveTexture( GL_TEXTURE0 + texUnit );
glBindTexture( GL_TEXTURE_CUBE_MAP, texnum );
}
}
}
else if( opts.textureType == TT_2D_ARRAY )
{
if( tmu->current2DArray != texnum )
{
tmu->current2DArray = texnum;
#if !defined(USE_GLES2) && !defined(USE_GLES3)
if( glConfig.directStateAccess )
{
glBindMultiTextureEXT( GL_TEXTURE0 + texUnit, GL_TEXTURE_2D_ARRAY, texnum );
}
else
#endif
{
glActiveTexture( GL_TEXTURE0 + texUnit );
glBindTexture( GL_TEXTURE_2D_ARRAY, texnum );
}
}
}
else if( opts.textureType == TT_2D_MULTISAMPLE )
{
if( tmu->current2DMap != texnum )
{
tmu->current2DMap = texnum;
#if !defined(USE_GLES2) && !defined(USE_GLES3)
if( glConfig.directStateAccess )
{
glBindMultiTextureEXT( GL_TEXTURE0 + texUnit, GL_TEXTURE_2D_MULTISAMPLE, texnum );
}
else
#endif
{
glActiveTexture( GL_TEXTURE0 + texUnit );
glBindTexture( GL_TEXTURE_2D_MULTISAMPLE, texnum );
}
}
}
// RB end
}
/*
====================
CopyFramebuffer
====================
*/
void idImage::CopyFramebuffer( int x, int y, int imageWidth, int imageHeight )
{
int target = GL_TEXTURE_2D;
switch( opts.textureType )
{
case TT_2D:
target = GL_TEXTURE_2D;
break;
case TT_CUBIC:
target = GL_TEXTURE_CUBE_MAP;
break;
case TT_2D_ARRAY:
target = GL_TEXTURE_2D_ARRAY;
break;
case TT_2D_MULTISAMPLE:
target = GL_TEXTURE_2D_MULTISAMPLE;
break;
default:
//idLib::FatalError( "%s: bad texture type %d", GetName(), opts.textureType );
return;
}
glBindTexture( target, texnum );
#if !defined(USE_GLES2)
if( Framebuffer::IsDefaultFramebufferActive() )
{
glReadBuffer( GL_BACK );
}
#endif
opts.width = imageWidth;
opts.height = imageHeight;
#if defined(USE_GLES2)
glCopyTexImage2D( GL_TEXTURE_2D, 0, GL_RGBA, x, y, imageWidth, imageHeight, 0 );
#else
if( r_useHDR.GetBool() && globalFramebuffers.hdrFBO->IsBound() )
{
//if( backEnd.glState.currentFramebuffer != NULL && backEnd.glState.currentFramebuffer->IsMultiSampled() )
#if defined(USE_HDR_MSAA)
if( globalFramebuffers.hdrFBO->IsMultiSampled() )
{
glBindFramebuffer( GL_READ_FRAMEBUFFER, globalFramebuffers.hdrFBO->GetFramebuffer() );
glBindFramebuffer( GL_DRAW_FRAMEBUFFER, globalFramebuffers.hdrNonMSAAFBO->GetFramebuffer() );
glBlitFramebuffer( 0, 0, glConfig.nativeScreenWidth, glConfig.nativeScreenHeight,
0, 0, glConfig.nativeScreenWidth, glConfig.nativeScreenHeight,
GL_COLOR_BUFFER_BIT,
GL_LINEAR );
globalFramebuffers.hdrNonMSAAFBO->Bind();
glCopyTexImage2D( target, 0, GL_RGBA16F, x, y, imageWidth, imageHeight, 0 );
globalFramebuffers.hdrFBO->Bind();
}
else
#endif
{
glCopyTexImage2D( target, 0, GL_RGBA16F, x, y, imageWidth, imageHeight, 0 );
}
}
else
{
glCopyTexImage2D( target, 0, GL_RGBA8, x, y, imageWidth, imageHeight, 0 );
}
#endif
// these shouldn't be necessary if the image was initialized properly
glTexParameterf( target, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf( target, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
glTexParameterf( target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameterf( target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
tr.backend.pc.c_copyFrameBuffer++;
}
/*
====================
CopyDepthbuffer
====================
*/
void idImage::CopyDepthbuffer( int x, int y, int imageWidth, int imageHeight )
{
glBindTexture( ( opts.textureType == TT_CUBIC ) ? GL_TEXTURE_CUBE_MAP : GL_TEXTURE_2D, texnum );
opts.width = imageWidth;
opts.height = imageHeight;
glCopyTexImage2D( GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, x, y, imageWidth, imageHeight, 0 );
tr.backend.pc.c_copyFrameBuffer++;
}
/*
========================
idImage::SubImageUpload
========================
*/
void idImage::SubImageUpload( int mipLevel, int x, int y, int z, int width, int height, const void* pic, int pixelPitch )
{
assert( x >= 0 && y >= 0 && mipLevel >= 0 && width >= 0 && height >= 0 && mipLevel < opts.numLevels );
int compressedSize = 0;
if( IsCompressed() )
{
assert( !( x & 3 ) && !( y & 3 ) );
// compressed size may be larger than the dimensions due to padding to quads
int quadW = ( width + 3 ) & ~3;
int quadH = ( height + 3 ) & ~3;
compressedSize = quadW * quadH * BitsForFormat( opts.format ) / 8;
int padW = ( opts.width + 3 ) & ~3;
int padH = ( opts.height + 3 ) & ~3;
assert( x + width <= padW && y + height <= padH );
// upload the non-aligned value, OpenGL understands that there
// will be padding
if( x + width > opts.width )
{
width = opts.width - x;
}
if( y + height > opts.height )
{
height = opts.height - y;
}
}
else
{
assert( x + width <= opts.width && y + height <= opts.height );
}
int target;
int uploadTarget;
if( opts.textureType == TT_2D )
{
target = GL_TEXTURE_2D;
uploadTarget = GL_TEXTURE_2D;
}
else if( opts.textureType == TT_CUBIC )
{
target = GL_TEXTURE_CUBE_MAP;
uploadTarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + z;
}
else
{
assert( !"invalid opts.textureType" );
target = GL_TEXTURE_2D;
uploadTarget = GL_TEXTURE_2D;
}
glBindTexture( target, texnum );
if( pixelPitch != 0 )
{
glPixelStorei( GL_UNPACK_ROW_LENGTH, pixelPitch );
}
if( opts.format == FMT_RGB565 )
{
#if !defined(USE_GLES3)
glPixelStorei( GL_UNPACK_SWAP_BYTES, GL_TRUE );
#endif
}
#if defined(DEBUG) || defined(__ANDROID__)
GL_CheckErrors();
#endif
if( IsCompressed() )
{
glCompressedTexSubImage2D( uploadTarget, mipLevel, x, y, width, height, internalFormat, compressedSize, pic );
}
else
{
// make sure the pixel store alignment is correct so that lower mips get created
// properly for odd shaped textures - this fixes the mip mapping issues with
// fonts
int unpackAlignment = width * BitsForFormat( ( textureFormat_t )opts.format ) / 8;
if( ( unpackAlignment & 3 ) == 0 )
{
glPixelStorei( GL_UNPACK_ALIGNMENT, 4 );
}
else
{
glPixelStorei( GL_UNPACK_ALIGNMENT, 1 );
}
glTexSubImage2D( uploadTarget, mipLevel, x, y, width, height, dataFormat, dataType, pic );
}
#if defined(DEBUG) || defined(__ANDROID__)
GL_CheckErrors();
#endif
if( opts.format == FMT_RGB565 )
{
glPixelStorei( GL_UNPACK_SWAP_BYTES, GL_FALSE );
}
if( pixelPitch != 0 )
{
glPixelStorei( GL_UNPACK_ROW_LENGTH, 0 );
}
}
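/*
========================
The quad padding above, written out for one case: DXT blocks cover 4x4 pixels, so
a region is rounded up to block boundaries before its byte size is computed. An
illustrative example (values chosen here, not taken from the engine):

	// a 10x6 DXT5 region (8 bits per pixel) pads to 12x8 pixels:
	int quadW = ( 10 + 3 ) & ~3;	// 12
	int quadH = ( 6 + 3 ) & ~3;		// 8
	int bytes = quadW * quadH * 8 / 8;	// 96 bytes = 6 blocks of 16 bytes each
========================
*/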
/*
========================
idImage::SetSamplerState
========================
*/
void idImage::SetSamplerState( textureFilter_t tf, textureRepeat_t tr )
{
if( tf == filter && tr == repeat )
{
return;
}
filter = tf;
repeat = tr;
glBindTexture( ( opts.textureType == TT_CUBIC ) ? GL_TEXTURE_CUBE_MAP : GL_TEXTURE_2D, texnum );
SetTexParameters();
}
/*
========================
idImage::SetTexParameters
========================
*/
void idImage::SetTexParameters()
{
int target = GL_TEXTURE_2D;
switch( opts.textureType )
{
case TT_2D:
target = GL_TEXTURE_2D;
break;
case TT_CUBIC:
target = GL_TEXTURE_CUBE_MAP;
break;
// RB begin
case TT_2D_ARRAY:
target = GL_TEXTURE_2D_ARRAY;
break;
case TT_2D_MULTISAMPLE:
//target = GL_TEXTURE_2D_MULTISAMPLE;
//break;
// no texture parameters for MSAA FBO textures
return;
// RB end
default:
idLib::FatalError( "%s: bad texture type %d", GetName(), opts.textureType );
return;
}
// ALPHA, LUMINANCE, LUMINANCE_ALPHA, and INTENSITY have been removed
// in OpenGL 3.2. In order to mimic those modes, we use the swizzle operators
if( opts.colorFormat == CFM_GREEN_ALPHA )
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_GREEN );
}
else if( opts.format == FMT_LUM8 )
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_ONE );
}
else if( opts.format == FMT_L8A8 )//|| opts.format == FMT_RG16F )
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_GREEN );
}
else if( opts.format == FMT_ALPHA )
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_ONE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_RED );
}
else if( opts.format == FMT_INT8 )
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_RED );
}
else
{
glTexParameteri( target, GL_TEXTURE_SWIZZLE_R, GL_RED );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_G, GL_GREEN );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_B, GL_BLUE );
glTexParameteri( target, GL_TEXTURE_SWIZZLE_A, GL_ALPHA );
}
switch( filter )
{
case TF_DEFAULT:
if( r_useTrilinearFiltering.GetBool() )
{
glTexParameterf( target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR );
}
else
{
glTexParameterf( target, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_NEAREST );
}
glTexParameterf( target, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
break;
case TF_LINEAR:
glTexParameterf( target, GL_TEXTURE_MIN_FILTER, GL_LINEAR );
glTexParameterf( target, GL_TEXTURE_MAG_FILTER, GL_LINEAR );
break;
case TF_NEAREST:
case TF_NEAREST_MIPMAP:
glTexParameterf( target, GL_TEXTURE_MIN_FILTER, GL_NEAREST );
glTexParameterf( target, GL_TEXTURE_MAG_FILTER, GL_NEAREST );
break;
default:
common->FatalError( "%s: bad texture filter %d", GetName(), filter );
}
if( glConfig.anisotropicFilterAvailable )
{
// only do aniso filtering on mip mapped images
if( filter == TF_DEFAULT )
{
int aniso = r_maxAnisotropicFiltering.GetInteger();
if( aniso > glConfig.maxTextureAnisotropy )
{
aniso = glConfig.maxTextureAnisotropy;
}
if( aniso < 0 )
{
aniso = 0;
}
glTexParameterf( target, GL_TEXTURE_MAX_ANISOTROPY_EXT, aniso );
}
else
{
glTexParameterf( target, GL_TEXTURE_MAX_ANISOTROPY_EXT, 1 );
}
}
// RB: disabled use of unreliable extension that can make the game look worse but doesn't save any VRAM
/*
if( glConfig.textureLODBiasAvailable && ( usage != TD_FONT ) )
{
// use a blurring LOD bias in combination with high anisotropy to fix our aliasing grate textures...
glTexParameterf( target, GL_TEXTURE_LOD_BIAS_EXT, 0.5 ); //r_lodBias.GetFloat() );
}
*/
// RB end
// set the wrap/clamp modes
switch( repeat )
{
case TR_REPEAT:
glTexParameterf( target, GL_TEXTURE_WRAP_S, GL_REPEAT );
glTexParameterf( target, GL_TEXTURE_WRAP_T, GL_REPEAT );
break;
case TR_CLAMP_TO_ZERO:
{
float color[4] = { 0.0f, 0.0f, 0.0f, 1.0f };
glTexParameterfv( target, GL_TEXTURE_BORDER_COLOR, color );
glTexParameterf( target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER );
glTexParameterf( target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER );
}
break;
case TR_CLAMP_TO_ZERO_ALPHA:
{
float color[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
glTexParameterfv( target, GL_TEXTURE_BORDER_COLOR, color );
glTexParameterf( target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER );
glTexParameterf( target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER );
}
break;
case TR_CLAMP:
glTexParameterf( target, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE );
glTexParameterf( target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
break;
default:
common->FatalError( "%s: bad texture repeat %d", GetName(), repeat );
}
// RB: added shadow compare parameters for shadow map textures
if( opts.format == FMT_SHADOW_ARRAY )
{
//glTexParameteri( target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE );
glTexParameteri( target, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_R_TO_TEXTURE );
glTexParameteri( target, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL );
}
}
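/*
========================
What the swizzles above emulate, for one case: legacy GL_LUMINANCE8 replicated its
single channel into RGB and forced alpha to one. The same sampling behavior on a
core-profile GL_R8 texture, as a compact sketch using the GL 3.3 swizzle API:

	GLint swizzle[4] = { GL_RED, GL_RED, GL_RED, GL_ONE };
	glTexParameteriv( GL_TEXTURE_2D, GL_TEXTURE_SWIZZLE_RGBA, swizzle );
========================
*/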
/*
========================
idImage::AllocImage
Every image will pass through this function. Allocates all the necessary MipMap levels for the
Image, but doesn't put anything in them.
This should not be done during normal game-play, if you can avoid it.
========================
*/
void idImage::AllocImage()
{
GL_CheckErrors();
PurgeImage();
switch( opts.format )
{
case FMT_RGBA8:
internalFormat = GL_RGBA8;
dataFormat = GL_RGBA;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_XRGB8:
internalFormat = GL_RGB;
dataFormat = GL_RGBA;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_RGB565:
internalFormat = GL_RGB;
dataFormat = GL_RGB;
dataType = GL_UNSIGNED_SHORT_5_6_5;
break;
case FMT_ALPHA:
internalFormat = GL_R8;
dataFormat = GL_RED;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_L8A8:
internalFormat = GL_RG8;
dataFormat = GL_RG;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_LUM8:
internalFormat = GL_R8;
dataFormat = GL_RED;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_INT8:
internalFormat = GL_R8;
dataFormat = GL_RED;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_DXT1:
internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
dataFormat = GL_RGBA;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_DXT5:
internalFormat = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
dataFormat = GL_RGBA;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_DEPTH:
internalFormat = GL_DEPTH_COMPONENT;
dataFormat = GL_DEPTH_COMPONENT;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_DEPTH_STENCIL:
internalFormat = GL_DEPTH24_STENCIL8;
dataFormat = GL_DEPTH_STENCIL;
dataType = GL_UNSIGNED_INT_24_8;
break;
case FMT_SHADOW_ARRAY:
internalFormat = GL_DEPTH_COMPONENT;
dataFormat = GL_DEPTH_COMPONENT;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_RG16F:
internalFormat = GL_RG16F;
dataFormat = GL_RG;
dataType = GL_HALF_FLOAT;
break;
case FMT_RGBA16F:
internalFormat = GL_RGBA16F;
dataFormat = GL_RGBA;
dataType = GL_HALF_FLOAT;
break;
case FMT_RGBA32F:
internalFormat = GL_RGBA32F;
dataFormat = GL_RGBA;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_R32F:
internalFormat = GL_R32F;
dataFormat = GL_RED;
dataType = GL_UNSIGNED_BYTE;
break;
case FMT_X16:
internalFormat = GL_INTENSITY16;
dataFormat = GL_LUMINANCE;
dataType = GL_UNSIGNED_SHORT;
break;
case FMT_Y16_X16:
internalFormat = GL_LUMINANCE16_ALPHA16;
dataFormat = GL_LUMINANCE_ALPHA;
dataType = GL_UNSIGNED_SHORT;
break;
// see http://what-when-how.com/Tutorial/topic-615ll9ug/Praise-for-OpenGL-ES-30-Programming-Guide-291.html
case FMT_R11G11B10F:
internalFormat = GL_R11F_G11F_B10F;
dataFormat = GL_RGB;
dataType = GL_UNSIGNED_INT_10F_11F_11F_REV;
break;
default:
idLib::Error( "Unhandled image format %d in %s\n", opts.format, GetName() );
}
// if we don't have a rendering context, just return after we
// have filled in the parms. We must have the values set, or
// an image match from a shader before OpenGL starts would miss
// the generated texture
if( !tr.IsInitialized() )
{
return;
}
// generate the texture number
glGenTextures( 1, ( GLuint* )&texnum );
assert( texnum != TEXTURE_NOT_LOADED );
//----------------------------------------------------
// allocate all the mip levels with NULL data
//----------------------------------------------------
int numSides;
int target;
int uploadTarget;
if( opts.textureType == TT_2D )
{
target = uploadTarget = GL_TEXTURE_2D;
numSides = 1;
}
else if( opts.textureType == TT_CUBIC )
{
target = GL_TEXTURE_CUBE_MAP;
uploadTarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X;
numSides = 6;
}
// RB begin
else if( opts.textureType == TT_2D_ARRAY )
{
target = GL_TEXTURE_2D_ARRAY;
uploadTarget = GL_TEXTURE_2D_ARRAY;
numSides = 6;
}
else if( opts.textureType == TT_2D_MULTISAMPLE )
{
target = GL_TEXTURE_2D_MULTISAMPLE;
uploadTarget = GL_TEXTURE_2D_MULTISAMPLE;
numSides = 1;
}
// RB end
else
{
assert( !"opts.textureType" );
target = uploadTarget = GL_TEXTURE_2D;
numSides = 1;
}
glBindTexture( target, texnum );
if( opts.textureType == TT_2D_ARRAY )
{
glTexImage3D( uploadTarget, 0, internalFormat, opts.width, opts.height, numSides, 0, dataFormat, GL_UNSIGNED_BYTE, NULL );
}
else if( opts.textureType == TT_2D_MULTISAMPLE )
{
glTexImage2DMultisample( uploadTarget, opts.samples, internalFormat, opts.width, opts.height, GL_FALSE );
}
else
{
for( int side = 0; side < numSides; side++ )
{
int w = opts.width;
int h = opts.height;
if( opts.textureType == TT_CUBIC )
{
h = w;
}
for( int level = 0; level < opts.numLevels; level++ )
{
// clear out any previous error
GL_CheckErrors();
if( IsCompressed() )
{
int compressedSize = ( ( ( w + 3 ) / 4 ) * ( ( h + 3 ) / 4 ) * int64( 16 ) * BitsForFormat( opts.format ) ) / 8;
// Even though the OpenGL specification allows the 'data' pointer to be NULL, for some
// drivers we actually need to upload data to get it to allocate the texture.
// However, on 32-bit systems we may fail to allocate a large block of memory for large
// textures. We handle this case by using HeapAlloc directly and allowing the allocation
// to fail in which case we simply pass down NULL to glCompressedTexImage2D and hope for the best.
// As of 2011-10-6 using NVIDIA hardware and drivers we have to allocate the memory with HeapAlloc
// with the exact size otherwise large image allocation (for instance for physical page textures)
// may fail on Vista 32-bit.
// RB begin
#if defined(_WIN32)
void* data = HeapAlloc( GetProcessHeap(), 0, compressedSize );
glCompressedTexImage2D( uploadTarget + side, level, internalFormat, w, h, 0, compressedSize, data );
if( data != NULL )
{
HeapFree( GetProcessHeap(), 0, data );
}
#else
byte* data = ( byte* )Mem_Alloc( compressedSize, TAG_TEMP );
glCompressedTexImage2D( uploadTarget + side, level, internalFormat, w, h, 0, compressedSize, data );
if( data != NULL )
{
Mem_Free( data );
}
#endif
// RB end
}
else
{
glTexImage2D( uploadTarget + side, level, internalFormat, w, h, 0, dataFormat, dataType, NULL );
}
GL_CheckErrors();
w = Max( 1, w >> 1 );
h = Max( 1, h >> 1 );
}
}
glTexParameteri( target, GL_TEXTURE_MAX_LEVEL, opts.numLevels - 1 );
}
// see if we messed anything up
GL_CheckErrors();
SetTexParameters();
GL_CheckErrors();
}
/*
========================
idImage::PurgeImage
========================
*/
void idImage::PurgeImage()
{
if( texnum != TEXTURE_NOT_LOADED )
{
glDeleteTextures( 1, ( GLuint* )&texnum ); // this should be the ONLY place it is ever called!
texnum = TEXTURE_NOT_LOADED;
}
// clear all the current binding caches, so the next bind will do a real one
for( int i = 0; i < MAX_MULTITEXTURE_UNITS; i++ )
{
glcontext.tmu[i].current2DMap = TEXTURE_NOT_LOADED;
glcontext.tmu[i].current2DArray = TEXTURE_NOT_LOADED;
glcontext.tmu[i].currentCubeMap = TEXTURE_NOT_LOADED;
}
// reset for reloading images
defaulted = false;
}
/*
========================
idImage::Resize
========================
*/
void idImage::Resize( int width, int height )
{
if( opts.width == width && opts.height == height )
{
return;
}
opts.width = width;
opts.height = height;
AllocImage();
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -1,585 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013-2018 Robert Beckebans
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
#include "../RenderCommon.h"
idCVar r_displayGLSLCompilerMessages( "r_displayGLSLCompilerMessages", "1", CVAR_BOOL | CVAR_ARCHIVE, "Show info messages the GPU driver outputs when compiling the shaders" );
idCVar r_alwaysExportGLSL( "r_alwaysExportGLSL", "1", CVAR_BOOL, "" );
/*
========================
idRenderProgManager::StartFrame
========================
*/
void idRenderProgManager::StartFrame()
{
}
/*
================================================================================================
idRenderProgManager::BindProgram
================================================================================================
*/
void idRenderProgManager::BindProgram( int index )
{
if( current == index )
{
return;
}
current = index;
RENDERLOG_PRINTF( "Binding GLSL Program %s\n", renderProgs[ index ].name.c_str() );
glUseProgram( renderProgs[ index ].progId );
}
/*
================================================================================================
idRenderProgManager::Unbind
================================================================================================
*/
void idRenderProgManager::Unbind()
{
current = -1;
glUseProgram( 0 );
}
/*
================================================================================================
idRenderProgManager::LoadShader
================================================================================================
*/
void idRenderProgManager::LoadShader( int index, rpStage_t stage )
{
if( shaders[index].progId != INVALID_PROGID )
{
return; // Already loaded
}
LoadShader( shaders[index] );
}
/*
================================================================================================
idRenderProgManager::LoadShader
================================================================================================
*/
void idRenderProgManager::LoadShader( shader_t& shader )
{
idStr inFile;
idStr outFileHLSL;
idStr outFileGLSL;
idStr outFileUniforms;
// RB: replaced backslashes
inFile.Format( "renderprogs/%s", shader.name.c_str() );
inFile.StripFileExtension();
outFileHLSL.Format( "renderprogs/hlsl/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
outFileHLSL.StripFileExtension();
switch( glConfig.driverType )
{
case GLDRV_OPENGL_MESA:
{
outFileGLSL.Format( "renderprogs/glsles-3_00/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
outFileUniforms.Format( "renderprogs/glsles-3_00/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
break;
}
case GLDRV_VULKAN:
{
outFileGLSL.Format( "renderprogs/vkglsl/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
outFileUniforms.Format( "renderprogs/vkglsl/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
break;
}
default:
{
//SRS - OSX supports only up to GLSL 4.1
#if defined(__APPLE__)
outFileGLSL.Format( "renderprogs/glsl-4_10/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
outFileUniforms.Format( "renderprogs/glsl-4_10/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
#else
outFileGLSL.Format( "renderprogs/glsl-4_50/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
outFileUniforms.Format( "renderprogs/glsl-4_50/%s%s", shader.name.c_str(), shader.nameOutSuffix.c_str() );
#endif
}
}
outFileGLSL.StripFileExtension();
outFileUniforms.StripFileExtension();
GLenum glTarget;
if( shader.stage == SHADER_STAGE_FRAGMENT )
{
glTarget = GL_FRAGMENT_SHADER;
inFile += ".ps.hlsl";
outFileHLSL += ".ps.hlsl";
outFileGLSL += ".frag";
outFileUniforms += ".frag.layout";
}
else
{
glTarget = GL_VERTEX_SHADER;
inFile += ".vs.hlsl";
outFileHLSL += ".vs.hlsl";
outFileGLSL += ".vert";
outFileUniforms += ".vert.layout";
}
// first check whether we already have a valid GLSL file and compare it to the HLSL timestamp
ID_TIME_T hlslTimeStamp;
int hlslFileLength = fileSystem->ReadFile( inFile.c_str(), NULL, &hlslTimeStamp );
ID_TIME_T glslTimeStamp;
int glslFileLength = fileSystem->ReadFile( outFileGLSL.c_str(), NULL, &glslTimeStamp );
// if the glsl file doesn't exist or we have a newer HLSL file we need to recreate the glsl file.
idStr programGLSL;
idStr programUniforms;
if( ( glslFileLength <= 0 ) || ( hlslTimeStamp != FILE_NOT_FOUND_TIMESTAMP && hlslTimeStamp > glslTimeStamp ) || r_alwaysExportGLSL.GetBool() )
{
const char* hlslFileBuffer = NULL;
int len = 0;
if( hlslFileLength <= 0 )
{
// the HLSL file doesn't exist on disk, so fall back to the embedded source shader and bail out if that is missing too
hlslFileBuffer = FindEmbeddedSourceShader( inFile.c_str() );
if( hlslFileBuffer == NULL )
{
return;
}
len = strlen( hlslFileBuffer );
}
else
{
len = fileSystem->ReadFile( inFile.c_str(), ( void** ) &hlslFileBuffer );
}
if( len <= 0 )
{
return;
}
idStrList compileMacros;
for( int j = 0; j < MAX_SHADER_MACRO_NAMES; j++ )
{
if( BIT( j ) & shader.shaderFeatures )
{
const char* macroName = GetGLSLMacroName( ( shaderFeature_t ) j );
compileMacros.Append( idStr( macroName ) );
}
}
// FIXME: we should really scan the program source code for using rpEnableSkinning but at this
// point we directly load a binary and the program source code is not available on the consoles
bool hasGPUSkinning = false;
if( idStr::Icmp( shader.name.c_str(), "heatHaze" ) == 0 ||
idStr::Icmp( shader.name.c_str(), "heatHazeWithMask" ) == 0 ||
idStr::Icmp( shader.name.c_str(), "heatHazeWithMaskAndVertex" ) == 0 ||
( BIT( USE_GPU_SKINNING ) & shader.shaderFeatures ) )
{
hasGPUSkinning = true;
}
idStr hlslCode( hlslFileBuffer );
idStr programHLSL = StripDeadCode( hlslCode, inFile, compileMacros, shader.builtin );
programGLSL = ConvertCG2GLSL( programHLSL, inFile.c_str(), shader.stage, programUniforms, false, hasGPUSkinning, shader.vertexLayout );
fileSystem->WriteFile( outFileHLSL, programHLSL.c_str(), programHLSL.Length(), "fs_savepath" );
fileSystem->WriteFile( outFileGLSL, programGLSL.c_str(), programGLSL.Length(), "fs_savepath" );
fileSystem->WriteFile( outFileUniforms, programUniforms.c_str(), programUniforms.Length(), "fs_savepath" );
}
else
{
// read in the glsl file
void* fileBufferGLSL = NULL;
int lengthGLSL = fileSystem->ReadFile( outFileGLSL.c_str(), &fileBufferGLSL );
if( lengthGLSL <= 0 )
{
idLib::Error( "GLSL file %s could not be loaded and may be corrupt", outFileGLSL.c_str() );
}
programGLSL = ( const char* ) fileBufferGLSL;
Mem_Free( fileBufferGLSL );
{
// read in the uniform file
void* fileBufferUniforms = NULL;
int lengthUniforms = fileSystem->ReadFile( outFileUniforms.c_str(), &fileBufferUniforms );
if( lengthUniforms <= 0 )
{
idLib::Error( "uniform file %s could not be loaded and may be corrupt", outFileUniforms.c_str() );
}
programUniforms = ( const char* ) fileBufferUniforms;
Mem_Free( fileBufferUniforms );
}
}
// RB: find the uniforms locations in either the vertex or fragment uniform array
// this uses the new layout structure
{
shader.uniforms.Clear();
idLexer src( programUniforms, programUniforms.Length(), "uniforms" );
idToken token;
if( src.ExpectTokenString( "uniforms" ) )
{
src.ExpectTokenString( "[" );
while( !src.CheckTokenString( "]" ) )
{
src.ReadToken( &token );
int index = -1;
for( int i = 0; i < RENDERPARM_TOTAL && index == -1; i++ )
{
const char* parmName = GetGLSLParmName( i );
if( token == parmName )
{
index = i;
}
}
if( index == -1 )
{
idLib::Error( "couldn't find uniform %s for %s", token.c_str(), outFileGLSL.c_str() );
}
shader.uniforms.Append( index );
}
}
}
// create and compile the shader
shader.progId = glCreateShader( glTarget );
if( shader.progId )
{
const char* source[1] = { programGLSL.c_str() };
glShaderSource( shader.progId, 1, source, NULL );
glCompileShader( shader.progId );
int infologLength = 0;
glGetShaderiv( shader.progId, GL_INFO_LOG_LENGTH, &infologLength );
if( infologLength > 1 )
{
idTempArray<char> infoLog( infologLength );
int charsWritten = 0;
glGetShaderInfoLog( shader.progId, infologLength, &charsWritten, infoLog.Ptr() );
// catch the strings the ATI and Intel drivers output on success
if( strstr( infoLog.Ptr(), "successfully compiled to run on hardware" ) != NULL ||
strstr( infoLog.Ptr(), "No errors." ) != NULL )
{
//idLib::Printf( "%s program %s from %s compiled to run on hardware\n", typeName, GetName(), GetFileName() );
}
else if( r_displayGLSLCompilerMessages.GetBool() ) // DG: check for the CVar I added above
{
idLib::Printf( "While compiling %s program %s\n", ( shader.stage == SHADER_STAGE_FRAGMENT ) ? "fragment" : "vertex" , inFile.c_str() );
const char separator = '\n';
idList<idStr> lines;
lines.Clear();
idStr source( programGLSL );
lines.Append( source );
for( int index = 0, ofs = lines[index].Find( separator ); ofs != -1; index++, ofs = lines[index].Find( separator ) )
{
lines.Append( lines[index].c_str() + ofs + 1 );
lines[index].CapLength( ofs );
}
idLib::Printf( "-----------------\n" );
for( int i = 0; i < lines.Num(); i++ )
{
idLib::Printf( "%3d: %s\n", i + 1, lines[i].c_str() );
}
idLib::Printf( "-----------------\n" );
idLib::Printf( "%s\n", infoLog.Ptr() );
}
}
GLint compiled = GL_FALSE;
glGetShaderiv( shader.progId, GL_COMPILE_STATUS, &compiled );
if( compiled == GL_FALSE )
{
glDeleteShader( shader.progId );
shader.progId = INVALID_PROGID;	// otherwise LoadShader() would treat the failed shader as already loaded
return;
}
}
}
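// For reference, the .layout companion file parsed above is a flat token
// stream of renderparm names; a hypothetical example (the parm names are
// illustrative, not taken from a real generated file):
//
//	uniforms [
//		rpMVPmatrixX
//		rpMVPmatrixY
//		rpMVPmatrixZ
//		rpMVPmatrixW
//		rpColor
//	]
//
// Each name is matched against GetGLSLParmName() and its renderparm index is
// appended to shader.uniforms, keeping the GLSL uniform array and the
// CPU-side uniform list in the same order.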
/*
================================================================================================
idRenderProgManager::LoadGLSLProgram
================================================================================================
*/
void idRenderProgManager::LoadGLSLProgram( const int programIndex, const int vertexShaderIndex, const int fragmentShaderIndex )
{
renderProg_t& prog = renderProgs[programIndex];
if( prog.progId != INVALID_PROGID )
{
return; // Already loaded
}
//shader_t& vertexShader = shaders[ vertexShaderIndex ];
//shader_t& fragmentShader = shaders[ fragmentShaderIndex ];
GLuint vertexProgID = ( vertexShaderIndex != -1 ) ? shaders[ vertexShaderIndex ].progId : INVALID_PROGID;
GLuint fragmentProgID = ( fragmentShaderIndex != -1 ) ? shaders[ fragmentShaderIndex ].progId : INVALID_PROGID;
const GLuint program = glCreateProgram();
if( program )
{
if( vertexProgID != INVALID_PROGID )
{
glAttachShader( program, vertexProgID );
}
if( fragmentProgID != INVALID_PROGID )
{
glAttachShader( program, fragmentProgID );
}
// bind vertex attribute locations
for( int i = 0; attribsPC[i].glsl != NULL; i++ )
{
if( ( attribsPC[i].flags & AT_VS_IN ) != 0 )
{
glBindAttribLocation( program, attribsPC[i].bind, attribsPC[i].glsl );
}
}
glLinkProgram( program );
int infologLength = 0;
glGetProgramiv( program, GL_INFO_LOG_LENGTH, &infologLength );
if( infologLength > 1 )
{
char* infoLog = ( char* )malloc( infologLength );
int charsWritten = 0;
glGetProgramInfoLog( program, infologLength, &charsWritten, infoLog );
// catch the strings the ATI and Intel drivers output on success
if( strstr( infoLog, "Vertex shader(s) linked, fragment shader(s) linked." ) != NULL || strstr( infoLog, "No errors." ) != NULL )
{
//idLib::Printf( "render prog %s from %s linked\n", GetName(), GetFileName() );
}
else
{
idLib::Printf( "While linking GLSL program %d with vertexShader %s and fragmentShader %s\n",
programIndex,
( vertexShaderIndex >= 0 ) ? shaders[vertexShaderIndex].name.c_str() : "<Invalid>",
( fragmentShaderIndex >= 0 ) ? shaders[ fragmentShaderIndex ].name.c_str() : "<Invalid>" );
idLib::Printf( "%s\n", infoLog );
}
free( infoLog );
}
}
GLint linked = GL_FALSE;
glGetProgramiv( program, GL_LINK_STATUS, &linked );
if( linked == GL_FALSE )
{
glDeleteProgram( program );
idLib::Error( "While linking GLSL program %d with vertexShader %s and fragmentShader %s\n",
programIndex,
( vertexShaderIndex >= 0 ) ? shaders[vertexShaderIndex].name.c_str() : "<Invalid>",
( fragmentShaderIndex >= 0 ) ? shaders[ fragmentShaderIndex ].name.c_str() : "<Invalid>" );
return;
}
//shaders[ vertexShaderIndex ].uniformArray = glGetUniformLocation( program, VERTEX_UNIFORM_ARRAY_NAME );
//shaders[ fragmentShaderIndex ].uniformArray = glGetUniformLocation( program, FRAGMENT_UNIFORM_ARRAY_NAME );
if( vertexShaderIndex > -1 && shaders[ vertexShaderIndex ].uniforms.Num() > 0 )
{
shader_t& vertexShader = shaders[ vertexShaderIndex ];
vertexShader.uniformArray = glGetUniformLocation( program, VERTEX_UNIFORM_ARRAY_NAME );
}
if( fragmentShaderIndex > -1 && shaders[ fragmentShaderIndex ].uniforms.Num() > 0 )
{
shader_t& fragmentShader = shaders[ fragmentShaderIndex ];
fragmentShader.uniformArray = glGetUniformLocation( program, FRAGMENT_UNIFORM_ARRAY_NAME );
}
assert( vertexShaderIndex == -1 || shaders[ vertexShaderIndex ].uniforms.Num() == 0 || shaders[ vertexShaderIndex ].uniformArray != -1 );
assert( fragmentShaderIndex == -1 || shaders[ fragmentShaderIndex ].uniforms.Num() == 0 || shaders[ fragmentShaderIndex ].uniformArray != -1 );
// RB: only load joint uniform buffers if available
if( glConfig.gpuSkinningAvailable )
{
// get the uniform buffer binding for skinning joint matrices
GLint blockIndex = glGetUniformBlockIndex( program, "matrices_ubo" );
if( blockIndex != -1 )
{
glUniformBlockBinding( program, blockIndex, 0 );
}
}
// RB end
// set the texture unit locations once for the render program. We only need to do this once since we only link the program once
glUseProgram( program );
int numSamplerUniforms = 0;
for( int i = 0; i < MAX_PROG_TEXTURE_PARMS; ++i )
{
GLint loc = glGetUniformLocation( program, va( "samp%d", i ) );
if( loc != -1 )
{
glUniform1i( loc, i );
numSamplerUniforms++;
}
}
idStr programName = shaders[ vertexShaderIndex ].name;
programName.StripFileExtension();
prog.name = programName;
prog.progId = program;
prog.fragmentShaderIndex = fragmentShaderIndex;
prog.vertexShaderIndex = vertexShaderIndex;
// RB: removed idStr::Icmp( name, "heatHaze.vfp" ) == 0 hack
// this requires r_useUniformArrays 1
for( int i = 0; i < shaders[vertexShaderIndex].uniforms.Num(); i++ )
{
if( shaders[vertexShaderIndex].uniforms[i] == RENDERPARM_ENABLE_SKINNING )
{
prog.usesJoints = true;
prog.optionalSkinning = true;
}
}
// RB end
}
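// A load-order sketch (indices and lookup helpers are hypothetical; in the
// engine they come from idRenderProgManager's shader and program tables).
// Both stages must be compiled via LoadShader() before the link, because
// LoadGLSLProgram() reads shaders[ index ].progId directly:
//
//	int vIndex = FindVertexShader( "interaction" );		// assumed lookup helper
//	int fIndex = FindFragmentShader( "interaction" );	// assumed lookup helper
//	renderProgManager.LoadShader( vIndex, SHADER_STAGE_VERTEX );
//	renderProgManager.LoadShader( fIndex, SHADER_STAGE_FRAGMENT );
//	renderProgManager.LoadGLSLProgram( progIndex, vIndex, fIndex );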
/*
================================================================================================
idRenderProgManager::CommitUniforms
================================================================================================
*/
void idRenderProgManager::CommitUniforms( uint64 stateBits )
{
const int progID = current;
const renderProg_t& prog = renderProgs[progID];
//GL_CheckErrors();
ALIGNTYPE16 idVec4 localVectors[RENDERPARM_TOTAL];
auto commitarray = [&]( idVec4( &vectors )[ RENDERPARM_TOTAL ] , shader_t& shader )
{
const int numUniforms = shader.uniforms.Num();
if( shader.uniformArray != -1 && numUniforms > 0 )
{
int totalUniforms = 0;
for( int i = 0; i < numUniforms; ++i )
{
// RB: HACK rpShadowMatrices[6 * 4]
if( shader.uniforms[i] == RENDERPARM_SHADOW_MATRIX_0_X )
{
for( int j = 0; j < ( 6 * 4 ); j++ )
{
vectors[i + j] = uniforms[ shader.uniforms[i] + j];
totalUniforms++;
}
}
else
{
vectors[i] = uniforms[ shader.uniforms[i] ];
totalUniforms++;
}
}
glUniform4fv( shader.uniformArray, totalUniforms, vectors[0].ToFloatPtr() );
}
};
if( prog.vertexShaderIndex >= 0 )
{
commitarray( localVectors, shaders[ prog.vertexShaderIndex ] );
}
if( prog.fragmentShaderIndex >= 0 )
{
commitarray( localVectors, shaders[ prog.fragmentShaderIndex ] );
}
//GL_CheckErrors();
}
/*
================================================================================================
idRenderProgManager::KillAllShaders()
================================================================================================
*/
void idRenderProgManager::KillAllShaders()
{
Unbind();
for( int i = 0; i < shaders.Num(); i++ )
{
if( shaders[i].progId != INVALID_PROGID )
{
glDeleteShader( shaders[i].progId );
shaders[i].progId = INVALID_PROGID;
}
}
for( int i = 0; i < renderProgs.Num(); ++i )
{
if( renderProgs[i].progId != INVALID_PROGID )
{
glDeleteProgram( renderProgs[i].progId );
renderProgs[i].progId = INVALID_PROGID;
}
}
}
/*
====================
idRenderBackend::ResizeImages
====================
*/
void idRenderBackend::ResizeImages()
{
// TODO resize framebuffers here
}


@@ -1,798 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#pragma hdrstop
#include "precompiled.h"
#include "../RenderCommon.h"
#include "../RenderBackend.h"
#include "Allocator_VK.h"
idCVar r_vkDeviceLocalMemoryMB( "r_vkDeviceLocalMemoryMB", "256", CVAR_INTEGER | CVAR_INIT, "" );
idCVar r_vkHostVisibleMemoryMB( "r_vkHostVisibleMemoryMB", "64", CVAR_INTEGER | CVAR_INIT, "" );
static const char* memoryUsageStrings[ VULKAN_MEMORY_USAGES ] =
{
"VULKAN_MEMORY_USAGE_UNKNOWN",
"VULKAN_MEMORY_USAGE_GPU_ONLY",
"VULKAN_MEMORY_USAGE_CPU_ONLY",
"VULKAN_MEMORY_USAGE_CPU_TO_GPU",
"VULKAN_MEMORY_USAGE_GPU_TO_CPU",
};
static const char* allocationTypeStrings[ VULKAN_ALLOCATION_TYPES ] =
{
"VULKAN_ALLOCATION_TYPE_FREE",
"VULKAN_ALLOCATION_TYPE_BUFFER",
"VULKAN_ALLOCATION_TYPE_IMAGE",
"VULKAN_ALLOCATION_TYPE_IMAGE_LINEAR",
"VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL",
};
/*
=============
FindMemoryTypeIndex
=============
*/
uint32 FindMemoryTypeIndex( const uint32 memoryTypeBits, const vulkanMemoryUsage_t usage )
{
VkPhysicalDeviceMemoryProperties& physicalMemoryProperties = vkcontext.gpu->memProps;
VkMemoryPropertyFlags required = 0;
VkMemoryPropertyFlags preferred = 0;
VkMemoryHeapFlags avoid = 0;
switch( usage )
{
case VULKAN_MEMORY_USAGE_GPU_ONLY:
preferred |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
break;
case VULKAN_MEMORY_USAGE_CPU_ONLY:
required |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
// SRS - Make sure memory type does not have VK_MEMORY_HEAP_MULTI_INSTANCE_BIT set, otherwise get validation errors when mapping memory
avoid |= VK_MEMORY_HEAP_MULTI_INSTANCE_BIT;
break;
case VULKAN_MEMORY_USAGE_CPU_TO_GPU:
required |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
preferred |= VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
// SRS - Make sure memory type does not have VK_MEMORY_HEAP_MULTI_INSTANCE_BIT set, otherwise get validation errors when mapping memory
avoid |= VK_MEMORY_HEAP_MULTI_INSTANCE_BIT;
break;
case VULKAN_MEMORY_USAGE_GPU_TO_CPU:
required |= VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
preferred |= VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
// SRS - Make sure memory type does not have VK_MEMORY_HEAP_MULTI_INSTANCE_BIT set, otherwise get validation errors when mapping memory
avoid |= VK_MEMORY_HEAP_MULTI_INSTANCE_BIT;
break;
default:
idLib::FatalError( "idVulkanAllocator::AllocateFromPools: Unknown memory usage." );
}
for( uint32 i = 0; i < physicalMemoryProperties.memoryTypeCount; ++i )
{
if( ( ( memoryTypeBits >> i ) & 1 ) == 0 )
{
continue;
}
// SRS - Make sure memory type does not have any avoid heap flags set
if( ( physicalMemoryProperties.memoryHeaps[ physicalMemoryProperties.memoryTypes[ i ].heapIndex ].flags & avoid ) != 0 )
{
continue;
}
const VkMemoryPropertyFlags properties = physicalMemoryProperties.memoryTypes[ i ].propertyFlags;
if( ( properties & required ) != required )
{
continue;
}
if( ( properties & preferred ) != preferred )
{
continue;
}
return i;
}
for( uint32 i = 0; i < physicalMemoryProperties.memoryTypeCount; ++i )
{
if( ( ( memoryTypeBits >> i ) & 1 ) == 0 )
{
continue;
}
// SRS - Make sure memory type does not have any avoid heap flags set
if( ( physicalMemoryProperties.memoryHeaps[ physicalMemoryProperties.memoryTypes[ i ].heapIndex ].flags & avoid ) != 0 )
{
continue;
}
const VkMemoryPropertyFlags properties = physicalMemoryProperties.memoryTypes[ i ].propertyFlags;
if( ( properties & required ) != required )
{
continue;
}
return i;
}
return UINT32_MAX;
}
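// A usage sketch (engine context assumed): picking a memory type for a
// host-visible upload buffer. The first pass above demands 'required' and
// 'preferred' flags together; the second pass drops 'preferred', so hardware
// without HOST_VISIBLE | DEVICE_LOCAL memory still yields a usable type.
//
//	VkMemoryRequirements memReq;
//	vkGetBufferMemoryRequirements( vkcontext.device, buffer, &memReq );
//	uint32 typeIndex = FindMemoryTypeIndex( memReq.memoryTypeBits, VULKAN_MEMORY_USAGE_CPU_TO_GPU );
//	if( typeIndex == UINT32_MAX )
//	{
//		idLib::FatalError( "FindMemoryTypeIndex: no compatible memory type" );
//	}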
/*
================================================================================================
idVulkanAllocator
================================================================================================
*/
/*
=============
idVulkanBlock::idVulkanBlock
=============
*/
idVulkanBlock::idVulkanBlock( const uint32 _memoryTypeIndex, const VkDeviceSize _size, vulkanMemoryUsage_t _usage ) :
nextBlockId( 0 ),
size( _size ),
allocated( 0 ),
memoryTypeIndex( _memoryTypeIndex ),
usage( _usage ),
deviceMemory( VK_NULL_HANDLE ),
head( NULL ),
data( NULL )
{
}
/*
=============
idVulkanBlock::~idVulkanBlock
=============
*/
idVulkanBlock::~idVulkanBlock()
{
Shutdown();
}
/*
=============
idVulkanBlock::Init
=============
*/
bool idVulkanBlock::Init()
{
//SRS - Changed UINT64_MAX to UINT32_MAX for type consistency, otherwise test is always false
if( memoryTypeIndex == UINT32_MAX )
{
return false;
}
VkMemoryAllocateInfo memoryAllocateInfo = {};
memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memoryAllocateInfo.allocationSize = size;
memoryAllocateInfo.memoryTypeIndex = memoryTypeIndex;
ID_VK_CHECK( vkAllocateMemory( vkcontext.device, &memoryAllocateInfo, NULL, &deviceMemory ) )
if( deviceMemory == VK_NULL_HANDLE )
{
return false;
}
if( IsHostVisible() )
{
ID_VK_CHECK( vkMapMemory( vkcontext.device, deviceMemory, 0, size, 0, ( void** )&data ) );
}
head = new chunk_t();
head->id = nextBlockId++;
head->size = size;
head->offset = 0;
head->prev = NULL;
head->next = NULL;
head->type = VULKAN_ALLOCATION_TYPE_FREE;
return true;
}
/*
=============
idVulkanBlock::Shutdown
=============
*/
void idVulkanBlock::Shutdown()
{
// Unmap the memory
if( IsHostVisible() )
{
vkUnmapMemory( vkcontext.device, deviceMemory );
}
// Free the memory
vkFreeMemory( vkcontext.device, deviceMemory, NULL );
deviceMemory = VK_NULL_HANDLE;
// Free the chunk list; guard against Shutdown() on a block that never passed Init()
chunk_t* current = head;
while( current != NULL )
{
chunk_t* next = current->next;
delete current;
current = next;
}
head = NULL;
}
/*
=============
IsOnSamePage
Algorithm comes from the Vulkan 1.0.39 spec. "Buffer-Image Granularity"
Also known as "Linear-Optimal Granularity"
=============
*/
static bool IsOnSamePage(
VkDeviceSize rAOffset, VkDeviceSize rASize,
VkDeviceSize rBOffset, VkDeviceSize pageSize )
{
assert( rAOffset + rASize <= rBOffset && rASize > 0 && pageSize > 0 );
VkDeviceSize rAEnd = rAOffset + rASize - 1;
VkDeviceSize rAEndPage = rAEnd & ~( pageSize - 1 );
VkDeviceSize rBStart = rBOffset;
VkDeviceSize rBStartPage = rBStart & ~( pageSize - 1 );
return rAEndPage == rBStartPage;
}
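// A runnable check of the page math above, assuming a hypothetical
// bufferImageGranularity of 0x400 (1 KiB). Resources whose last/first byte
// land on the same granularity page can conflict, which is what forces the
// extra ALIGN( offset, granularity ) in idVulkanBlock::Allocate below.
#include <cassert>
#include <cstdint>

int main()
{
	const uint64_t granularity = 0x400;
	auto samePage = []( uint64_t aOffset, uint64_t aSize, uint64_t bOffset, uint64_t pageSize )
	{
		const uint64_t aEndPage   = ( aOffset + aSize - 1 ) & ~( pageSize - 1 );
		const uint64_t bStartPage = bOffset & ~( pageSize - 1 );
		return aEndPage == bStartPage;
	};
	assert( !samePage( 0x0000, 0x0400, 0x0400, granularity ) );	// A fills page 0, B starts on page 1: no conflict possible
	assert(  samePage( 0x0000, 0x03C0, 0x03C0, granularity ) );	// both touch page 0: linear/optimal neighbors must be padded apart
	return 0;
}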
/*
=============
HasGranularityConflict
Check that allocation types obey buffer image granularity.
=============
*/
static bool HasGranularityConflict( vulkanAllocationType_t type1, vulkanAllocationType_t type2 )
{
if( type1 > type2 )
{
SwapValues( type1, type2 );
}
switch( type1 )
{
case VULKAN_ALLOCATION_TYPE_FREE:
return false;
case VULKAN_ALLOCATION_TYPE_BUFFER:
return type2 == VULKAN_ALLOCATION_TYPE_IMAGE ||
type2 == VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL;
case VULKAN_ALLOCATION_TYPE_IMAGE:
return type2 == VULKAN_ALLOCATION_TYPE_IMAGE ||
type2 == VULKAN_ALLOCATION_TYPE_IMAGE_LINEAR ||
type2 == VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL;
case VULKAN_ALLOCATION_TYPE_IMAGE_LINEAR:
return type2 == VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL;
case VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL:
return false;
default:
assert( false );
return true;
}
}
/*
=============
idVulkanBlock::Allocate
=============
*/
bool idVulkanBlock::Allocate(
const uint32 _size,
const uint32 align,
const VkDeviceSize granularity,
const vulkanAllocationType_t allocType,
vulkanAllocation_t& allocation )
{
const VkDeviceSize freeSize = size - allocated;
if( freeSize < _size )
{
return false;
}
chunk_t* current = NULL;
chunk_t* bestFit = NULL;
chunk_t* previous = NULL;
VkDeviceSize padding = 0;
VkDeviceSize offset = 0;
VkDeviceSize alignedSize = 0;
for( current = head; current != NULL; previous = current, current = current->next )
{
if( current->type != VULKAN_ALLOCATION_TYPE_FREE )
{
continue;
}
if( _size > current->size )
{
continue;
}
offset = ALIGN( current->offset, align );
// Check for linear/optimal granularity conflict with previous allocation
if( previous != NULL && granularity > 1 )
{
if( IsOnSamePage( previous->offset, previous->size, offset, granularity ) )
{
if( HasGranularityConflict( previous->type, allocType ) )
{
offset = ALIGN( offset, granularity );
}
}
}
padding = offset - current->offset;
alignedSize = padding + _size;
if( alignedSize > current->size )
{
continue;
}
if( alignedSize + allocated >= size )
{
return false;
}
if( granularity > 1 && current->next != NULL )
{
chunk_t* next = current->next;
if( IsOnSamePage( offset, _size, next->offset, granularity ) )
{
if( HasGranularityConflict( allocType, next->type ) )
{
continue;
}
}
}
bestFit = current;
break;
}
if( bestFit == NULL )
{
return false;
}
if( bestFit->size > _size )
{
chunk_t* chunk = new chunk_t();
chunk_t* next = bestFit->next;
chunk->id = nextBlockId++;
chunk->prev = bestFit;
bestFit->next = chunk;
chunk->next = next;
if( next )
{
next->prev = chunk;
}
chunk->size = bestFit->size - alignedSize;
chunk->offset = offset + _size;
chunk->type = VULKAN_ALLOCATION_TYPE_FREE;
}
bestFit->type = allocType;
bestFit->size = _size;
allocated += alignedSize;
allocation.size = bestFit->size;
allocation.id = bestFit->id;
allocation.deviceMemory = deviceMemory;
if( IsHostVisible() )
{
allocation.data = data + offset;
}
allocation.offset = offset;
allocation.block = this;
return true;
}
/*
=============
idVulkanBlock::Free
=============
*/
void idVulkanBlock::Free( vulkanAllocation_t& allocation )
{
chunk_t* current = NULL;
for( current = head; current != NULL; current = current->next )
{
if( current->id == allocation.id )
{
break;
}
}
if( current == NULL )
{
idLib::Warning( "idVulkanBlock::Free: Tried to free an unknown allocation. %p - %u", this, allocation.id );
return;
}
current->type = VULKAN_ALLOCATION_TYPE_FREE;
if( current->prev && current->prev->type == VULKAN_ALLOCATION_TYPE_FREE )
{
chunk_t* prev = current->prev;
prev->next = current->next;
if( current->next )
{
current->next->prev = prev;
}
prev->size += current->size;
delete current;
current = prev;
}
if( current->next && current->next->type == VULKAN_ALLOCATION_TYPE_FREE )
{
chunk_t* next = current->next;
if( next->next )
{
next->next->prev = current;
}
current->next = next->next;
current->size += next->size;
delete next;
}
allocated -= allocation.size;
}
/*
=============
idVulkanBlock::Print
=============
*/
void idVulkanBlock::Print()
{
int count = 0;
for( chunk_t* current = head; current != NULL; current = current->next )
{
count++;
}
idLib::Printf( "Type Index: %u\n", memoryTypeIndex );
idLib::Printf( "Usage: %s\n", memoryUsageStrings[ usage ] );
idLib::Printf( "Count: %d\n", count );
// SRS - Changed %lu to %PRIu64 pre-defined macro to handle platform differences
idLib::Printf( "Size: %" PRIu64"\n", size );
idLib::Printf( "Allocated: %" PRIu64"\n", allocated );
idLib::Printf( "Next Block: %u\n", nextBlockId );
idLib::Printf( "------------------------\n" );
for( chunk_t* current = head; current != NULL; current = current->next )
{
idLib::Printf( "{\n" );
idLib::Printf( "\tId: %u\n", current->id );
// SRS - Changed %lu to %PRIu64 pre-defined macro to handle platform differences
idLib::Printf( "\tSize: %" PRIu64"\n", current->size );
idLib::Printf( "\tOffset: %" PRIu64"\n", current->offset );
idLib::Printf( "\tType: %s\n", allocationTypeStrings[ current->type ] );
idLib::Printf( "}\n" );
}
idLib::Printf( "\n" );
}
/*
================================================================================================
idVulkanAllocator
================================================================================================
*/
#if defined( USE_AMD_ALLOCATOR )
VmaAllocator vmaAllocator;
#else
idVulkanAllocator vulkanAllocator;
#endif
/*
=============
idVulkanAllocator::idVulkanAllocator
=============
*/
idVulkanAllocator::idVulkanAllocator() :
garbageIndex( 0 ),
deviceLocalMemoryBytes( 0 ),
hostVisibleMemoryBytes( 0 ),
bufferImageGranularity( 0 )
{
}
/*
=============
idVulkanAllocator::Init
=============
*/
void idVulkanAllocator::Init()
{
deviceLocalMemoryBytes = r_vkDeviceLocalMemoryMB.GetInteger() * 1024 * 1024;
hostVisibleMemoryBytes = r_vkHostVisibleMemoryMB.GetInteger() * 1024 * 1024;
bufferImageGranularity = vkcontext.gpu->props.limits.bufferImageGranularity;
}
/*
=============
idVulkanAllocator::Shutdown
=============
*/
void idVulkanAllocator::Shutdown()
{
EmptyGarbage();
for( int i = 0; i < VK_MAX_MEMORY_TYPES; ++i )
{
idList< idVulkanBlock* >& blocks = this->blocks[ i ];
const int numBlocks = blocks.Num();
for( int j = 0; j < numBlocks; ++j )
{
delete blocks[ j ];
}
blocks.Clear();
}
}
/*
=============
idVulkanAllocator::Allocate
=============
*/
vulkanAllocation_t idVulkanAllocator::Allocate(
const uint32 _size,
const uint32 align,
const uint32 memoryTypeBits,
const vulkanMemoryUsage_t usage,
const vulkanAllocationType_t allocType )
{
vulkanAllocation_t allocation;
uint32 memoryTypeIndex = FindMemoryTypeIndex( memoryTypeBits, usage );
if( memoryTypeIndex == UINT32_MAX )
{
idLib::FatalError( "idVulkanAllocator::Allocate: Unable to find a memoryTypeIndex for allocation request." );
}
idList< idVulkanBlock* >& blocks = this->blocks[ memoryTypeIndex ];
const int numBlocks = blocks.Num();
for( int i = 0; i < numBlocks; ++i )
{
idVulkanBlock* block = blocks[ i ];
if( block->memoryTypeIndex != memoryTypeIndex )
{
continue;
}
if( block->Allocate( _size, align, bufferImageGranularity, allocType, allocation ) )
{
return allocation;
}
}
VkDeviceSize blockSize = ( usage == VULKAN_MEMORY_USAGE_GPU_ONLY ) ? deviceLocalMemoryBytes : hostVisibleMemoryBytes;
idVulkanBlock* block = new idVulkanBlock( memoryTypeIndex, blockSize, usage );
if( block->Init() )
{
blocks.Append( block );
}
else
{
idLib::FatalError( "idVulkanAllocator::Allocate: Could not allocate new memory block." );
}
block->Allocate( _size, align, bufferImageGranularity, allocType, allocation );
return allocation;
}
/*
=============
idVulkanAllocator::Free
=============
*/
void idVulkanAllocator::Free( const vulkanAllocation_t allocation )
{
// SRS - Make sure we are trying to free an actual allocated block, otherwise skip
if( allocation.block != NULL )
{
garbage[ garbageIndex ].Append( allocation );
}
}
/*
=============
idVulkanAllocator::EmptyGarbage
=============
*/
void idVulkanAllocator::EmptyGarbage()
{
garbageIndex = ( garbageIndex + 1 ) % NUM_FRAME_DATA;
idList< vulkanAllocation_t >& garbage = this->garbage[ garbageIndex ];
const int numAllocations = garbage.Num();
for( int i = 0; i < numAllocations; ++i )
{
vulkanAllocation_t allocation = garbage[ i ];
allocation.block->Free( allocation );
if( allocation.block->allocated == 0 )
{
blocks[ allocation.block->memoryTypeIndex ].Remove( allocation.block );
delete allocation.block;
allocation.block = NULL;
}
}
garbage.Clear();
}
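// The deferred-free pattern above is what makes Free() safe mid-frame: Free()
// only queues the allocation, and each EmptyGarbage() call advances the ring
// and releases the oldest queued list, giving roughly NUM_FRAME_DATA frames
// of latency before vkFreeMemory can touch memory the GPU might still read.
// Expected per-frame call order (engine context assumed):
//
//	vulkanAllocator.Free( allocation );	// any time during command recording
//	// ... submit the frame, then wait on its fence ...
//	vulkanAllocator.EmptyGarbage();		// now safe to actually release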
/*
=============
idVulkanAllocator::Print
=============
*/
void idVulkanAllocator::Print()
{
idLib::Printf( "Device Local MB: %d\n", int( deviceLocalMemoryBytes / 1024 * 1024 ) );
idLib::Printf( "Host Visible MB: %d\n", int( hostVisibleMemoryBytes / 1024 * 1024 ) );
// SRS - Changed %lu to %PRIu64 pre-defined macro to handle platform differences
idLib::Printf( "Buffer Granularity: %" PRIu64"\n", bufferImageGranularity );
idLib::Printf( "\n" );
for( int i = 0; i < VK_MAX_MEMORY_TYPES; ++i )
{
idList< idVulkanBlock* >& blocksByType = blocks[ i ];
const int numBlocks = blocksByType.Num();
for( int j = 0; j < numBlocks; ++j )
{
blocksByType[ j ]->Print();
}
}
}
CONSOLE_COMMAND( Vulkan_PrintHeapInfo, "Print out the heap information for this hardware.", 0 )
{
VkPhysicalDeviceMemoryProperties& props = vkcontext.gpu->memProps;
idLib::Printf( "Heaps %u\n------------------------\n", props.memoryHeapCount );
for( uint32 i = 0; i < props.memoryHeapCount; ++i )
{
VkMemoryHeap heap = props.memoryHeaps[ i ];
// SRS - Changed %lu to %PRIu64 pre-defined macro to handle platform differences
idLib::Printf( "id=%d, size=%" PRIu64", flags=", i, heap.size );
if( heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT )
{
idLib::Printf( "DEVICE_LOCAL" );
}
else
{
idLib::Printf( "HOST_VISIBLE" );
}
if( heap.flags & VK_MEMORY_HEAP_MULTI_INSTANCE_BIT )
{
idLib::Printf( ", MULTI_INSTANCE" );
}
idLib::Printf( "\n" );
for( uint32 j = 0; j < props.memoryTypeCount; ++j )
{
VkMemoryType type = props.memoryTypes[ j ];
if( type.heapIndex != i )
{
continue;
}
idStr properties;
if( type.propertyFlags & VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT )
{
properties += "\tDEVICE_LOCAL\n";
}
if( type.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT )
{
properties += "\tHOST_VISIBLE\n";
}
if( type.propertyFlags & VK_MEMORY_PROPERTY_HOST_COHERENT_BIT )
{
properties += "\tHOST_COHERENT\n";
}
if( type.propertyFlags & VK_MEMORY_PROPERTY_HOST_CACHED_BIT )
{
properties += "\tHOST_CACHED\n";
}
if( type.propertyFlags & VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT )
{
properties += "\tLAZILY_ALLOCATED\n";
}
if( properties.Length() > 0 )
{
idLib::Printf( "memory_type=%u\n", j );
idLib::Printf( "%s", properties.c_str() );
}
}
idLib::Printf( "\n" );
}
}
CONSOLE_COMMAND( Vulkan_PrintAllocations, "Print out all the current allocations.", 0 )
{
#if defined( USE_AMD_ALLOCATOR )
// TODO
#else
vulkanAllocator.Print();
#endif
}


@@ -1,175 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#ifndef __HEAP_VK_H__
#define __HEAP_VK_H__
enum vulkanMemoryUsage_t
{
VULKAN_MEMORY_USAGE_UNKNOWN,
VULKAN_MEMORY_USAGE_GPU_ONLY,
VULKAN_MEMORY_USAGE_CPU_ONLY,
VULKAN_MEMORY_USAGE_CPU_TO_GPU,
VULKAN_MEMORY_USAGE_GPU_TO_CPU,
VULKAN_MEMORY_USAGES,
};
enum vulkanAllocationType_t
{
VULKAN_ALLOCATION_TYPE_FREE,
VULKAN_ALLOCATION_TYPE_BUFFER,
VULKAN_ALLOCATION_TYPE_IMAGE,
VULKAN_ALLOCATION_TYPE_IMAGE_LINEAR,
VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL,
VULKAN_ALLOCATION_TYPES,
};
uint32 FindMemoryTypeIndex( const uint32 memoryTypeBits, const vulkanMemoryUsage_t usage );
class idVulkanBlock;
struct vulkanAllocation_t
{
vulkanAllocation_t() :
block( NULL ),
id( 0 ),
deviceMemory( VK_NULL_HANDLE ),
offset( 0 ),
size( 0 ),
data( NULL )
{
}
idVulkanBlock* block;
uint32 id;
VkDeviceMemory deviceMemory;
VkDeviceSize offset;
VkDeviceSize size;
byte* data;
};
/*
================================================================================================
idVulkanBlock
================================================================================================
*/
class idVulkanBlock
{
friend class idVulkanAllocator;
public:
idVulkanBlock( const uint32 memoryTypeIndex, const VkDeviceSize size, vulkanMemoryUsage_t usage );
~idVulkanBlock();
bool Init();
void Shutdown();
bool IsHostVisible() const
{
return usage != VULKAN_MEMORY_USAGE_GPU_ONLY;
}
bool Allocate(
const uint32 size,
const uint32 align,
const VkDeviceSize granularity,
const vulkanAllocationType_t allocType,
vulkanAllocation_t& allocation );
void Free( vulkanAllocation_t& allocation );
void Print();
private:
struct chunk_t
{
uint32 id;
VkDeviceSize size;
VkDeviceSize offset;
chunk_t* prev;
chunk_t* next;
vulkanAllocationType_t type;
};
chunk_t* head;
uint32 nextBlockId;
uint32 memoryTypeIndex;
vulkanMemoryUsage_t usage;
VkDeviceMemory deviceMemory;
VkDeviceSize size;
VkDeviceSize allocated;
byte* data;
};
typedef idArray< idList< idVulkanBlock* >, VK_MAX_MEMORY_TYPES > idVulkanBlocks;
/*
================================================================================================
idVulkanAllocator
================================================================================================
*/
class idVulkanAllocator
{
public:
idVulkanAllocator();
void Init();
void Shutdown();
vulkanAllocation_t Allocate(
const uint32 size,
const uint32 align,
const uint32 memoryTypeBits,
const vulkanMemoryUsage_t usage,
const vulkanAllocationType_t allocType );
void Free( const vulkanAllocation_t allocation );
void EmptyGarbage();
void Print();
private:
int garbageIndex;
int deviceLocalMemoryBytes;
int hostVisibleMemoryBytes;
VkDeviceSize bufferImageGranularity;
idVulkanBlocks blocks;
idList<vulkanAllocation_t> garbage[ NUM_FRAME_DATA ];
};
#if defined( USE_AMD_ALLOCATOR )
extern VmaAllocator vmaAllocator;
#else
extern idVulkanAllocator vulkanAllocator;
#endif
#endif


@@ -1,836 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#pragma hdrstop
#include "precompiled.h"
#include "../RenderCommon.h"
#include "../RenderBackend.h"
#include "../BufferObject.h"
#include "Staging_VK.h"
extern idCVar r_showBuffers;
/*
========================
UnbindBufferObjects
========================
*/
void UnbindBufferObjects()
{
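// intentionally a no-op: Vulkan records buffer bindings per draw into
// command buffers, so there is no global GL-style bind state to clear
// here (inferred rationale, not an original comment)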
}
/*
================================================================================================
idVertexBuffer
================================================================================================
*/
/*
========================
idVertexBuffer::idVertexBuffer
========================
*/
idVertexBuffer::idVertexBuffer()
{
SetUnmapped();
}
/*
========================
idVertexBuffer::AllocBufferObject
========================
*/
bool idVertexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == VK_NULL_HANDLE );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idVertexBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
int numBytes = GetAllocedSize();
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.pNext = NULL;
bufferCreateInfo.size = numBytes;
bufferCreateInfo.usage = VK_BUFFER_USAGE_VERTEX_BUFFER_BIT;
bufferCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
if( usage == BU_STATIC )
{
bufferCreateInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
#if defined( USE_AMD_ALLOCATOR )
VmaMemoryRequirements vmaReq = {};
if( usage == BU_STATIC )
{
vmaReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
}
else if( usage == BU_DYNAMIC )
{
#if defined(__APPLE__)
// SRS - VMA_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vmaReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;
#else
vmaReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
#endif
vmaReq.flags = VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT;
}
ID_VK_CHECK( vmaCreateBuffer( vmaAllocator, &bufferCreateInfo, &vmaReq, &apiObject, &vmaAllocation, &allocation ) );
#else
VkResult ret = vkCreateBuffer( vkcontext.device, &bufferCreateInfo, NULL, &apiObject );
assert( ret == VK_SUCCESS );
VkMemoryRequirements memoryRequirements = {};
vkGetBufferMemoryRequirements( vkcontext.device, apiObject, &memoryRequirements );
#if defined(__APPLE__)
// SRS - VULKAN_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_ONLY;
#else
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_TO_GPU;
#endif
allocation = vulkanAllocator.Allocate(
memoryRequirements.size,
memoryRequirements.alignment,
memoryRequirements.memoryTypeBits,
memUsage,
VULKAN_ALLOCATION_TYPE_BUFFER );
ID_VK_CHECK( vkBindBufferMemory( vkcontext.device, apiObject, allocation.deviceMemory, allocation.offset ) );
#endif
if( r_showBuffers.GetBool() )
{
idLib::Printf( "vertex buffer alloc %p, (%i bytes)\n", this, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
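// A usage sketch (engine context assumed): a BU_STATIC buffer takes the
// staging-manager copy path in Update() below, while BU_DYNAMIC memory is
// persistently mapped and written directly. The source pointer must be
// 16-byte aligned, per the assert above.
//
//	idVertexBuffer vb;
//	vb.AllocBufferObject( verts, numVerts * sizeof( idDrawVert ), BU_STATIC );
//	// ... draw with it ...
//	vb.FreeBufferObject();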
/*
========================
idVertexBuffer::FreeBufferObject
========================
*/
void idVertexBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == VK_NULL_HANDLE )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "vertex buffer free %p, (%i bytes)\n", this, GetSize() );
}
if( apiObject != VK_NULL_HANDLE )
{
#if defined( USE_AMD_ALLOCATOR )
vmaDestroyBuffer( vmaAllocator, apiObject, vmaAllocation );
apiObject = VK_NULL_HANDLE;
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
vulkanAllocator.Free( allocation );
vkDestroyBuffer( vkcontext.device, apiObject, NULL );
apiObject = VK_NULL_HANDLE;
allocation = vulkanAllocation_t();
#endif
}
ClearWithoutFreeing();
}
/*
========================
idVertexBuffer::Update
========================
*/
void idVertexBuffer::Update( const void* data, int size, int offset ) const
{
assert( apiObject != VK_NULL_HANDLE );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( size > GetSize() )
{
idLib::FatalError( "idVertexBuffer::Update: size overrun, %i > %i\n", size, GetSize() );
}
if( usage == BU_DYNAMIC )
{
CopyBuffer(
#if defined( USE_AMD_ALLOCATOR )
( byte* )allocation.pMappedData + GetOffset() + offset,
#else
allocation.data + GetOffset() + offset,
#endif
( const byte* )data, size );
}
else
{
VkBuffer stageBuffer;
VkCommandBuffer commandBuffer;
int stageOffset = 0;
byte* stageData = stagingManager.Stage( size, 1, commandBuffer, stageBuffer, stageOffset );
memcpy( stageData, data, size );
VkBufferCopy bufferCopy = {};
bufferCopy.srcOffset = stageOffset;
bufferCopy.dstOffset = GetOffset() + offset;
bufferCopy.size = size;
vkCmdCopyBuffer( commandBuffer, stageBuffer, apiObject, 1, &bufferCopy );
}
}
/*
========================
idVertexBuffer::MapBuffer
========================
*/
void* idVertexBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idVertexBuffer::MapBuffer: Cannot map a buffer marked as BU_STATIC." );
}
#if defined( USE_AMD_ALLOCATOR )
void* buffer = ( byte* )allocation.pMappedData + GetOffset();
#else
void* buffer = allocation.data + GetOffset();
#endif
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idVertexBuffer::MapBuffer: failed" );
}
return buffer;
}
/*
========================
idVertexBuffer::UnmapBuffer
========================
*/
void idVertexBuffer::UnmapBuffer()
{
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idVertexBuffer::UnmapBuffer: Cannot unmap a buffer marked as BU_STATIC." );
}
SetUnmapped();
}
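// MapBuffer()/UnmapBuffer() only toggle the mapped flag here: the host-visible
// allocation is persistently mapped when its block is created, so no
// vkMapMemory is issued per call. Dynamic-update sketch ( CopyVerts is a
// hypothetical helper ):
//
//	idDrawVert* dst = ( idDrawVert* )vb.MapBuffer( BM_WRITE );
//	CopyVerts( dst, srcVerts, numVerts );
//	vb.UnmapBuffer();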
/*
========================
idVertexBuffer::ClearWithoutFreeing
========================
*/
void idVertexBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = VK_NULL_HANDLE;
#if defined( USE_AMD_ALLOCATOR )
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
allocation.deviceMemory = VK_NULL_HANDLE;
#endif
}
/*
================================================================================================
idIndexBuffer
================================================================================================
*/
/*
========================
idIndexBuffer::idIndexBuffer
========================
*/
idIndexBuffer::idIndexBuffer()
{
SetUnmapped();
}
/*
========================
idIndexBuffer::AllocBufferObject
========================
*/
bool idIndexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == VK_NULL_HANDLE );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idIndexBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
int numBytes = GetAllocedSize();
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.pNext = NULL;
bufferCreateInfo.size = numBytes;
bufferCreateInfo.usage = VK_BUFFER_USAGE_INDEX_BUFFER_BIT;
if( usage == BU_STATIC )
{
bufferCreateInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
#if defined( USE_AMD_ALLOCATOR )
VmaMemoryRequirements vmaReq = {};
if( usage == BU_STATIC )
{
vmaReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
}
else if( usage == BU_DYNAMIC )
{
#if defined(__APPLE__)
// SRS - VMA_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vmaReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;
#else
vmaReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
#endif
vmaReq.flags = VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT;
}
ID_VK_CHECK( vmaCreateBuffer( vmaAllocator, &bufferCreateInfo, &vmaReq, &apiObject, &vmaAllocation, &allocation ) );
#else
VkResult ret = vkCreateBuffer( vkcontext.device, &bufferCreateInfo, NULL, &apiObject );
assert( ret == VK_SUCCESS );
VkMemoryRequirements memoryRequirements = {};
vkGetBufferMemoryRequirements( vkcontext.device, apiObject, &memoryRequirements );
#if defined(__APPLE__)
// SRS - VULKAN_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_ONLY;
#else
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_TO_GPU;
#endif
allocation = vulkanAllocator.Allocate(
memoryRequirements.size,
memoryRequirements.alignment,
memoryRequirements.memoryTypeBits,
memUsage,
VULKAN_ALLOCATION_TYPE_BUFFER );
ID_VK_CHECK( vkBindBufferMemory( vkcontext.device, apiObject, allocation.deviceMemory, allocation.offset ) );
#endif
if( r_showBuffers.GetBool() )
{
idLib::Printf( "index buffer alloc %p, (%i bytes)\n", this, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
/*
========================
idIndexBuffer::FreeBufferObject
========================
*/
void idIndexBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == VK_NULL_HANDLE )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "index buffer free %p, (%i bytes)\n", this, GetSize() );
}
if( apiObject != VK_NULL_HANDLE )
{
#if defined( USE_AMD_ALLOCATOR )
vmaDestroyBuffer( vmaAllocator, apiObject, vmaAllocation );
apiObject = VK_NULL_HANDLE;
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
vulkanAllocator.Free( allocation );
vkDestroyBuffer( vkcontext.device, apiObject, NULL );
apiObject = VK_NULL_HANDLE;
allocation = vulkanAllocation_t();
#endif
}
ClearWithoutFreeing();
}
/*
========================
idIndexBuffer::Update
========================
*/
void idIndexBuffer::Update( const void* data, int size, int offset ) const
{
assert( apiObject != VK_NULL_HANDLE );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( size > GetSize() )
{
idLib::FatalError( "idIndexBuffer::Update: size overrun, %i > %i\n", size, GetSize() );
}
if( usage == BU_DYNAMIC )
{
CopyBuffer(
#if defined( USE_AMD_ALLOCATOR )
( byte* )allocation.pMappedData + GetOffset() + offset,
#else
allocation.data + GetOffset() + offset,
#endif
( const byte* )data, size );
}
else
{
VkBuffer stageBuffer;
VkCommandBuffer commandBuffer;
int stageOffset = 0;
byte* stageData = stagingManager.Stage( size, 1, commandBuffer, stageBuffer, stageOffset );
memcpy( stageData, data, size );
VkBufferCopy bufferCopy = {};
bufferCopy.srcOffset = stageOffset;
bufferCopy.dstOffset = GetOffset() + offset;
bufferCopy.size = size;
vkCmdCopyBuffer( commandBuffer, stageBuffer, apiObject, 1, &bufferCopy );
}
}
/*
========================
idIndexBuffer::MapBuffer
========================
*/
void* idIndexBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idIndexBuffer::MapBuffer: Cannot map a buffer marked as BU_STATIC." );
}
#if defined( USE_AMD_ALLOCATOR )
void* buffer = ( byte* )allocation.pMappedData + GetOffset();
#else
void* buffer = allocation.data + GetOffset();
#endif
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idIndexBuffer::MapBuffer: failed" );
}
return buffer;
}
/*
========================
idIndexBuffer::UnmapBuffer
========================
*/
void idIndexBuffer::UnmapBuffer()
{
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idIndexBuffer::UnmapBuffer: Cannot unmap a buffer marked as BU_STATIC." );
}
SetUnmapped();
}
/*
========================
idIndexBuffer::ClearWithoutFreeing
========================
*/
void idIndexBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = VK_NULL_HANDLE;
#if defined( USE_AMD_ALLOCATOR )
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
allocation.deviceMemory = VK_NULL_HANDLE;
#endif
}
/*
================================================================================================
idUniformBuffer
================================================================================================
*/
/*
========================
idUniformBuffer::idUniformBuffer
========================
*/
idUniformBuffer::idUniformBuffer()
{
usage = BU_DYNAMIC;
SetUnmapped();
}
/*
========================
idUniformBuffer::AllocBufferObject
========================
*/
bool idUniformBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
assert( apiObject == VK_NULL_HANDLE );
assert_16_byte_aligned( data );
if( allocSize <= 0 )
{
idLib::Error( "idUniformBuffer::AllocBufferObject: allocSize = %i", allocSize );
}
size = allocSize;
usage = _usage;
bool allocationFailed = false;
const int numBytes = GetAllocedSize();
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.pNext = NULL;
bufferCreateInfo.size = numBytes;
bufferCreateInfo.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
if( usage == BU_STATIC )
{
bufferCreateInfo.usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
}
#if defined( USE_AMD_ALLOCATOR )
VmaMemoryRequirements vmaReq = {};
if( usage == BU_STATIC )
{
vmaReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
}
else if( usage == BU_DYNAMIC )
{
#if defined(__APPLE__)
// SRS - VMA_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vmaReq.usage = VMA_MEMORY_USAGE_CPU_ONLY;
#else
vmaReq.usage = VMA_MEMORY_USAGE_CPU_TO_GPU;
#endif
vmaReq.flags = VMA_MEMORY_REQUIREMENT_PERSISTENT_MAP_BIT;
}
ID_VK_CHECK( vmaCreateBuffer( vmaAllocator, &bufferCreateInfo, &vmaReq, &apiObject, &vmaAllocation, &allocation ) );
#else
VkResult ret = vkCreateBuffer( vkcontext.device, &bufferCreateInfo, NULL, &apiObject );
assert( ret == VK_SUCCESS );
VkMemoryRequirements memoryRequirements = {};
vkGetBufferMemoryRequirements( vkcontext.device, apiObject, &memoryRequirements );
#if defined(__APPLE__)
// SRS - VULKAN_MEMORY_USAGE_CPU_ONLY required for BU_DYNAMIC host coherency on OSX, otherwise black screen
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_ONLY;
#else
vulkanMemoryUsage_t memUsage = ( usage == BU_STATIC ) ? VULKAN_MEMORY_USAGE_GPU_ONLY : VULKAN_MEMORY_USAGE_CPU_TO_GPU;
#endif
allocation = vulkanAllocator.Allocate(
memoryRequirements.size,
memoryRequirements.alignment,
memoryRequirements.memoryTypeBits,
memUsage,
VULKAN_ALLOCATION_TYPE_BUFFER );
ID_VK_CHECK( vkBindBufferMemory( vkcontext.device, apiObject, allocation.deviceMemory, allocation.offset ) );
#endif
if( r_showBuffers.GetBool() )
{
idLib::Printf( "joint buffer alloc %p, (%i bytes)\n", this, GetSize() );
}
// copy the data
if( data != NULL )
{
Update( data, allocSize );
}
return !allocationFailed;
}
/*
========================
idUniformBuffer::FreeBufferObject
========================
*/
void idUniformBuffer::FreeBufferObject()
{
if( IsMapped() )
{
UnmapBuffer();
}
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
if( OwnsBuffer() == false )
{
ClearWithoutFreeing();
return;
}
if( apiObject == VK_NULL_HANDLE )
{
return;
}
if( r_showBuffers.GetBool() )
{
idLib::Printf( "joint buffer free %p, (%i bytes)\n", this, GetSize() );
}
if( apiObject != VK_NULL_HANDLE )
{
#if defined( USE_AMD_ALLOCATOR )
vmaDestroyBuffer( vmaAllocator, apiObject, vmaAllocation );
apiObject = VK_NULL_HANDLE;
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
vulkanAllocator.Free( allocation );
vkDestroyBuffer( vkcontext.device, apiObject, NULL );
apiObject = VK_NULL_HANDLE;
allocation = vulkanAllocation_t();
#endif
}
ClearWithoutFreeing();
}
/*
========================
idUniformBuffer::Update
========================
*/
void idUniformBuffer::Update( const void* data, int size, int offset ) const
{
assert( apiObject != VK_NULL_HANDLE );
assert_16_byte_aligned( data );
assert( ( GetOffset() & 15 ) == 0 );
if( size > GetSize() )
{
idLib::FatalError( "idUniformBuffer::Update: size overrun, %i > %i\n", size, size );
}
if( usage == BU_DYNAMIC )
{
CopyBuffer(
#if defined( USE_AMD_ALLOCATOR )
( byte* )allocation.pMappedData + GetOffset() + offset,
#else
allocation.data + GetOffset() + offset,
#endif
( const byte* )data, size );
}
else
{
VkBuffer stageBuffer;
VkCommandBuffer commandBuffer;
int stageOffset = 0;
byte* stageData = stagingManager.Stage( size, 1, commandBuffer, stageBuffer, stageOffset );
memcpy( stageData, data, size );
VkBufferCopy bufferCopy = {};
bufferCopy.srcOffset = stageOffset;
bufferCopy.dstOffset = GetOffset() + offset;
bufferCopy.size = size;
vkCmdCopyBuffer( commandBuffer, stageBuffer, apiObject, 1, &bufferCopy );
}
}
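// Hypothetical usage sketch (illustrative, not engine code): per-frame data
// such as joint matrices is typically streamed into a BU_DYNAMIC uniform
// buffer through Update(); the helper name and parameters are assumptions.
#if 0
static void Example_UploadJoints( idUniformBuffer& jointBuffer, const idJointMat* joints, int numJoints )
{
// BU_DYNAMIC takes the CopyBuffer path above; BU_STATIC would go through the staging manager
jointBuffer.Update( joints, numJoints * sizeof( idJointMat ), 0 );
}
#endif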
/*
========================
idUniformBuffer::MapBuffer
========================
*/
void* idUniformBuffer::MapBuffer( bufferMapType_t mapType )
{
assert( mapType == BM_WRITE );
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idUniformBuffer::MapBuffer: Cannot map a buffer marked as BU_STATIC." );
}
#if defined( USE_AMD_ALLOCATOR )
void* buffer = ( byte* )allocation.pMappedData + GetOffset();
#else
void* buffer = allocation.data + GetOffset();
#endif
SetMapped();
if( buffer == NULL )
{
idLib::FatalError( "idUniformBuffer::MapBuffer: failed" );
}
return buffer;
}
/*
========================
idUniformBuffer::UnmapBuffer
========================
*/
void idUniformBuffer::UnmapBuffer()
{
assert( apiObject != VK_NULL_HANDLE );
if( usage == BU_STATIC )
{
idLib::FatalError( "idUniformBuffer::UnmapBuffer: Cannot unmap a buffer marked as BU_STATIC." );
}
SetUnmapped();
}
/*
========================
idUniformBuffer::ClearWithoutFreeing
========================
*/
void idUniformBuffer::ClearWithoutFreeing()
{
size = 0;
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
apiObject = VK_NULL_HANDLE;
#if defined( USE_AMD_ALLOCATOR )
allocation = VmaAllocationInfo();
vmaAllocation = NULL;
#else
allocation.deviceMemory = VK_NULL_HANDLE;
#endif
}

View file

@@ -1,899 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013-2021 Robert Beckebans
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#pragma hdrstop
#include "precompiled.h"
//#include "../../libs/mesa/format_r11g11b10f.h"
/*
================================================================================================
Contains the Image implementation for Vulkan
================================================================================================
*/
#include "../RenderCommon.h"
#include "Staging_VK.h"
int idImage::garbageIndex = 0;
#if defined( USE_AMD_ALLOCATOR )
idList< VmaAllocation > idImage::allocationGarbage[ NUM_FRAME_DATA ];
#else
idList< vulkanAllocation_t > idImage::allocationGarbage[ NUM_FRAME_DATA ];
#endif
idList< VkImage > idImage::imageGarbage[ NUM_FRAME_DATA ];
idList< VkImageView > idImage::viewGarbage[ NUM_FRAME_DATA ];
idList< VkSampler > idImage::samplerGarbage[ NUM_FRAME_DATA ];
/*
====================
VK_GetFormatFromTextureFormat
====================
*/
static VkFormat VK_GetFormatFromTextureFormat( const textureFormat_t format )
{
switch( format )
{
case FMT_RGBA8:
return VK_FORMAT_R8G8B8A8_UNORM;
case FMT_XRGB8:
return VK_FORMAT_R8G8B8_UNORM;
case FMT_ALPHA:
return VK_FORMAT_R8_UNORM;
case FMT_L8A8:
return VK_FORMAT_R8G8_UNORM;
case FMT_LUM8:
return VK_FORMAT_R8_UNORM;
case FMT_INT8:
return VK_FORMAT_R8_UNORM;
case FMT_DXT1:
return VK_FORMAT_BC1_RGB_UNORM_BLOCK;
case FMT_DXT5:
return VK_FORMAT_BC3_UNORM_BLOCK;
case FMT_DEPTH:
case FMT_DEPTH_STENCIL:
return vkcontext.depthFormat;
case FMT_X16:
return VK_FORMAT_R16_UNORM;
case FMT_Y16_X16:
return VK_FORMAT_R16G16_UNORM;
case FMT_RGB565:
return VK_FORMAT_R5G6B5_UNORM_PACK16;
// RB begin
//case FMT_ETC1_RGB8_OES, // 4 bpp
//case FMT_SHADOW_ARRAY: // 32 bpp * 6
// return VK_FORMAT_
case FMT_RG16F:
return VK_FORMAT_R16G16_SFLOAT;
// we might want to use UNORM instead of SFLOAT
// however this is intended to be used for the HDR lights buffer which should be allowed to go beyond 1.0
case FMT_RGBA16F:
return VK_FORMAT_R16G16B16A16_SFLOAT;
case FMT_RGBA32F:
return VK_FORMAT_R32G32B32A32_SFLOAT;
case FMT_R32F:
return VK_FORMAT_R32_SFLOAT;
case FMT_R11G11B10F:
return VK_FORMAT_B10G11R11_UFLOAT_PACK32;
// RB end
default:
return VK_FORMAT_UNDEFINED;
}
}
/*
====================
VK_GetComponentMappingFromTextureFormat
====================
*/
static VkComponentMapping VK_GetComponentMappingFromTextureFormat( const textureFormat_t format, textureColor_t color )
{
VkComponentMapping componentMapping =
{
VK_COMPONENT_SWIZZLE_ZERO,
VK_COMPONENT_SWIZZLE_ZERO,
VK_COMPONENT_SWIZZLE_ZERO,
VK_COMPONENT_SWIZZLE_ZERO
};
if( color == CFM_GREEN_ALPHA )
{
componentMapping.r = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.g = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.b = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.a = VK_COMPONENT_SWIZZLE_G;
return componentMapping;
}
switch( format )
{
case FMT_LUM8:
componentMapping.r = VK_COMPONENT_SWIZZLE_R;
componentMapping.g = VK_COMPONENT_SWIZZLE_R;
componentMapping.b = VK_COMPONENT_SWIZZLE_R;
componentMapping.a = VK_COMPONENT_SWIZZLE_ONE;
break;
case FMT_L8A8:
componentMapping.r = VK_COMPONENT_SWIZZLE_R;
componentMapping.g = VK_COMPONENT_SWIZZLE_R;
componentMapping.b = VK_COMPONENT_SWIZZLE_R;
componentMapping.a = VK_COMPONENT_SWIZZLE_G;
break;
case FMT_ALPHA:
componentMapping.r = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.g = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.b = VK_COMPONENT_SWIZZLE_ONE;
componentMapping.a = VK_COMPONENT_SWIZZLE_R;
break;
case FMT_INT8:
componentMapping.r = VK_COMPONENT_SWIZZLE_R;
componentMapping.g = VK_COMPONENT_SWIZZLE_R;
componentMapping.b = VK_COMPONENT_SWIZZLE_R;
componentMapping.a = VK_COMPONENT_SWIZZLE_R;
break;
case FMT_R11G11B10F:
componentMapping.r = VK_COMPONENT_SWIZZLE_R;
componentMapping.g = VK_COMPONENT_SWIZZLE_G;
componentMapping.b = VK_COMPONENT_SWIZZLE_B;
componentMapping.a = VK_COMPONENT_SWIZZLE_ONE;
break;
default:
componentMapping.r = VK_COMPONENT_SWIZZLE_R;
componentMapping.g = VK_COMPONENT_SWIZZLE_G;
componentMapping.b = VK_COMPONENT_SWIZZLE_B;
componentMapping.a = VK_COMPONENT_SWIZZLE_A;
break;
}
return componentMapping;
}
/*
====================
idImage::idImage
====================
*/
idImage::idImage( const char* name ) : imgName( name )
{
// Vulkan specific
bIsSwapChainImage = false;
internalFormat = VK_FORMAT_UNDEFINED;
image = VK_NULL_HANDLE;
view = VK_NULL_HANDLE;
layout = VK_IMAGE_LAYOUT_GENERAL;
sampler = VK_NULL_HANDLE;
generatorFunction = NULL;
filter = TF_DEFAULT;
repeat = TR_REPEAT;
usage = TD_DEFAULT;
cubeFiles = CF_2D;
referencedOutsideLevelLoad = false;
levelLoadReferenced = false;
defaulted = false;
sourceFileTime = FILE_NOT_FOUND_TIMESTAMP;
binaryFileTime = FILE_NOT_FOUND_TIMESTAMP;
refCount = 0;
}
/*
====================
idImage::~idImage
====================
*/
idImage::~idImage()
{
if( !bIsSwapChainImage )
{
PurgeImage();
}
}
/*
====================
idImage::IsLoaded
====================
*/
bool idImage::IsLoaded() const
{
return image != VK_NULL_HANDLE; // TODO_VK maybe do something better than this.
}
/*
====================
idImage::CreateSampler
====================
*/
void idImage::CreateSampler()
{
VkSamplerCreateInfo createInfo = {};
createInfo.sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO;
createInfo.maxAnisotropy = 1.0f;
createInfo.anisotropyEnable = VK_FALSE;
createInfo.compareEnable = ( opts.format == FMT_DEPTH );
createInfo.compareOp = ( opts.format == FMT_DEPTH ) ? VK_COMPARE_OP_LESS_OR_EQUAL : VK_COMPARE_OP_NEVER;
// RB: support textureLod
createInfo.minLod = 0.0f;
createInfo.maxLod = opts.numLevels;
switch( filter )
{
case TF_DEFAULT:
createInfo.minFilter = VK_FILTER_LINEAR;
createInfo.magFilter = VK_FILTER_LINEAR;
createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
// RB: enable anisotropic filtering
if( r_maxAnisotropicFiltering.GetInteger() > 0 )
{
createInfo.anisotropyEnable = VK_TRUE;
createInfo.maxAnisotropy = Min( r_maxAnisotropicFiltering.GetFloat(), vkcontext.gpu->props.limits.maxSamplerAnisotropy );
}
break;
case TF_LINEAR:
createInfo.minFilter = VK_FILTER_LINEAR;
createInfo.magFilter = VK_FILTER_LINEAR;
createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
break;
case TF_NEAREST:
createInfo.minFilter = VK_FILTER_NEAREST;
createInfo.magFilter = VK_FILTER_NEAREST;
createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_NEAREST;
break;
// RB:
case TF_NEAREST_MIPMAP:
createInfo.minFilter = VK_FILTER_NEAREST;
createInfo.magFilter = VK_FILTER_NEAREST;
createInfo.mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR;
break;
default:
idLib::FatalError( "idImage::CreateSampler: unrecognized texture filter %d", filter );
}
switch( repeat )
{
case TR_REPEAT:
createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT;
createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT;
createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT;
break;
case TR_CLAMP:
createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE;
break;
case TR_CLAMP_TO_ZERO_ALPHA:
createInfo.borderColor = VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK;
createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
break;
case TR_CLAMP_TO_ZERO:
createInfo.borderColor = VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK;
createInfo.addressModeU = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
createInfo.addressModeV = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
createInfo.addressModeW = VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER;
break;
default:
idLib::FatalError( "idImage::CreateSampler: unrecognized texture repeat mode %d", repeat );
}
ID_VK_CHECK( vkCreateSampler( vkcontext.device, &createInfo, NULL, &sampler ) );
}
/*
====================
idImage::EmptyGarbage
====================
*/
void idImage::EmptyGarbage()
{
garbageIndex = ( garbageIndex + 1 ) % NUM_FRAME_DATA;
#if defined( USE_AMD_ALLOCATOR )
idList< VmaAllocation >& allocationsToFree = allocationGarbage[ garbageIndex ];
#else
idList< vulkanAllocation_t >& allocationsToFree = allocationGarbage[ garbageIndex ];
#endif
idList< VkImage >& imagesToFree = imageGarbage[ garbageIndex ];
idList< VkImageView >& viewsToFree = viewGarbage[ garbageIndex ];
idList< VkSampler >& samplersToFree = samplerGarbage[ garbageIndex ];
#if defined( USE_AMD_ALLOCATOR )
const int numAllocations = allocationsToFree.Num();
for( int i = 0; i < numAllocations; ++i )
{
vmaDestroyImage( vmaAllocator, imagesToFree[ i ], allocationsToFree[ i ] );
}
#else
const int numAllocations = allocationsToFree.Num();
for( int i = 0; i < numAllocations; ++i )
{
vulkanAllocator.Free( allocationsToFree[ i ] );
}
const int numImages = imagesToFree.Num();
for( int i = 0; i < numImages; ++i )
{
vkDestroyImage( vkcontext.device, imagesToFree[ i ], NULL );
}
#endif
const int numViews = viewsToFree.Num();
for( int i = 0; i < numViews; ++i )
{
vkDestroyImageView( vkcontext.device, viewsToFree[ i ], NULL );
}
const int numSamplers = samplersToFree.Num();
for( int i = 0; i < numSamplers; ++i )
{
vkDestroySampler( vkcontext.device, samplersToFree[ i ], NULL );
}
allocationsToFree.Clear();
imagesToFree.Clear();
viewsToFree.Clear();
samplersToFree.Clear();
}
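// The garbage lists implement deferred destruction: PurgeImage() only queues
// handles, and EmptyGarbage() is intended to run once per frame, freeing the
// resources queued NUM_FRAME_DATA frames ago once the GPU can no longer be
// using them. A hypothetical call site (illustrative only):
#if 0
// somewhere in the backend's end-of-frame path, after the frame fence signaled
idImage::EmptyGarbage();
#endif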
/*
==============
Bind
Automatically enables 2D mapping or cube mapping if needed
==============
*/
void idImage::Bind()
{
RENDERLOG_PRINTF( "GL_BindTexture( %s )\n", GetName() );
vkcontext.imageParms[ vkcontext.currentImageParm ] = this;
}
/*
====================
CopyFramebuffer
====================
*/
void idImage::CopyFramebuffer( int x, int y, int imageWidth, int imageHeight )
{
#if 0
VkCommandBuffer commandBuffer = vkcontext.commandBuffer[ vkcontext.frameParity ];
vkCmdEndRenderPass( commandBuffer );
VkImageMemoryBarrier dstBarrier = {};
dstBarrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
dstBarrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
dstBarrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
dstBarrier.image = GetImage();
dstBarrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
dstBarrier.subresourceRange.baseMipLevel = 0;
dstBarrier.subresourceRange.levelCount = 1;
dstBarrier.subresourceRange.baseArrayLayer = 0;
dstBarrier.subresourceRange.layerCount = 1;
// Pre copy transitions
{
// Transition the color dst image so we can transfer to it.
dstBarrier.oldLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
dstBarrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
dstBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
dstBarrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
vkCmdPipelineBarrier(
commandBuffer,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT,
0, 0, NULL, 0, NULL, 1, &dstBarrier );
}
// Perform the blit/copy
{
VkImageBlit region = {};
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.baseArrayLayer = 0;
region.srcSubresource.mipLevel = 0;
region.srcSubresource.layerCount = 1;
region.srcOffsets[ 1 ] = { imageWidth, imageHeight, 1 };
region.dstSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.dstSubresource.baseArrayLayer = 0;
region.dstSubresource.mipLevel = 0;
region.dstSubresource.layerCount = 1;
region.dstOffsets[ 1 ] = { imageWidth, imageHeight, 1 };
vkCmdBlitImage(
commandBuffer,
vkcontext.swapchainImages[ vkcontext.currentSwapIndex ], VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
GetImage(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region, VK_FILTER_NEAREST );
}
// Post copy transitions
{
// Transition the color dst image so we can transfer to it.
dstBarrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
dstBarrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
dstBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
dstBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier(
commandBuffer,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,
0, 0, NULL, 0, NULL, 1, &dstBarrier );
}
VkRenderPassBeginInfo renderPassBeginInfo = {};
renderPassBeginInfo.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
renderPassBeginInfo.renderPass = vkcontext.renderPass;
renderPassBeginInfo.framebuffer = vkcontext.frameBuffers[ vkcontext.currentSwapIndex ];
renderPassBeginInfo.renderArea.extent = vkcontext.swapchainExtent;
vkCmdBeginRenderPass( commandBuffer, &renderPassBeginInfo, VK_SUBPASS_CONTENTS_INLINE );
#endif
}
/*
====================
CopyDepthbuffer
====================
*/
void idImage::CopyDepthbuffer( int x, int y, int imageWidth, int imageHeight )
{
}
/*
========================
idImage::SetTexParameters
========================
*/
void idImage::SetTexParameters()
{
}
/*
====================
idImage::SetSamplerState
====================
*/
void idImage::SetSamplerState( textureFilter_t filter, textureRepeat_t repeat )
{
}
/*
========================
idImage::AllocImage
Every image will pass through this function. Allocates all the necessary MipMap levels for the
Image, but doesn't put anything in them.
This should not be done during normal game-play, if you can avoid it.
========================
*/
void idImage::AllocImage()
{
PurgeImage();
internalFormat = VK_GetFormatFromTextureFormat( opts.format );
// Create Sampler
CreateSampler();
VkImageUsageFlags usageFlags = VK_IMAGE_USAGE_SAMPLED_BIT;
if( opts.format == FMT_DEPTH )
{
usageFlags |= VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT;
}
else
{
usageFlags |= VK_IMAGE_USAGE_TRANSFER_DST_BIT;
}
// Create Image
VkImageCreateInfo imageCreateInfo = {};
imageCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
imageCreateInfo.flags = ( opts.textureType == TT_CUBIC ) ? VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT : 0;
imageCreateInfo.imageType = VK_IMAGE_TYPE_2D;
imageCreateInfo.format = internalFormat;
imageCreateInfo.extent.width = opts.width;
imageCreateInfo.extent.height = opts.height;
imageCreateInfo.extent.depth = 1;
imageCreateInfo.mipLevels = opts.numLevels;
imageCreateInfo.arrayLayers = ( opts.textureType == TT_CUBIC ) ? 6 : 1;
imageCreateInfo.samples = static_cast< VkSampleCountFlagBits >( opts.samples );
imageCreateInfo.tiling = VK_IMAGE_TILING_OPTIMAL;
imageCreateInfo.usage = usageFlags;
imageCreateInfo.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
imageCreateInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
#if defined( USE_AMD_ALLOCATOR )
VmaMemoryRequirements vmaReq = {};
vmaReq.usage = VMA_MEMORY_USAGE_GPU_ONLY;
ID_VK_CHECK( vmaCreateImage( vmaAllocator, &imageCreateInfo, &vmaReq, &image, &allocation, NULL ) );
#else
ID_VK_CHECK( vkCreateImage( vkcontext.device, &imageCreateInfo, NULL, &image ) );
VkMemoryRequirements memoryRequirements;
vkGetImageMemoryRequirements( vkcontext.device, image, &memoryRequirements );
allocation = vulkanAllocator.Allocate(
memoryRequirements.size,
memoryRequirements.alignment,
memoryRequirements.memoryTypeBits,
VULKAN_MEMORY_USAGE_GPU_ONLY,
VULKAN_ALLOCATION_TYPE_IMAGE_OPTIMAL );
ID_VK_CHECK( vkBindImageMemory( vkcontext.device, image, allocation.deviceMemory, allocation.offset ) );
#endif
// Eric: disable for now to clean the terminal output
// idLib::Printf( "Vulkan Image alloc '%s': %p\n", GetName(), image );
// Create Image View
VkImageViewCreateInfo viewCreateInfo = {};
viewCreateInfo.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO;
viewCreateInfo.image = image;
viewCreateInfo.viewType = ( opts.textureType == TT_CUBIC ) ? VK_IMAGE_VIEW_TYPE_CUBE : VK_IMAGE_VIEW_TYPE_2D;
viewCreateInfo.format = internalFormat;
viewCreateInfo.components = VK_GetComponentMappingFromTextureFormat( opts.format, opts.colorFormat );
// SRS - Added FMT_DEPTH_STENCIL case
viewCreateInfo.subresourceRange.aspectMask = ( opts.format == FMT_DEPTH || opts.format == FMT_DEPTH_STENCIL ) ? VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT : VK_IMAGE_ASPECT_COLOR_BIT;
viewCreateInfo.subresourceRange.levelCount = opts.numLevels;
viewCreateInfo.subresourceRange.layerCount = ( opts.textureType == TT_CUBIC ) ? 6 : 1;
viewCreateInfo.subresourceRange.baseMipLevel = 0;
ID_VK_CHECK( vkCreateImageView( vkcontext.device, &viewCreateInfo, NULL, &view ) );
}
/*
====================
idImage::PurgeImage
====================
*/
void idImage::PurgeImage()
{
if( sampler != VK_NULL_HANDLE )
{
samplerGarbage[ garbageIndex ].Append( sampler );
sampler = VK_NULL_HANDLE;
}
if( image != VK_NULL_HANDLE )
{
allocationGarbage[ garbageIndex ].Append( allocation );
viewGarbage[ garbageIndex ].Append( view );
imageGarbage[ garbageIndex ].Append( image );
#if defined( USE_AMD_ALLOCATOR )
allocation = NULL;
#else
allocation = vulkanAllocation_t();
#endif
view = VK_NULL_HANDLE;
image = VK_NULL_HANDLE;
}
}
/*
========================
idImage::Resize
========================
*/
void idImage::Resize( int width, int height )
{
}
/*
====================
idImage::SubImageUpload
====================
*/
void idImage::SubImageUpload( int mipLevel, int x, int y, int z, int width, int height, const void* pic, int pixelPitch )
{
assert( x >= 0 && y >= 0 && mipLevel >= 0 && width >= 0 && height >= 0 && mipLevel < opts.numLevels );
// SRS - Calculate buffer size without changing original width and height dimensions for compressed images
int bufferW = width;
int bufferH = height;
if( IsCompressed() )
{
bufferW = ( width + 3 ) & ~3;
bufferH = ( height + 3 ) & ~3;
}
int size = bufferW * bufferH * BitsForFormat( opts.format ) / 8;
// SRS end
VkBuffer buffer;
VkCommandBuffer commandBuffer;
int offset = 0;
byte* data = stagingManager.Stage( size, 16, commandBuffer, buffer, offset );
if( opts.format == FMT_RGB565 )
{
byte* imgData = ( byte* )pic;
for( int i = 0; i < size; i += 2 )
{
data[ i ] = imgData[ i + 1 ];
data[ i + 1 ] = imgData[ i ];
}
}
#if 0
else if( opts.format == FMT_R11G11B10F )
{
// convert R11G11B10F to RGBA8 for testing
byte* imgData = ( byte* )pic;
for( int i = 0; i < size; i += 4 )
{
// unpack RGBA8 to 3 floats
union
{
uint32 i;
byte b[4];
} tmp;
tmp.b[0] = imgData[ i + 0 ];
tmp.b[1] = imgData[ i + 1 ];
tmp.b[2] = imgData[ i + 2 ];
tmp.b[3] = imgData[ i + 3 ];
float hdr[3];
r11g11b10f_to_float3( tmp.i, hdr );
// tonemap
hdr[0] = hdr[0] / ( hdr[0] + 1.0f );
hdr[1] = hdr[1] / ( hdr[1] + 1.0f );
hdr[2] = hdr[2] / ( hdr[2] + 1.0f );
// tonemapped to LDR
data[ i + 0 ] = byte( hdr[0] * 255 );
data[ i + 1 ] = byte( hdr[1] * 255 );
data[ i + 2 ] = byte( hdr[2] * 255 );
data[ i + 3 ] = 255;
}
}
#endif
#if defined(__APPLE__) && defined(USE_BINKDEC)
else if( opts.format == FMT_LUM8 && ( imgName == "_cinematicCr" || imgName == "_cinematicCb" ) )
{
// SRS - When decoding YUV420 cinematics on OSX, copy and duplicate individual rows of half-height chroma planes into full-height planes
// This works around a stall that occurs with half-height planes when exiting levels or after demo playback (possible issue in MoltenVK??)
// ***IMPORTANT - Assumes that SubImageUpload() has been called with half-width and full-height parameters and a packed pic buffer ***
byte* imgData = ( byte* )pic;
int evenRow;
for( int i = 0; i < size / 2; i++ )
{
evenRow = ( i / width ) * 2;
data[ evenRow * width + i % width ] = imgData[ i ]; // SRS - Copy image data into even-numbered rows of new chroma plane
data[( evenRow + 1 ) * width + i % width ] = imgData[ i ]; // SRS - Duplicate image data into odd-numbered rows of new chroma plane
}
}
#endif
else
{
memcpy( data, pic, size );
}
VkBufferImageCopy imgCopy = {};
imgCopy.bufferOffset = offset;
imgCopy.bufferRowLength = pixelPitch;
imgCopy.bufferImageHeight = bufferH; // SRS - Use buffer height vs. image height to avoid vulkan errors for compressed images
imgCopy.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
imgCopy.imageSubresource.layerCount = 1;
imgCopy.imageSubresource.mipLevel = mipLevel;
imgCopy.imageSubresource.baseArrayLayer = z;
imgCopy.imageOffset.x = x;
imgCopy.imageOffset.y = y;
imgCopy.imageOffset.z = 0;
imgCopy.imageExtent.width = width; // SRS - Always use original width and height dimensions, even when image is compressed
imgCopy.imageExtent.height = height;
imgCopy.imageExtent.depth = 1;
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
barrier.subresourceRange.baseMipLevel = 0;
barrier.subresourceRange.levelCount = opts.numLevels;
barrier.subresourceRange.baseArrayLayer = z;
barrier.subresourceRange.layerCount = 1;
barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
barrier.newLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.srcAccessMask = 0;
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
vkCmdPipelineBarrier( commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0, 0, NULL, 0, NULL, 1, &barrier );
vkCmdCopyBufferToImage( commandBuffer, buffer, image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imgCopy );
barrier.oldLayout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
barrier.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
vkCmdPipelineBarrier( commandBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, NULL, 0, NULL, 1, &barrier );
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
}
// SRS - added method to set image layout
/*
====================
idImage::SetImageLayout
====================
*/
void idImage::SetImageLayout( VkImage image, VkImageSubresourceRange subresourceRange, VkImageLayout oldImageLayout, VkImageLayout newImageLayout )
{
VkBuffer buffer;
VkCommandBuffer commandBuffer;
int size = 0;
int offset = 0;
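// NOTE: Stage() is called with a zero size here only to acquire the current
// staging command buffer; the returned pointer and buffer go unused.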
byte* data = stagingManager.Stage( size, 16, commandBuffer, buffer, offset );
VkImageMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
barrier.image = image;
barrier.subresourceRange = subresourceRange;
barrier.oldLayout = oldImageLayout;
barrier.newLayout = newImageLayout;
// Source layouts (old)
// Source access mask controls actions that have to be finished on the old layout before it will be transitioned to the new layout
switch( oldImageLayout )
{
case VK_IMAGE_LAYOUT_UNDEFINED:
// Image layout is undefined (or does not matter)
// Only valid as initial layout
// No flags required, listed only for completeness
barrier.srcAccessMask = 0;
break;
case VK_IMAGE_LAYOUT_PREINITIALIZED:
// Image is preinitialized
// Only valid as initial layout for linear images, preserves memory contents
// Make sure host writes have been finished
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image is a color attachment
// Make sure any writes to the color buffer have been finished
barrier.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image is a depth/stencil attachment
// Make sure any writes to the depth/stencil buffer have been finished
barrier.srcAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image is a transfer source
// Make sure any reads from the image have been finished
barrier.srcAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image is a transfer destination
// Make sure any writes to the image have been finished
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image is read by a shader
// Make sure any shader reads from the image have been finished
barrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other source layouts aren't handled (yet)
break;
}
// Target layouts (new)
// Destination access mask controls the dependency for the new image layout
switch( newImageLayout )
{
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
// Image will be used as a transfer destination
// Make sure any writes to the image have been finished
barrier.dstAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
// Image will be used as a transfer source
// Make sure any reads from the image have been finished
barrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT;
break;
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
// Image will be used as a color attachment
// Make sure any writes to the color buffer have been finished
barrier.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
// Image layout will be used as a depth/stencil attachment
// Make sure any writes to depth/stencil buffer have been finished
barrier.dstAccessMask = VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT;
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
// Image will be read in a shader (sampler, input attachment)
// Make sure any writes to the image have been finished
if( barrier.srcAccessMask == 0 )
{
barrier.srcAccessMask = VK_ACCESS_HOST_WRITE_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
}
barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
break;
default:
// Other destination layouts aren't handled (yet)
break;
}
vkCmdPipelineBarrier( commandBuffer, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, 0, 0, NULL, 0, NULL, 1, &barrier );
}
// SRS End
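// Hypothetical usage sketch (illustrative, not part of the original file):
// transitioning a color image for sampling; the variable names, and the
// assumption that GetImage() is accessible here, are the author's own.
#if 0
static void Example_TransitionForSampling( idImage* img )
{
VkImageSubresourceRange range = {};
range.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
range.baseMipLevel = 0;
range.levelCount = 1;
range.baseArrayLayer = 0;
range.layerCount = 1;
img->SetImageLayout( img->GetImage(), range, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL );
}
#endif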

File diff suppressed because it is too large

View file

@@ -1,715 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2014-2016 Robert Beckebans
Copyright (C) 2014-2016 Kot in Action Creative Artel
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#include "precompiled.h"
#pragma hdrstop
#include "../RenderCommon.h"
/*
================
idRenderBackend::DBG_SimpleSurfaceSetup
================
*/
void idRenderBackend::DBG_SimpleSurfaceSetup( const drawSurf_t* drawSurf )
{
}
/*
================
idRenderBackend::DBG_SimpleWorldSetup
================
*/
void idRenderBackend::DBG_SimpleWorldSetup()
{
}
/*
=================
idRenderBackend::DBG_PolygonClear
This will cover the entire screen with normal rasterization.
Texturing is disabled, but the existing glColor, glDepthMask,
glColorMask, and the enabled state of depth buffering and
stenciling will matter.
=================
*/
void idRenderBackend::DBG_PolygonClear()
{
}
/*
====================
idRenderBackend::DBG_ShowDestinationAlpha
====================
*/
void idRenderBackend::DBG_ShowDestinationAlpha()
{
}
/*
===================
idRenderBackend::DBG_ScanStencilBuffer
Debugging tool to see what values are in the stencil buffer
===================
*/
void idRenderBackend::DBG_ScanStencilBuffer()
{
}
/*
===================
idRenderBackend::DBG_CountStencilBuffer
Print an overdraw count based on stencil index values
===================
*/
void idRenderBackend::DBG_CountStencilBuffer()
{
}
/*
===================
idRenderBackend::DBG_ColorByStencilBuffer
Sets the screen colors based on the contents of the
stencil buffer. Stencil of 0 = black, 1 = red, 2 = green,
3 = blue, ..., 7+ = white
===================
*/
void idRenderBackend::DBG_ColorByStencilBuffer()
{
}
/*
==================
idRenderBackend::DBG_ShowOverdraw
==================
*/
void idRenderBackend::DBG_ShowOverdraw()
{
}
/*
===================
idRenderBackend::DBG_ShowIntensity
Debugging tool to see how much dynamic range a scene is using.
The greatest of the rgb values at each pixel will be used, with
the resulting color shading from red at 0 to green at 128 to blue at 255
===================
*/
void idRenderBackend::DBG_ShowIntensity()
{
}
/*
===================
idRenderBackend::DBG_ShowDepthBuffer
Draw the depth buffer as colors
===================
*/
void idRenderBackend::DBG_ShowDepthBuffer()
{
}
/*
=================
idRenderBackend::DBG_ShowLightCount
This is a debugging tool that will draw each surface with a color
based on how many lights are affecting it
=================
*/
void idRenderBackend::DBG_ShowLightCount()
{
}
/*
====================
idRenderBackend::DBG_RenderDrawSurfListWithFunction
The triangle functions can check backEnd.currentSpace != surf->space
to see if they need to perform any new matrix setup. The modelview
matrix will already have been loaded, and backEnd.currentSpace will
be updated after the triangle function completes.
====================
*/
void idRenderBackend::DBG_RenderDrawSurfListWithFunction( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=================
idRenderBackend::DBG_ShowSilhouette
Blacks out all edges, then adds color for each edge that a shadow
plane extends from, allowing you to see doubled edges
FIXME: not thread safe!
=================
*/
void idRenderBackend::DBG_ShowSilhouette()
{
}
/*
=====================
idRenderBackend::DBG_ShowTris
Debugging tool
=====================
*/
void idRenderBackend::DBG_ShowTris( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowSurfaceInfo
Debugging tool
=====================
*/
void idRenderSystemLocal::OnFrame()
{
}
void idRenderBackend::DBG_ShowSurfaceInfo( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowViewEntitys
Debugging tool
=====================
*/
void idRenderBackend::DBG_ShowViewEntitys( viewEntity_t* vModels )
{
}
/*
=====================
idRenderBackend::DBG_ShowTexturePolarity
Shade triangles red if they have a positive texture area,
green if they have a negative texture area, or blue if the area is degenerate
=====================
*/
void idRenderBackend::DBG_ShowTexturePolarity( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowUnsmoothedTangents
Shade materials that are using unsmoothed tangents
=====================
*/
void idRenderBackend::DBG_ShowUnsmoothedTangents( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
RB_ShowTangentSpace
Shade a triangle by the RGB colors of its tangent space
1 = tangents[0]
2 = tangents[1]
3 = normal
=====================
*/
void idRenderBackend::DBG_ShowTangentSpace( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowVertexColor
Draw each triangle with the solid vertex colors
=====================
*/
void idRenderBackend::DBG_ShowVertexColor( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowNormals
Debugging tool
=====================
*/
void idRenderBackend::DBG_ShowNormals( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowTextureVectors
Draw texture vectors in the center of each triangle
=====================
*/
void idRenderBackend::DBG_ShowTextureVectors( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowDominantTris
Draw lines from each vertex to the dominant triangle center
=====================
*/
void idRenderBackend::DBG_ShowDominantTris( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=====================
idRenderBackend::DBG_ShowEdges
Debugging tool
=====================
*/
void idRenderBackend::DBG_ShowEdges( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
==============
RB_ShowLights
Visualize all light volumes used in the current scene
r_showLights 1 : just print volume numbers, highlighting ones covering the view
r_showLights 2 : also draw planes of each volume
r_showLights 3 : also draw edges of each volume
==============
*/
void idRenderBackend::DBG_ShowLights()
{
}
// RB begin
void idRenderBackend::DBG_ShowShadowMapLODs()
{
}
// RB end
/*
=====================
idRenderBackend::DBG_ShowPortals
Debugging tool, won't work correctly with SMP or when mirrors are present
=====================
*/
void idRenderBackend::DBG_ShowPortals()
{
}
/*
================
idRenderBackend::DBG_ClearDebugText
================
*/
void RB_ClearDebugText( int time )
{
}
/*
================
RB_AddDebugText
================
*/
void RB_AddDebugText( const char* text, const idVec3& origin, float scale, const idVec4& color, const idMat3& viewAxis, const int align, const int lifetime, const bool depthTest )
{
}
/*
================
RB_DrawTextLength
returns the length of the given text
================
*/
float RB_DrawTextLength( const char* text, float scale, int len )
{
return 0;
}
/*
================
RB_DrawText
oriented on the viewaxis
align can be 0-left, 1-center (default), 2-right
================
*/
void RB_DrawText( const char* text, const idVec3& origin, float scale, const idVec4& color, const idMat3& viewAxis, const int align )
{
}
/*
================
idRenderBackend::DBG_ShowDebugText
================
*/
void idRenderBackend::DBG_ShowDebugText()
{
}
/*
================
RB_ClearDebugLines
================
*/
void RB_ClearDebugLines( int time )
{
}
/*
================
RB_AddDebugLine
================
*/
void RB_AddDebugLine( const idVec4& color, const idVec3& start, const idVec3& end, const int lifeTime, const bool depthTest )
{
}
/*
================
idRenderBackend::DBG_ShowDebugLines
================
*/
void idRenderBackend::DBG_ShowDebugLines()
{
}
/*
================
RB_ClearDebugPolygons
================
*/
void RB_ClearDebugPolygons( int time )
{
}
/*
================
RB_AddDebugPolygon
================
*/
void RB_AddDebugPolygon( const idVec4& color, const idWinding& winding, const int lifeTime, const bool depthTest )
{
}
/*
================
idRenderBackend::DBG_ShowDebugPolygons
================
*/
void idRenderBackend::DBG_ShowDebugPolygons()
{
}
/*
================
idRenderBackend::DBG_ShowCenterOfProjection
================
*/
void idRenderBackend::DBG_ShowCenterOfProjection()
{
}
/*
================
idRenderBackend::DBG_ShowLines
Draw exact pixel lines to check pixel center sampling
================
*/
void idRenderBackend::DBG_ShowLines()
{
}
/*
================
idRenderBackend::DBG_TestGamma
================
*/
void idRenderBackend::DBG_TestGamma()
{
}
/*
==================
idRenderBackend::DBG_TestGammaBias
==================
*/
void idRenderBackend::DBG_TestGammaBias()
{
}
/*
================
idRenderBackend::DBG_TestImage
Display a single image over most of the screen
================
*/
void idRenderBackend::DBG_TestImage()
{
idImage* image = NULL;
idImage* imageCr = NULL;
idImage* imageCb = NULL;
int max;
float w, h;
image = tr.testImage;
if( !image )
{
return;
}
if( tr.testVideo )
{
cinData_t cin;
// SRS - Don't need calibrated time for testing cinematics, so just call ImageForTime() with current system time
// This simplification allows cinematic test playback to work over both 2D and 3D background scenes
cin = tr.testVideo->ImageForTime( Sys_Milliseconds() /*viewDef->renderView.time[1] - tr.testVideoStartTime*/ );
if( cin.imageY != NULL )
{
image = cin.imageY;
imageCr = cin.imageCr;
imageCb = cin.imageCb;
}
// SRS - Also handle ffmpeg and original RoQ decoders for test videos (using cin.image)
else if( cin.image != NULL )
{
image = cin.image;
}
else
{
tr.testImage = NULL;
return;
}
w = 0.25;
h = 0.25;
}
else
{
max = image->GetUploadWidth() > image->GetUploadHeight() ? image->GetUploadWidth() : image->GetUploadHeight();
w = 0.25 * image->GetUploadWidth() / max;
h = 0.25 * image->GetUploadHeight() / max;
w *= ( float )renderSystem->GetHeight() / renderSystem->GetWidth();
}
// Set State
GL_State( GLS_DEPTHFUNC_ALWAYS | GLS_CULL_TWOSIDED | GLS_SRCBLEND_ONE | GLS_DSTBLEND_ZERO );
// Set Parms
float texS[4] = { 1.0f, 0.0f, 0.0f, 0.0f };
float texT[4] = { 0.0f, 1.0f, 0.0f, 0.0f };
renderProgManager.SetRenderParm( RENDERPARM_TEXTUREMATRIX_S, texS );
renderProgManager.SetRenderParm( RENDERPARM_TEXTUREMATRIX_T, texT );
float texGenEnabled[4] = { 0, 0, 0, 0 };
renderProgManager.SetRenderParm( RENDERPARM_TEXGEN_0_ENABLED, texGenEnabled );
// not really necessary but just for clarity
const float screenWidth = 1.0f;
const float screenHeight = 1.0f;
const float halfScreenWidth = screenWidth * 0.5f;
const float halfScreenHeight = screenHeight * 0.5f;
float scale[16] = { 0 };
scale[0] = w; // scale
scale[5] = h; // scale
scale[12] = halfScreenWidth - ( halfScreenWidth * w ); // translate
scale[13] = halfScreenHeight / 2 - ( halfScreenHeight * h ); // translate (SRS - center of console dropdown)
scale[10] = 1.0f;
scale[15] = 1.0f;
// RB: orthographic projection is changed for Vulkan
float ortho[16] = { 0 };
ortho[0] = 2.0f / screenWidth;
ortho[5] = 2.0f / screenHeight;
ortho[10] = -1.0f;
ortho[12] = -1.0f;
ortho[13] = -1.0f;
ortho[14] = 0.0f;
ortho[15] = 1.0f;
float finalOrtho[16];
R_MatrixMultiply( scale, ortho, finalOrtho );
float projMatrixTranspose[16];
R_MatrixTranspose( finalOrtho, projMatrixTranspose );
renderProgManager.SetRenderParms( RENDERPARM_MVPMATRIX_X, projMatrixTranspose, 4 );
// Set Color
GL_Color( 1, 1, 1, 1 );
// Bind the Texture
if( ( imageCr != NULL ) && ( imageCb != NULL ) )
{
GL_SelectTexture( 0 );
image->Bind();
GL_SelectTexture( 1 );
imageCr->Bind();
GL_SelectTexture( 2 );
imageCb->Bind();
// SRS - Use Bink shader with no sRGB to linear conversion, otherwise cinematic colours may be wrong
// BindShader_BinkGUI() does not seem to work here - perhaps due to vertex shader input dependencies?
renderProgManager.BindShader_Bink_sRGB();
}
else
{
GL_SelectTexture( 0 );
image->Bind();
renderProgManager.BindShader_Texture();
}
// Draw!
DrawElementsWithCounters( &testImageSurface );
}
// RB begin
void idRenderBackend::DBG_ShowShadowMaps()
{
}
// RB end
/*
=================
RB_DrawExpandedTriangles
=================
*/
static void RB_DrawExpandedTriangles( const srfTriangles_t* tri, const float radius, const idVec3& vieworg )
{
}
/*
================
idRenderBackend::DBG_ShowTrace
Debug visualization
FIXME: not thread safe!
================
*/
void idRenderBackend::DBG_ShowTrace( drawSurf_t** drawSurfs, int numDrawSurfs )
{
}
/*
=================
idRenderBackend::DBG_RenderDebugTools
=================
*/
void idRenderBackend::DBG_RenderDebugTools( drawSurf_t** drawSurfs, int numDrawSurfs )
{
if( viewDef->renderView.rdflags & RDF_IRRADIANCE )
{
return;
}
// don't do much if this was a 2D rendering
if( !viewDef->viewEntitys )
{
DBG_TestImage();
DBG_ShowLines();
return;
}
// TODO
}
/*
=================
RB_ShutdownDebugTools
=================
*/
void RB_ShutdownDebugTools()
{
}

File diff suppressed because it is too large

View file

@@ -1,283 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#pragma hdrstop
#include "precompiled.h"
#include "../RenderCommon.h"
#include "../RenderBackend.h"
#include "Staging_VK.h"
idCVar r_vkUploadBufferSizeMB( "r_vkUploadBufferSizeMB", "64", CVAR_INTEGER | CVAR_INIT, "Size of gpu upload buffer." );
idCVar r_vkStagingMaxCommands( "r_vkStagingMaxCommands", "-1", CVAR_INTEGER | CVAR_INIT, "Maximum amount of commands staged (-1 for no limit)" );
/*
===========================================================================
idVulkanStagingManager
===========================================================================
*/
idVulkanStagingManager stagingManager;
/*
=============
idVulkanStagingManager::idVulkanStagingManager
=============
*/
idVulkanStagingManager::idVulkanStagingManager() :
maxBufferSize( 0 ),
currentBuffer( 0 ),
mappedData( NULL ),
memory( VK_NULL_HANDLE ),
commandPool( VK_NULL_HANDLE )
{
}
/*
=============
idVulkanStagingManager::~idVulkanStagingManager
=============
*/
idVulkanStagingManager::~idVulkanStagingManager()
{
}
/*
=============
idVulkanStagingManager::Init
=============
*/
void idVulkanStagingManager::Init()
{
maxBufferSize = ( size_t )( r_vkUploadBufferSizeMB.GetInteger() * 1024 * 1024 );
VkBufferCreateInfo bufferCreateInfo = {};
bufferCreateInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
bufferCreateInfo.size = maxBufferSize;
bufferCreateInfo.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
for( int i = 0; i < NUM_FRAME_DATA; ++i )
{
buffers[ i ].offset = 0;
ID_VK_CHECK( vkCreateBuffer( vkcontext.device, &bufferCreateInfo, NULL, &buffers[ i ].buffer ) );
}
VkMemoryRequirements memoryRequirements;
vkGetBufferMemoryRequirements( vkcontext.device, buffers[ 0 ].buffer, &memoryRequirements );
const VkDeviceSize alignMod = memoryRequirements.size % memoryRequirements.alignment;
const VkDeviceSize alignedSize = ( alignMod == 0 ) ? memoryRequirements.size : ( memoryRequirements.size + memoryRequirements.alignment - alignMod );
VkMemoryAllocateInfo memoryAllocateInfo = {};
memoryAllocateInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
memoryAllocateInfo.allocationSize = alignedSize * NUM_FRAME_DATA;
memoryAllocateInfo.memoryTypeIndex = FindMemoryTypeIndex( memoryRequirements.memoryTypeBits, VULKAN_MEMORY_USAGE_CPU_TO_GPU );
ID_VK_CHECK( vkAllocateMemory( vkcontext.device, &memoryAllocateInfo, NULL, &memory ) );
for( int i = 0; i < NUM_FRAME_DATA; ++i )
{
ID_VK_CHECK( vkBindBufferMemory( vkcontext.device, buffers[ i ].buffer, memory, i * alignedSize ) );
}
ID_VK_CHECK( vkMapMemory( vkcontext.device, memory, 0, alignedSize * NUM_FRAME_DATA, 0, reinterpret_cast< void** >( &mappedData ) ) );
VkCommandPoolCreateInfo commandPoolCreateInfo = {};
commandPoolCreateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO;
commandPoolCreateInfo.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
commandPoolCreateInfo.queueFamilyIndex = vkcontext.graphicsFamilyIdx;
ID_VK_CHECK( vkCreateCommandPool( vkcontext.device, &commandPoolCreateInfo, NULL, &commandPool ) );
VkCommandBufferAllocateInfo commandBufferAllocateInfo = {};
commandBufferAllocateInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
commandBufferAllocateInfo.commandPool = commandPool;
commandBufferAllocateInfo.commandBufferCount = 1;
VkFenceCreateInfo fenceCreateInfo = {};
fenceCreateInfo.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
VkCommandBufferBeginInfo commandBufferBeginInfo = {};
commandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
for( int i = 0; i < NUM_FRAME_DATA; ++i )
{
ID_VK_CHECK( vkAllocateCommandBuffers( vkcontext.device, &commandBufferAllocateInfo, &buffers[ i ].commandBuffer ) );
ID_VK_CHECK( vkCreateFence( vkcontext.device, &fenceCreateInfo, NULL, &buffers[ i ].fence ) );
ID_VK_CHECK( vkBeginCommandBuffer( buffers[ i ].commandBuffer, &commandBufferBeginInfo ) );
buffers[ i ].data = ( byte* )mappedData + ( i * alignedSize );
}
}
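// Illustrative sizing, assuming the default r_vkUploadBufferSizeMB of 64 and
// NUM_FRAME_DATA == 2: Init() makes a single host-visible allocation of about
// 128 MB, binds one 64 MB staging buffer per frame slot at offset
// i * alignedSize, and maps the whole range exactly once.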
/*
=============
idVulkanStagingManager::Shutdown
=============
*/
void idVulkanStagingManager::Shutdown()
{
// SRS - use vkFreeMemory (with implicit unmap) vs. vkUnmapMemory to avoid validation layer errors on shutdown
//vkUnmapMemory( vkcontext.device, memory );
vkFreeMemory( vkcontext.device, memory, NULL );
memory = VK_NULL_HANDLE;
mappedData = NULL;
for( int i = 0; i < NUM_FRAME_DATA; ++i )
{
vkDestroyFence( vkcontext.device, buffers[ i ].fence, NULL );
vkDestroyBuffer( vkcontext.device, buffers[ i ].buffer, NULL );
vkFreeCommandBuffers( vkcontext.device, commandPool, 1, &buffers[ i ].commandBuffer );
}
memset( buffers, 0, sizeof( buffers ) );
maxBufferSize = 0;
currentBuffer = 0;
// SRS - destroy command pool to avoid validation layer errors on shutdown
vkDestroyCommandPool( vkcontext.device, commandPool, NULL );
commandPool = VK_NULL_HANDLE;
}
/*
=============
idVulkanStagingManager::Stage
=============
*/
byte* idVulkanStagingManager::Stage( const int size, const int alignment, VkCommandBuffer& commandBuffer, VkBuffer& buffer, int& bufferOffset )
{
if( size > maxBufferSize )
{
idLib::FatalError( "Can't allocate %d MB in gpu transfer buffer", ( int )( size / 1024 / 1024 ) );
}
stagingBuffer_t* stage = &buffers[ currentBuffer ];
const int alignMod = stage->offset % alignment;
stage->offset = ( ( stage->offset % alignment ) == 0 ) ? stage->offset : ( stage->offset + alignment - alignMod );
if( ( stage->offset + size ) >= ( maxBufferSize ) && !stage->submitted )
{
Flush();
}
int maxCommands = r_vkStagingMaxCommands.GetInteger();
if( ( maxCommands > 0 ) && ( stage->stagedCommands >= maxCommands ) )
{
Flush();
}
stage = &buffers[ currentBuffer ];
if( stage->submitted )
{
Wait( *stage );
}
commandBuffer = stage->commandBuffer;
buffer = stage->buffer;
bufferOffset = stage->offset;
byte* data = stage->data + stage->offset;
stage->offset += size;
stage->stagedCommands++;
return data;
}
/*
=============
idVulkanStagingManager::Flush
=============
*/
void idVulkanStagingManager::Flush()
{
stagingBuffer_t& stage = buffers[ currentBuffer ];
if( stage.submitted || stage.offset == 0 )
{
return;
}
VkMemoryBarrier barrier = {};
barrier.sType = VK_STRUCTURE_TYPE_MEMORY_BARRIER;
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT | VK_ACCESS_INDEX_READ_BIT;
vkCmdPipelineBarrier(
stage.commandBuffer,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_VERTEX_INPUT_BIT,
0, 1, &barrier, 0, NULL, 0, NULL );
vkEndCommandBuffer( stage.commandBuffer );
VkMappedMemoryRange memoryRange = {};
memoryRange.sType = VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE;
memoryRange.memory = memory;
memoryRange.size = VK_WHOLE_SIZE;
vkFlushMappedMemoryRanges( vkcontext.device, 1, &memoryRange );
VkSubmitInfo submitInfo = {};
submitInfo.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
submitInfo.commandBufferCount = 1;
submitInfo.pCommandBuffers = &stage.commandBuffer;
vkQueueSubmit( vkcontext.graphicsQueue, 1, &submitInfo, stage.fence );
stage.submitted = true;
stage.stagedCommands = 0;
currentBuffer = ( currentBuffer + 1 ) % NUM_FRAME_DATA;
}
/*
=============
idVulkanStagingManager::Wait
=============
*/
void idVulkanStagingManager::Wait( stagingBuffer_t& stage )
{
if( stage.submitted == false )
{
return;
}
ID_VK_CHECK( vkWaitForFences( vkcontext.device, 1, &stage.fence, VK_TRUE, UINT64_MAX ) );
ID_VK_CHECK( vkResetFences( vkcontext.device, 1, &stage.fence ) );
stage.stagedCommands = 0;
stage.offset = 0;
stage.submitted = false;
VkCommandBufferBeginInfo commandBufferBeginInfo = {};
commandBufferBeginInfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
ID_VK_CHECK( vkBeginCommandBuffer( stage.commandBuffer, &commandBufferBeginInfo ) );
}
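// Hypothetical usage sketch (illustrative, not part of the original file):
// the typical upload pattern is to Stage() the bytes, memcpy into the returned
// pointer, and record a copy on the returned command buffer; Flush() later
// submits the accumulated copies. The helper name below is an assumption.
#if 0
static void Example_StagedBufferUpload( VkBuffer dstBuffer, const void* src, int numBytes )
{
VkBuffer stageBuffer;
VkCommandBuffer commandBuffer;
int stageOffset = 0;
byte* stageData = stagingManager.Stage( numBytes, 16, commandBuffer, stageBuffer, stageOffset );
memcpy( stageData, src, numBytes );
// record the GPU-side copy; it executes when the staging manager flushes
VkBufferCopy copy = {};
copy.srcOffset = stageOffset;
copy.dstOffset = 0;
copy.size = numBytes;
vkCmdCopyBuffer( commandBuffer, stageBuffer, dstBuffer, 1, &copy );
}
#endif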

View file

@@ -1,88 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#ifndef __STAGING_VK__
#define __STAGING_VK__
/*
===========================================================================
idVulkanStagingManager
===========================================================================
*/
struct stagingBuffer_t
{
	stagingBuffer_t() :
		submitted( false ),
		commandBuffer( VK_NULL_HANDLE ),
		buffer( VK_NULL_HANDLE ),
		fence( VK_NULL_HANDLE ),
		offset( 0 ),
		data( NULL ),
		stagedCommands( 0 ) {}	// stagedCommands was left uninitialized in the original
bool submitted;
VkCommandBuffer commandBuffer;
VkBuffer buffer;
VkFence fence;
VkDeviceSize offset;
byte* data;
int stagedCommands;
};
class idVulkanStagingManager
{
public:
idVulkanStagingManager();
~idVulkanStagingManager();
void Init();
void Shutdown();
byte* Stage( const int size, const int alignment, VkCommandBuffer& commandBuffer, VkBuffer& buffer, int& bufferOffset );
void Flush();
private:
void Wait( stagingBuffer_t& stage );
private:
int maxBufferSize;
int currentBuffer;
byte* mappedData;
VkDeviceMemory memory;
VkCommandPool commandPool;
stagingBuffer_t buffers[ NUM_FRAME_DATA ];
};
extern idVulkanStagingManager stagingManager;
#endif
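
The header above is the entire client-facing surface: Stage() hands back a pointer into the persistently mapped staging memory plus the command buffer to record the GPU copy into, and the manager submits that buffer on Flush(). A minimal usage sketch; UploadToDeviceLocal is a hypothetical helper (not part of the original code), and dstBuffer is assumed to have been created with VK_BUFFER_USAGE_TRANSFER_DST_BIT:

// Hypothetical helper built on the API above.
static void UploadToDeviceLocal( VkBuffer dstBuffer, VkDeviceSize dstOffset, const void* src, int numBytes )
{
	VkCommandBuffer commandBuffer;
	VkBuffer stageBuffer;
	int stageOffset = 0;
	byte* stageData = stagingManager.Stage( numBytes, 16, commandBuffer, stageBuffer, stageOffset );

	// CPU-side write into the mapped staging memory
	memcpy( stageData, src, numBytes );

	// GPU-side copy, executed when the staging manager submits this command buffer
	VkBufferCopy copy = {};
	copy.srcOffset = stageOffset;
	copy.dstOffset = dstOffset;
	copy.size = numBytes;
	vkCmdCopyBuffer( commandBuffer, stageBuffer, dstBuffer, 1, &copy );
}
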


@@ -1,84 +0,0 @@
/*
===========================================================================
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
===========================================================================
*/
#ifndef __QVK_H__
#define __QVK_H__
#if defined( USE_VULKAN )
#if defined(VK_USE_PLATFORM_WIN32_KHR) //_WIN32
#include <Windows.h>
#endif
#define USE_AMD_ALLOCATOR
#include <vulkan/vulkan.h>
#if defined( USE_AMD_ALLOCATOR )
#include "vma.h"
#endif
#define ID_VK_CHECK( x ) { \
VkResult ret = x; \
if ( ret != VK_SUCCESS ) idLib::FatalError( "VK: %s - %s", VK_ErrorToString( ret ), #x ); \
}
#define ID_VK_VALIDATE( x, msg ) { \
if ( !( x ) ) idLib::FatalError( "VK: %s - %s", msg, #x ); \
}
const char* VK_ErrorToString( VkResult result );
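
Both macros fail hard through idLib::FatalError, stringifying the offending expression alongside VK_ErrorToString's decoding of the result code. Typical use is to wrap any VkResult-returning call directly, for example:

// Example use of the checking macros above.
ID_VK_CHECK( vkDeviceWaitIdle( vkcontext.device ) );
ID_VK_VALIDATE( vkcontext.device != VK_NULL_HANDLE, "Vulkan device not created" );
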
static const int MAX_DESC_SETS = 16384;
static const int MAX_DESC_UNIFORM_BUFFERS = 8192;
static const int MAX_DESC_IMAGE_SAMPLERS = 12384;
static const int MAX_DESC_SET_WRITES = 32;
static const int MAX_DESC_SET_UNIFORMS = 48;
static const int MAX_IMAGE_PARMS = 16;
static const int MAX_UBO_PARMS = 2;
static const int NUM_TIMESTAMP_QUERIES = 32;
// VK_EXT_debug_marker
extern PFN_vkDebugMarkerSetObjectTagEXT qvkDebugMarkerSetObjectTagEXT;
extern PFN_vkDebugMarkerSetObjectNameEXT qvkDebugMarkerSetObjectNameEXT;
extern PFN_vkCmdDebugMarkerBeginEXT qvkCmdDebugMarkerBeginEXT;
extern PFN_vkCmdDebugMarkerEndEXT qvkCmdDebugMarkerEndEXT;
extern PFN_vkCmdDebugMarkerInsertEXT qvkCmdDebugMarkerInsertEXT;
// VK_EXT_debug_utils
extern PFN_vkQueueBeginDebugUtilsLabelEXT qvkQueueBeginDebugUtilsLabelEXT;
extern PFN_vkQueueEndDebugUtilsLabelEXT qvkQueueEndDebugUtilsLabelEXT;
extern PFN_vkCmdBeginDebugUtilsLabelEXT qvkCmdBeginDebugUtilsLabelEXT;
extern PFN_vkCmdEndDebugUtilsLabelEXT qvkCmdEndDebugUtilsLabelEXT;
extern PFN_vkCmdInsertDebugUtilsLabelEXT qvkCmdInsertDebugUtilsLabelEXT;
#endif
#endif


@@ -1,37 +0,0 @@
//
// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//
#pragma warning( disable: 4244 ) // warning C4244: conversion from 'double' to 'float', possible loss of data
template<class T> T VMax( T x, T y )
{
return ( x > y ) ? x : y;
}
template<class T> T VMin( T x, T y )
{
return ( x < y ) ? x : y;
}
#define VMA_IMPLEMENTATION
#define VMA_MAX( v1, v2 ) VMax( (v1), (v2) )
#define VMA_MIN( v1, v2 ) VMin( (v1), (v2) )
#include "vma.h"

File diff suppressed because it is too large