mirror of https://github.com/id-Software/DOOM-3-BFG.git (synced 2025-03-14 06:34:10 +00:00)

Merged improved VBO/UBO handling from vkneo

parent 095c68ca5b
commit d115d84980

15 changed files with 1144 additions and 1065 deletions
@@ -291,7 +291,7 @@ if(USE_VULKAN)
	include_directories($ENV{VK_SDK_PATH}/Include)

	set(GLSLANG_DIR ${CMAKE_CURRENT_SOURCE_DIR}/libs/glslang)
	add_subdirectory(${GLSLANG_DIR})
	#add_subdirectory(${GLSLANG_DIR})

else()
@@ -1254,7 +1254,7 @@ if(MSVC)

	set(Vulkan_LIBRARIES
		${Vulkan_LIBRARY}
		glslang
		#glslang
		#SPIR-V
		#opengl32
		#glu32
@@ -4,6 +4,7 @@
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013 Robert Beckebans
Copyright (C) 2016-2017 Dustin Land

This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
@@ -32,12 +33,6 @@ If you have questions concerning this license or the applicable additional terms

idCVar r_showBuffers( "r_showBuffers", "0", CVAR_INTEGER, "" );

//static const GLenum bufferUsage = GL_STATIC_DRAW;
static const GLenum bufferUsage = GL_DYNAMIC_DRAW;

// RB begin
#if defined(_WIN32)
/*
==================
IsWriteCombined
@@ -56,29 +51,8 @@ bool IsWriteCombined( void* base )
	bool isWriteCombined = ( ( info.AllocationProtect & PAGE_WRITECOMBINE ) != 0 );
	return isWriteCombined;
}
#endif
// RB end

/*
================================================================================================

	Buffer Objects

================================================================================================
*/

/*
========================
UnbindBufferObjects
========================
*/
void UnbindBufferObjects()
{
	glBindBuffer( GL_ARRAY_BUFFER, 0 );
	glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
}

#if defined(USE_INTRINSICS)

void CopyBuffer( byte* dst, const byte* src, int numBytes )
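
CopyBuffer() exists because these buffers are usually mapped as write-combined memory (see IsWriteCombined() above), where scattered small writes are slow; the USE_INTRINSICS path, whose signature is all this hunk shows, presumably streams the data in 16-byte-aligned blocks with SSE. As a point of reference only, a minimal portable fallback is sketched below; this is an illustration, not the engine's intrinsics path.

// Hedged sketch: generic fallback equivalent of CopyBuffer().
// The guarded USE_INTRINSICS version is built around 16-byte-aligned
// copies so upload traffic does not trash the CPU cache.
#include <cstring>

typedef unsigned char byte;

static void CopyBuffer_Generic( byte* dst, const byte* src, int numBytes )
{
	memcpy( dst, src, numBytes );	// plain copy; correctness only, no streaming stores
}
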
@@ -136,24 +110,41 @@ void CopyBuffer( byte* dst, const byte* src, int numBytes )
/*
================================================================================================

idVertexBuffer
idBufferObject

================================================================================================
*/

/*
========================
idVertexBuffer::idVertexBuffer
idBufferObject::idBufferObject
========================
*/
idVertexBuffer::idVertexBuffer()
idBufferObject::idBufferObject()
{
	size = 0;
	offsetInOtherBuffer = OWNS_BUFFER_FLAG;
	usage = BU_STATIC;

#if defined( USE_VULKAN )
	apiObject = VK_NULL_HANDLE;
#if defined( ID_USE_AMD_ALLOCATOR )
	vmaAllocation = NULL;
#endif
#else
	apiObject = NULL;
	SetUnmapped();
	buffer = NULL;
#endif
}

/*
================================================================================================

idVertexBuffer

================================================================================================
*/

/*
========================
idVertexBuffer::~idVertexBuffer
@ -164,102 +155,6 @@ idVertexBuffer::~idVertexBuffer()
|
|||
FreeBufferObject();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::AllocBufferObject
|
||||
========================
|
||||
*/
|
||||
bool idVertexBuffer::AllocBufferObject( const void* data, int allocSize )
|
||||
{
|
||||
assert( apiObject == NULL );
|
||||
assert_16_byte_aligned( data );
|
||||
|
||||
if( allocSize <= 0 )
|
||||
{
|
||||
idLib::Error( "idVertexBuffer::AllocBufferObject: allocSize = %i", allocSize );
|
||||
}
|
||||
|
||||
size = allocSize;
|
||||
|
||||
bool allocationFailed = false;
|
||||
|
||||
int numBytes = GetAllocedSize();
|
||||
|
||||
|
||||
// clear out any previous error
|
||||
glGetError();
|
||||
|
||||
GLuint bufferObject = 0xFFFF;
|
||||
glGenBuffers( 1, & bufferObject );
|
||||
if( bufferObject == 0xFFFF )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::AllocBufferObject: failed" );
|
||||
}
|
||||
glBindBuffer( GL_ARRAY_BUFFER, bufferObject );
|
||||
|
||||
// these are rewritten every frame
|
||||
glBufferData( GL_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
|
||||
apiObject = reinterpret_cast< void* >( bufferObject );
|
||||
|
||||
GLenum err = glGetError();
|
||||
if( err == GL_OUT_OF_MEMORY )
|
||||
{
|
||||
idLib::Warning( "idVertexBuffer::AllocBufferObject: allocation failed" );
|
||||
allocationFailed = true;
|
||||
}
|
||||
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "vertex buffer alloc %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if( data != NULL )
|
||||
{
|
||||
Update( data, allocSize );
|
||||
}
|
||||
|
||||
return !allocationFailed;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::FreeBufferObject
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::FreeBufferObject()
|
||||
{
|
||||
if( IsMapped() )
|
||||
{
|
||||
UnmapBuffer();
|
||||
}
|
||||
|
||||
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
|
||||
if( OwnsBuffer() == false )
|
||||
{
|
||||
ClearWithoutFreeing();
|
||||
return;
|
||||
}
|
||||
|
||||
if( apiObject == NULL )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "vertex buffer free %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
glDeleteBuffers( 1, ( const unsigned int* ) & bufferObject );
|
||||
// RB end
|
||||
|
||||
ClearWithoutFreeing();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::Reference
|
||||
|
@@ -269,13 +164,16 @@ void idVertexBuffer::Reference( const idVertexBuffer& other )
{
	assert( IsMapped() == false );
	//assert( other.IsMapped() == false );	// this happens when building idTriangles while at the same time setting up idDrawVerts
	assert( other.GetAPIObject() != NULL );
	assert( other.GetSize() > 0 );

	FreeBufferObject();
	size = other.GetSize();						// this strips the MAPPED_FLAG
	offsetInOtherBuffer = other.GetOffset();	// this strips the OWNS_BUFFER_FLAG
	usage = other.usage;
	apiObject = other.apiObject;
#if defined( USE_VULKAN )
	allocation = other.allocation;
#endif
	assert( OwnsBuffer() == false );
}
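
Reference() is what makes sub-allocation work: the referencing buffer copies the owner's apiObject plus an offset/size window, and because the stored offset has OWNS_BUFFER_FLAG cleared, a later FreeBufferObject() on the view only clears bookkeeping and never deletes the underlying GL/Vulkan object. A usage sketch follows (engine context assumed; the sizes and the source pointer are made up for illustration).

// Sketch only: carve two per-surface windows out of one larger vertex buffer.
static void ReferenceSketch( const void* surfVerts /* hypothetical 16-byte-aligned data */ )
{
	idVertexBuffer frameVerts;
	frameVerts.AllocBufferObject( NULL, 4 * 1024 * 1024, BU_STATIC );	// owns the API object

	idVertexBuffer surfA;
	idVertexBuffer surfB;
	surfA.Reference( frameVerts, 0, 64 * 1024 );			// window at offset 0
	surfB.Reference( frameVerts, 64 * 1024, 64 * 1024 );	// window right behind it

	surfA.Update( surfVerts, 64 * 1024 );	// goes through the shared apiObject at surfA's offset

	surfA.FreeBufferObject();	// OwnsBuffer() is false, so this only clears the view
}
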
@@ -288,7 +186,6 @@ void idVertexBuffer::Reference( const idVertexBuffer& other, int refOffset, int
{
	assert( IsMapped() == false );
	//assert( other.IsMapped() == false );	// this happens when building idTriangles while at the same time setting up idDrawVerts
	assert( other.GetAPIObject() != NULL );
	assert( refOffset >= 0 );
	assert( refSize >= 0 );
	assert( refOffset + refSize <= other.GetSize() );
@ -296,156 +193,22 @@ void idVertexBuffer::Reference( const idVertexBuffer& other, int refOffset, int
|
|||
FreeBufferObject();
|
||||
size = refSize;
|
||||
offsetInOtherBuffer = other.GetOffset() + refOffset;
|
||||
usage = other.usage;
|
||||
apiObject = other.apiObject;
|
||||
#if defined( USE_VULKAN )
|
||||
allocation = other.allocation;
|
||||
#endif
|
||||
assert( OwnsBuffer() == false );
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::Update
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::Update( const void* data, int updateSize ) const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() == false );
|
||||
assert_16_byte_aligned( data );
|
||||
assert( ( GetOffset() & 15 ) == 0 );
|
||||
|
||||
if( updateSize > size )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
|
||||
}
|
||||
|
||||
int numBytes = ( updateSize + 15 ) & ~15;
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ARRAY_BUFFER, bufferObject );
|
||||
glBufferSubData( GL_ARRAY_BUFFER, GetOffset(), ( GLsizeiptr )numBytes, data );
|
||||
/*
|
||||
void * buffer = MapBuffer( BM_WRITE );
|
||||
CopyBuffer( (byte *)buffer + GetOffset(), (byte *)data, numBytes );
|
||||
UnmapBuffer();
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::MapBuffer
|
||||
========================
|
||||
*/
|
||||
void* idVertexBuffer::MapBuffer( bufferMapType_t mapType ) const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() == false );
|
||||
|
||||
void* buffer = NULL;
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ARRAY_BUFFER, bufferObject );
|
||||
|
||||
if( mapType == BM_READ )
|
||||
{
|
||||
#if 0 //defined(USE_GLES2)
|
||||
buffer = glMapBufferOES( GL_ARRAY_BUFFER, GL_READ_ONLY );
|
||||
#else
|
||||
buffer = glMapBufferRange( GL_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
#endif
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
}
|
||||
else if( mapType == BM_WRITE )
|
||||
{
|
||||
#if 0 //defined(USE_GLES2)
|
||||
buffer = glMapBuffer( GL_ARRAY_BUFFER, GL_WRITE_ONLY );
|
||||
#else
|
||||
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
|
||||
buffer = glMapBufferRange( GL_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
#endif
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
|
||||
}
|
||||
else
|
||||
{
|
||||
assert( false );
|
||||
}
|
||||
|
||||
SetMapped();
|
||||
|
||||
if( buffer == NULL )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::MapBuffer: failed" );
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::UnmapBuffer
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::UnmapBuffer() const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() );
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ARRAY_BUFFER, bufferObject );
|
||||
if( !glUnmapBuffer( GL_ARRAY_BUFFER ) )
|
||||
{
|
||||
idLib::Printf( "idVertexBuffer::UnmapBuffer failed\n" );
|
||||
}
|
||||
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::ClearWithoutFreeing
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::ClearWithoutFreeing()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
================================================================================================
|
||||
|
||||
idIndexBuffer
|
||||
idIndexBuffer
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::idIndexBuffer
|
||||
========================
|
||||
*/
|
||||
idIndexBuffer::idIndexBuffer()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = NULL;
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::~idIndexBuffer
|
||||
|
@ -456,103 +219,6 @@ idIndexBuffer::~idIndexBuffer()
|
|||
FreeBufferObject();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::AllocBufferObject
|
||||
========================
|
||||
*/
|
||||
bool idIndexBuffer::AllocBufferObject( const void* data, int allocSize )
|
||||
{
|
||||
assert( apiObject == NULL );
|
||||
assert_16_byte_aligned( data );
|
||||
|
||||
if( allocSize <= 0 )
|
||||
{
|
||||
idLib::Error( "idIndexBuffer::AllocBufferObject: allocSize = %i", allocSize );
|
||||
}
|
||||
|
||||
size = allocSize;
|
||||
|
||||
bool allocationFailed = false;
|
||||
|
||||
int numBytes = GetAllocedSize();
|
||||
|
||||
|
||||
// clear out any previous error
|
||||
glGetError();
|
||||
|
||||
GLuint bufferObject = 0xFFFF;
|
||||
glGenBuffers( 1, & bufferObject );
|
||||
if( bufferObject == 0xFFFF )
|
||||
{
|
||||
GLenum error = glGetError();
|
||||
idLib::FatalError( "idIndexBuffer::AllocBufferObject: failed - GL_Error %d", error );
|
||||
}
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, bufferObject );
|
||||
|
||||
// these are rewritten every frame
|
||||
glBufferData( GL_ELEMENT_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
|
||||
apiObject = reinterpret_cast< void* >( bufferObject );
|
||||
|
||||
GLenum err = glGetError();
|
||||
if( err == GL_OUT_OF_MEMORY )
|
||||
{
|
||||
idLib::Warning( "idIndexBuffer:AllocBufferObject: allocation failed" );
|
||||
allocationFailed = true;
|
||||
}
|
||||
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "index buffer alloc %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if( data != NULL )
|
||||
{
|
||||
Update( data, allocSize );
|
||||
}
|
||||
|
||||
return !allocationFailed;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::FreeBufferObject
|
||||
========================
|
||||
*/
|
||||
void idIndexBuffer::FreeBufferObject()
|
||||
{
|
||||
if( IsMapped() )
|
||||
{
|
||||
UnmapBuffer();
|
||||
}
|
||||
|
||||
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
|
||||
if( OwnsBuffer() == false )
|
||||
{
|
||||
ClearWithoutFreeing();
|
||||
return;
|
||||
}
|
||||
|
||||
if( apiObject == NULL )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "index buffer free %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
glDeleteBuffers( 1, ( const unsigned int* )& bufferObject );
|
||||
// RB end
|
||||
|
||||
ClearWithoutFreeing();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::Reference
|
||||
|
@@ -562,13 +228,16 @@ void idIndexBuffer::Reference( const idIndexBuffer& other )
{
	assert( IsMapped() == false );
	//assert( other.IsMapped() == false );	// this happens when building idTriangles while at the same time setting up triIndex_t
	assert( other.GetAPIObject() != NULL );
	assert( other.GetSize() > 0 );

	FreeBufferObject();
	size = other.GetSize();						// this strips the MAPPED_FLAG
	offsetInOtherBuffer = other.GetOffset();	// this strips the OWNS_BUFFER_FLAG
	usage = other.usage;
	apiObject = other.apiObject;
#if defined( USE_VULKAN )
	allocation = other.allocation;
#endif
	assert( OwnsBuffer() == false );
}
@@ -581,7 +250,6 @@ void idIndexBuffer::Reference( const idIndexBuffer& other, int refOffset, int re
{
	assert( IsMapped() == false );
	//assert( other.IsMapped() == false );	// this happens when building idTriangles while at the same time setting up triIndex_t
	assert( other.GetAPIObject() != NULL );
	assert( refOffset >= 0 );
	assert( refSize >= 0 );
	assert( refOffset + refSize <= other.GetSize() );
@ -589,395 +257,74 @@ void idIndexBuffer::Reference( const idIndexBuffer& other, int refOffset, int re
|
|||
FreeBufferObject();
|
||||
size = refSize;
|
||||
offsetInOtherBuffer = other.GetOffset() + refOffset;
|
||||
usage = other.usage;
|
||||
apiObject = other.apiObject;
|
||||
#if defined( USE_VULKAN )
|
||||
allocation = other.allocation;
|
||||
#endif
|
||||
assert( OwnsBuffer() == false );
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::Update
|
||||
========================
|
||||
*/
|
||||
void idIndexBuffer::Update( const void* data, int updateSize ) const
|
||||
{
|
||||
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() == false );
|
||||
assert_16_byte_aligned( data );
|
||||
assert( ( GetOffset() & 15 ) == 0 );
|
||||
|
||||
if( updateSize > size )
|
||||
{
|
||||
idLib::FatalError( "idIndexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
|
||||
}
|
||||
|
||||
int numBytes = ( updateSize + 15 ) & ~15;
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, bufferObject );
|
||||
glBufferSubData( GL_ELEMENT_ARRAY_BUFFER, GetOffset(), ( GLsizeiptr )numBytes, data );
|
||||
/*
|
||||
void * buffer = MapBuffer( BM_WRITE );
|
||||
CopyBuffer( (byte *)buffer + GetOffset(), (byte *)data, numBytes );
|
||||
UnmapBuffer();
|
||||
*/
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::MapBuffer
|
||||
========================
|
||||
*/
|
||||
void* idIndexBuffer::MapBuffer( bufferMapType_t mapType ) const
|
||||
{
|
||||
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() == false );
|
||||
|
||||
void* buffer = NULL;
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, bufferObject );
|
||||
|
||||
if( mapType == BM_READ )
|
||||
{
|
||||
//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_READ_ONLY_ARB );
|
||||
buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
}
|
||||
else if( mapType == BM_WRITE )
|
||||
{
|
||||
//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB );
|
||||
|
||||
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
|
||||
buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
|
||||
}
|
||||
else
|
||||
{
|
||||
assert( false );
|
||||
}
|
||||
|
||||
SetMapped();
|
||||
|
||||
if( buffer == NULL )
|
||||
{
|
||||
idLib::FatalError( "idIndexBuffer::MapBuffer: failed" );
|
||||
}
|
||||
return buffer;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::UnmapBuffer
|
||||
========================
|
||||
*/
|
||||
void idIndexBuffer::UnmapBuffer() const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() );
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr bufferObject = reinterpret_cast< GLintptr >( apiObject );
|
||||
// RB end
|
||||
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, bufferObject );
|
||||
if( !glUnmapBuffer( GL_ELEMENT_ARRAY_BUFFER ) )
|
||||
{
|
||||
idLib::Printf( "idIndexBuffer::UnmapBuffer failed\n" );
|
||||
}
|
||||
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::ClearWithoutFreeing
|
||||
========================
|
||||
*/
|
||||
void idIndexBuffer::ClearWithoutFreeing()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
================================================================================================
|
||||
|
||||
idJointBuffer
|
||||
idUniformBuffer
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::idJointBuffer
|
||||
idUniformBuffer::~idUniformBuffer
|
||||
========================
|
||||
*/
|
||||
idJointBuffer::idJointBuffer()
|
||||
{
|
||||
numJoints = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = NULL;
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::~idJointBuffer
|
||||
========================
|
||||
*/
|
||||
idJointBuffer::~idJointBuffer()
|
||||
idUniformBuffer::~idUniformBuffer()
|
||||
{
|
||||
FreeBufferObject();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::AllocBufferObject
|
||||
idUniformBuffer::Reference
|
||||
========================
|
||||
*/
|
||||
bool idJointBuffer::AllocBufferObject( const float* joints, int numAllocJoints )
|
||||
{
|
||||
assert( apiObject == NULL );
|
||||
assert_16_byte_aligned( joints );
|
||||
|
||||
if( numAllocJoints <= 0 )
|
||||
{
|
||||
idLib::Error( "idJointBuffer::AllocBufferObject: joints = %i", numAllocJoints );
|
||||
}
|
||||
|
||||
numJoints = numAllocJoints;
|
||||
|
||||
bool allocationFailed = false;
|
||||
|
||||
const int numBytes = GetAllocedSize();
|
||||
|
||||
GLuint buffer = 0;
|
||||
glGenBuffers( 1, &buffer );
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, buffer );
|
||||
glBufferData( GL_UNIFORM_BUFFER, numBytes, NULL, GL_STREAM_DRAW );
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, 0 );
|
||||
apiObject = reinterpret_cast< void* >( buffer );
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "joint buffer alloc %p, api %p (%i joints)\n", this, GetAPIObject(), GetNumJoints() );
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if( joints != NULL )
|
||||
{
|
||||
Update( joints, numAllocJoints );
|
||||
}
|
||||
|
||||
return !allocationFailed;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::FreeBufferObject
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::FreeBufferObject()
|
||||
{
|
||||
if( IsMapped() )
|
||||
{
|
||||
UnmapBuffer();
|
||||
}
|
||||
|
||||
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
|
||||
if( OwnsBuffer() == false )
|
||||
{
|
||||
ClearWithoutFreeing();
|
||||
return;
|
||||
}
|
||||
|
||||
if( apiObject == NULL )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "joint buffer free %p, api %p (%i joints)\n", this, GetAPIObject(), GetNumJoints() );
|
||||
}
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
GLintptr buffer = reinterpret_cast< GLintptr >( apiObject );
|
||||
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, 0 );
|
||||
glDeleteBuffers( 1, ( const GLuint* )& buffer );
|
||||
// RB end
|
||||
|
||||
ClearWithoutFreeing();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::Reference
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::Reference( const idJointBuffer& other )
|
||||
void idUniformBuffer::Reference( const idUniformBuffer& other )
|
||||
{
|
||||
assert( IsMapped() == false );
|
||||
assert( other.IsMapped() == false );
|
||||
assert( other.GetAPIObject() != NULL );
|
||||
assert( other.GetNumJoints() > 0 );
|
||||
assert( other.GetSize() > 0 );
|
||||
|
||||
FreeBufferObject();
|
||||
numJoints = other.GetNumJoints(); // this strips the MAPPED_FLAG
|
||||
size = other.GetSize(); // this strips the MAPPED_FLAG
|
||||
offsetInOtherBuffer = other.GetOffset(); // this strips the OWNS_BUFFER_FLAG
|
||||
usage = other.usage;
|
||||
apiObject = other.apiObject;
|
||||
#if defined( USE_VULKAN )
|
||||
allocation = other.allocation;
|
||||
#endif
|
||||
assert( OwnsBuffer() == false );
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::Reference
|
||||
idUniformBuffer::Reference
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::Reference( const idJointBuffer& other, int jointRefOffset, int numRefJoints )
|
||||
void idUniformBuffer::Reference( const idUniformBuffer& other, int refOffset, int refSize )
|
||||
{
|
||||
assert( IsMapped() == false );
|
||||
assert( other.IsMapped() == false );
|
||||
assert( other.GetAPIObject() != NULL );
|
||||
assert( jointRefOffset >= 0 );
|
||||
assert( numRefJoints >= 0 );
|
||||
assert( jointRefOffset + numRefJoints * sizeof( idJointMat ) <= other.GetNumJoints() * sizeof( idJointMat ) );
|
||||
assert_16_byte_aligned( numRefJoints * 3 * 4 * sizeof( float ) );
|
||||
assert( refOffset >= 0 );
|
||||
assert( refSize >= 0 );
|
||||
assert( refOffset + refSize <= other.GetSize() );
|
||||
|
||||
FreeBufferObject();
|
||||
numJoints = numRefJoints;
|
||||
offsetInOtherBuffer = other.GetOffset() + jointRefOffset;
|
||||
size = refSize;
|
||||
offsetInOtherBuffer = other.GetOffset() + refOffset;
|
||||
usage = other.usage;
|
||||
apiObject = other.apiObject;
|
||||
#if defined( USE_VULKAN )
|
||||
allocation = other.allocation;
|
||||
#endif
|
||||
assert( OwnsBuffer() == false );
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::Update
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::Update( const float* joints, int numUpdateJoints ) const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() == false );
|
||||
assert_16_byte_aligned( joints );
|
||||
assert( ( GetOffset() & 15 ) == 0 );
|
||||
|
||||
if( numUpdateJoints > numJoints )
|
||||
{
|
||||
idLib::FatalError( "idJointBuffer::Update: size overrun, %i > %i\n", numUpdateJoints, numJoints );
|
||||
}
|
||||
|
||||
const int numBytes = numUpdateJoints * 3 * 4 * sizeof( float );
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, reinterpret_cast< GLintptr >( apiObject ) );
|
||||
// RB end
|
||||
|
||||
glBufferSubData( GL_UNIFORM_BUFFER, GetOffset(), ( GLsizeiptr )numBytes, joints );
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::MapBuffer
|
||||
========================
|
||||
*/
|
||||
float* idJointBuffer::MapBuffer( bufferMapType_t mapType ) const
|
||||
{
|
||||
assert( IsMapped() == false );
|
||||
assert( mapType == BM_WRITE );
|
||||
assert( apiObject != NULL );
|
||||
|
||||
int numBytes = GetAllocedSize();
|
||||
|
||||
void* buffer = NULL;
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, reinterpret_cast< GLintptr >( apiObject ) );
|
||||
// RB end
|
||||
|
||||
numBytes = numBytes;
|
||||
assert( GetOffset() == 0 );
|
||||
//buffer = glMapBufferARB( GL_UNIFORM_BUFFER, GL_WRITE_ONLY_ARB );
|
||||
|
||||
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
|
||||
buffer = glMapBufferRange( GL_UNIFORM_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
|
||||
SetMapped();
|
||||
|
||||
if( buffer == NULL )
|
||||
{
|
||||
idLib::FatalError( "idJointBuffer::MapBuffer: failed" );
|
||||
}
|
||||
return ( float* ) buffer;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::UnmapBuffer
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::UnmapBuffer() const
|
||||
{
|
||||
assert( apiObject != NULL );
|
||||
assert( IsMapped() );
|
||||
|
||||
// RB: 64 bit fixes, changed GLuint to GLintptrARB
|
||||
glBindBuffer( GL_UNIFORM_BUFFER, reinterpret_cast< GLintptr >( apiObject ) );
|
||||
// RB end
|
||||
|
||||
if( !glUnmapBuffer( GL_UNIFORM_BUFFER ) )
|
||||
{
|
||||
idLib::Printf( "idJointBuffer::UnmapBuffer failed\n" );
|
||||
}
|
||||
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::ClearWithoutFreeing
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::ClearWithoutFreeing()
|
||||
{
|
||||
numJoints = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idJointBuffer::Swap
|
||||
========================
|
||||
*/
|
||||
void idJointBuffer::Swap( idJointBuffer& other )
|
||||
{
|
||||
// Make sure the ownership of the buffer is not transferred to an unintended place.
|
||||
assert( other.OwnsBuffer() == OwnsBuffer() );
|
||||
|
||||
SwapValues( other.numJoints, numJoints );
|
||||
SwapValues( other.offsetInOtherBuffer, offsetInOtherBuffer );
|
||||
SwapValues( other.apiObject, apiObject );
|
||||
}
|
||||
}
|
|
@@ -33,39 +33,124 @@ If you have questions concerning this license or the applicable additional terms
#include "Vulkan/Allocator_VK.h"
#endif

/*
================================================================================================

	Buffer Objects

================================================================================================
*/

class idIndexBuffer;

enum bufferMapType_t
{
	BM_READ,		// map for reading
	BM_WRITE		// map for writing
};

enum bufferUsageType_t
{
	BU_STATIC,		// GPU R
	BU_DYNAMIC,		// GPU R, CPU R/W
};

// Returns all targets to virtual memory use instead of buffer object use.
// Call this before doing any conventional buffer reads, like screenshots.
void UnbindBufferObjects();
bool IsWriteCombined( void* base );
void CopyBuffer( byte* dst, const byte* src, int numBytes );
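
The new bufferUsageType_t is what lets one Update() signature serve both backends: in the OpenGL implementation added later in this commit, BU_DYNAMIC buffers are written through a CPU-visible mapped pointer with CopyBuffer(), while other buffers fall back to glBufferSubData(). A condensed sketch of that dispatch follows, mirroring the new BufferObject_GL.cpp in this diff with error handling trimmed; 'usage', 'buffer' and 'apiObject' are the members of the idBufferObject base class declared just below.

// Condensed from idVertexBuffer::Update() in the new BufferObject_GL.cpp.
void Update( const void* data, int updateSize, int offset ) const
{
	int numBytes = ( updateSize + 15 ) & ~15;	// pad writes to 16 bytes

	if( usage == BU_DYNAMIC )
	{
		// dynamic path: the buffer stays mapped, just copy into it
		CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
	}
	else
	{
		// static path: upload through the GL buffer object
		glBindBuffer( GL_ARRAY_BUFFER, apiObject );
		glBufferSubData( GL_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptr )numBytes, data );
	}
}
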
/*
================================================
idVertexBuffer
================================================
================================================================================================

idBufferObject

================================================================================================
*/
class idVertexBuffer

class idBufferObject
{
public:
	idBufferObject();

	int GetSize() const
	{
		return ( size & ~MAPPED_FLAG );
	}
	int GetAllocedSize() const
	{
		return ( ( size & ~MAPPED_FLAG ) + 15 ) & ~15;
	}
	bufferUsageType_t GetUsage() const
	{
		return usage;
	}
#if defined( USE_VULKAN )
	VkBuffer GetAPIObject() const
	{
		return apiObject;
	}
#else
	GLintptr GetAPIObject() const
	{
		return apiObject;
	}
#endif
	int GetOffset() const
	{
		return ( offsetInOtherBuffer & ~OWNS_BUFFER_FLAG );
	}

	bool IsMapped() const
	{
		return ( size & MAPPED_FLAG ) != 0;
	}

protected:
	void SetMapped() const
	{
		const_cast< int& >( size ) |= MAPPED_FLAG;
	}
	void SetUnmapped() const
	{
		const_cast< int& >( size ) &= ~MAPPED_FLAG;
	}
	bool OwnsBuffer() const
	{
		return ( ( offsetInOtherBuffer & OWNS_BUFFER_FLAG ) != 0 );
	}

protected:
	int					size;					// size in bytes
	int					offsetInOtherBuffer;	// offset in bytes
	bufferUsageType_t	usage;

#if defined( USE_VULKAN )
	VkBuffer			apiObject;
#if defined( ID_USE_AMD_ALLOCATOR )
	VmaAllocation		vmaAllocation;
	VmaAllocationInfo	allocation;
#else
	vulkanAllocation_t	allocation;
#endif

#else
	// GL
	GLintptr			apiObject;
	void*				buffer;
#endif

	// sizeof() confuses typeinfo...
	static const int MAPPED_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
	static const int OWNS_BUFFER_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
};
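
The two flag constants deliberately alias the sign bit: the mapped flag lives in the top bit of size and the ownership flag in the top bit of offsetInOtherBuffer, which is why GetSize(), GetAllocedSize() and GetOffset() all mask with ~MAPPED_FLAG / ~OWNS_BUFFER_FLAG. A self-contained sketch of the same packing, outside the engine:

// Standalone illustration of the flag packing used by idBufferObject.
struct PackedBuffer
{
	static const int MAPPED_FLAG      = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
	static const int OWNS_BUFFER_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );

	int size;					// real size in the low 31 bits, mapped flag in the sign bit
	int offsetInOtherBuffer;	// real offset in the low 31 bits, ownership flag in the sign bit

	int  GetSize() const		{ return size & ~MAPPED_FLAG; }
	int  GetAllocedSize() const	{ return ( ( size & ~MAPPED_FLAG ) + 15 ) & ~15; }	// padded to 16 bytes
	int  GetOffset() const		{ return offsetInOtherBuffer & ~OWNS_BUFFER_FLAG; }
	bool IsMapped() const		{ return ( size & MAPPED_FLAG ) != 0; }
	bool OwnsBuffer() const		{ return ( offsetInOtherBuffer & OWNS_BUFFER_FLAG ) != 0; }

	void SetMapped()			{ size |= MAPPED_FLAG; }
	void SetUnmapped()			{ size &= ~MAPPED_FLAG; }
};
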
/*
|
||||
================================================================================================
|
||||
|
||||
idVertexBuffer
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
class idVertexBuffer : public idBufferObject
|
||||
{
|
||||
public:
|
||||
idVertexBuffer();
|
||||
~idVertexBuffer();
|
||||
|
||||
// Allocate or free the buffer.
|
||||
bool AllocBufferObject( const void* data, int allocSize );
|
||||
bool AllocBufferObject( const void* data, int allocSize, bufferUsageType_t usage );
|
||||
void FreeBufferObject();
|
||||
|
||||
// Make this buffer a reference to another buffer.
|
||||
|
@ -73,76 +158,36 @@ public:
|
|||
void Reference( const idVertexBuffer& other, int refOffset, int refSize );
|
||||
|
||||
// Copies data to the buffer. 'size' may be less than the originally allocated size.
|
||||
void Update( const void* data, int updateSize ) const;
|
||||
void Update( const void* data, int size, int offset = 0 ) const;
|
||||
|
||||
void* MapBuffer( bufferMapType_t mapType ) const;
|
||||
idDrawVert* MapVertexBuffer( bufferMapType_t mapType ) const
|
||||
void* MapBuffer( bufferMapType_t mapType );
|
||||
idDrawVert* MapVertexBuffer( bufferMapType_t mapType )
|
||||
{
|
||||
return static_cast< idDrawVert* >( MapBuffer( mapType ) );
|
||||
}
|
||||
void UnmapBuffer() const;
|
||||
bool IsMapped() const
|
||||
{
|
||||
return ( size & MAPPED_FLAG ) != 0;
|
||||
}
|
||||
|
||||
int GetSize() const
|
||||
{
|
||||
return ( size & ~MAPPED_FLAG );
|
||||
}
|
||||
int GetAllocedSize() const
|
||||
{
|
||||
return ( ( size & ~MAPPED_FLAG ) + 15 ) & ~15;
|
||||
}
|
||||
void* GetAPIObject() const
|
||||
{
|
||||
return apiObject;
|
||||
}
|
||||
int GetOffset() const
|
||||
{
|
||||
return ( offsetInOtherBuffer & ~OWNS_BUFFER_FLAG );
|
||||
}
|
||||
|
||||
private:
|
||||
int size; // size in bytes
|
||||
int offsetInOtherBuffer; // offset in bytes
|
||||
void* apiObject;
|
||||
|
||||
// sizeof() confuses typeinfo...
|
||||
static const int MAPPED_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
static const int OWNS_BUFFER_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
void UnmapBuffer();
|
||||
|
||||
private:
|
||||
void ClearWithoutFreeing();
|
||||
void SetMapped() const
|
||||
{
|
||||
const_cast< int& >( size ) |= MAPPED_FLAG;
|
||||
}
|
||||
void SetUnmapped() const
|
||||
{
|
||||
const_cast< int& >( size ) &= ~MAPPED_FLAG;
|
||||
}
|
||||
bool OwnsBuffer() const
|
||||
{
|
||||
return ( ( offsetInOtherBuffer & OWNS_BUFFER_FLAG ) != 0 );
|
||||
}
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN( idVertexBuffer );
|
||||
};
|
||||
|
||||
/*
|
||||
================================================
|
||||
================================================================================================
|
||||
|
||||
idIndexBuffer
|
||||
================================================
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
class idIndexBuffer
|
||||
class idIndexBuffer : public idBufferObject
|
||||
{
|
||||
public:
|
||||
idIndexBuffer();
|
||||
~idIndexBuffer();
|
||||
|
||||
// Allocate or free the buffer.
|
||||
bool AllocBufferObject( const void* data, int allocSize );
|
||||
bool AllocBufferObject( const void* data, int allocSize, bufferUsageType_t usage );
|
||||
void FreeBufferObject();
|
||||
|
||||
// Make this buffer a reference to another buffer.
|
||||
|
@ -150,140 +195,55 @@ public:
|
|||
void Reference( const idIndexBuffer& other, int refOffset, int refSize );
|
||||
|
||||
// Copies data to the buffer. 'size' may be less than the originally allocated size.
|
||||
void Update( const void* data, int updateSize ) const;
|
||||
void Update( const void* data, int size, int offset = 0 ) const;
|
||||
|
||||
void* MapBuffer( bufferMapType_t mapType ) const;
|
||||
triIndex_t* MapIndexBuffer( bufferMapType_t mapType ) const
|
||||
void* MapBuffer( bufferMapType_t mapType );
|
||||
triIndex_t* MapIndexBuffer( bufferMapType_t mapType )
|
||||
{
|
||||
return static_cast< triIndex_t* >( MapBuffer( mapType ) );
|
||||
}
|
||||
void UnmapBuffer() const;
|
||||
bool IsMapped() const
|
||||
{
|
||||
return ( size & MAPPED_FLAG ) != 0;
|
||||
}
|
||||
|
||||
int GetSize() const
|
||||
{
|
||||
return ( size & ~MAPPED_FLAG );
|
||||
}
|
||||
int GetAllocedSize() const
|
||||
{
|
||||
return ( ( size & ~MAPPED_FLAG ) + 15 ) & ~15;
|
||||
}
|
||||
void* GetAPIObject() const
|
||||
{
|
||||
return apiObject;
|
||||
}
|
||||
int GetOffset() const
|
||||
{
|
||||
return ( offsetInOtherBuffer & ~OWNS_BUFFER_FLAG );
|
||||
}
|
||||
|
||||
private:
|
||||
int size; // size in bytes
|
||||
int offsetInOtherBuffer; // offset in bytes
|
||||
void* apiObject;
|
||||
|
||||
// sizeof() confuses typeinfo...
|
||||
static const int MAPPED_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
static const int OWNS_BUFFER_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
void UnmapBuffer();
|
||||
|
||||
private:
|
||||
void ClearWithoutFreeing();
|
||||
void SetMapped() const
|
||||
{
|
||||
const_cast< int& >( size ) |= MAPPED_FLAG;
|
||||
}
|
||||
void SetUnmapped() const
|
||||
{
|
||||
const_cast< int& >( size ) &= ~MAPPED_FLAG;
|
||||
}
|
||||
bool OwnsBuffer() const
|
||||
{
|
||||
return ( ( offsetInOtherBuffer & OWNS_BUFFER_FLAG ) != 0 );
|
||||
}
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN( idIndexBuffer );
|
||||
};
|
||||
|
||||
/*
================================================
idJointBuffer
================================================================================================

idUniformBuffer

IMPORTANT NOTICE: on the PC, binding to an offset in uniform buffer objects
is limited to GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, which is 256 on current nvidia cards,
so joint offsets, which are multiples of 48 bytes, must be in multiples of 16 = 768 bytes.
================================================
================================================================================================
*/
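
The notice above boils down to a least-common-multiple constraint: glBindBufferRange() offsets must be a multiple of GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT (256 bytes on the NVIDIA hardware referenced), one idJointMat is 3 x 4 floats = 48 bytes, and the first offset that satisfies both is 768 bytes, i.e. every 16 joints. A small sketch of that computation, assuming the alignment is queried at startup:

// Sketch: smallest joint count whose byte offset is legal for glBindBufferRange.
// 'uboAlignment' would come from glGetIntegerv( GL_UNIFORM_BUFFER_OFFSET_ALIGNMENT, ... ).
static int JointOffsetGranularity( int uboAlignment )
{
	const int JOINT_SIZE = 3 * 4 * sizeof( float );	// 48 bytes per idJointMat
	int bytes = JOINT_SIZE;
	while( ( bytes % uboAlignment ) != 0 )			// walk up to lcm( 48, alignment )
	{
		bytes += JOINT_SIZE;
	}
	return bytes / JOINT_SIZE;						// 16 joints (768 bytes) when alignment is 256
}
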
|
||||
class idJointBuffer
|
||||
class idUniformBuffer : public idBufferObject
|
||||
{
|
||||
public:
|
||||
idJointBuffer();
|
||||
~idJointBuffer();
|
||||
idUniformBuffer();
|
||||
~idUniformBuffer();
|
||||
|
||||
// Allocate or free the buffer.
|
||||
bool AllocBufferObject( const float* joints, int numAllocJoints );
|
||||
bool AllocBufferObject( const void* data, int allocSize, bufferUsageType_t usage );
|
||||
void FreeBufferObject();
|
||||
|
||||
// Make this buffer a reference to another buffer.
|
||||
void Reference( const idJointBuffer& other );
|
||||
void Reference( const idJointBuffer& other, int jointRefOffset, int numRefJoints );
|
||||
void Reference( const idUniformBuffer& other );
|
||||
void Reference( const idUniformBuffer& other, int refOffset, int refSize );
|
||||
|
||||
// Copies data to the buffer. 'numJoints' may be less than the originally allocated size.
|
||||
void Update( const float* joints, int numUpdateJoints ) const;
|
||||
// Copies data to the buffer. 'size' may be less than the originally allocated size.
|
||||
void Update( const void* data, int size, int offset = 0 ) const;
|
||||
|
||||
float* MapBuffer( bufferMapType_t mapType ) const;
|
||||
void UnmapBuffer() const;
|
||||
bool IsMapped() const
|
||||
{
|
||||
return ( numJoints & MAPPED_FLAG ) != 0;
|
||||
}
|
||||
|
||||
int GetNumJoints() const
|
||||
{
|
||||
return ( numJoints & ~MAPPED_FLAG );
|
||||
}
|
||||
int GetAllocedSize() const
|
||||
{
|
||||
return ( numJoints & ~MAPPED_FLAG ) * 3 * 4 * sizeof( float );
|
||||
}
|
||||
void* GetAPIObject() const
|
||||
{
|
||||
return apiObject;
|
||||
}
|
||||
int GetOffset() const
|
||||
{
|
||||
return ( offsetInOtherBuffer & ~OWNS_BUFFER_FLAG );
|
||||
}
|
||||
|
||||
void Swap( idJointBuffer& other );
|
||||
|
||||
private:
|
||||
int numJoints;
|
||||
int offsetInOtherBuffer; // offset in bytes
|
||||
void* apiObject;
|
||||
|
||||
// sizeof() confuses typeinfo...
|
||||
static const int MAPPED_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
static const int OWNS_BUFFER_FLAG = 1 << ( 4 /* sizeof( int ) */ * 8 - 1 );
|
||||
void* MapBuffer( bufferMapType_t mapType );
|
||||
void UnmapBuffer();
|
||||
|
||||
private:
|
||||
void ClearWithoutFreeing();
|
||||
void SetMapped() const
|
||||
{
|
||||
const_cast< int& >( numJoints ) |= MAPPED_FLAG;
|
||||
}
|
||||
void SetUnmapped() const
|
||||
{
|
||||
const_cast< int& >( numJoints ) &= ~MAPPED_FLAG;
|
||||
}
|
||||
bool OwnsBuffer() const
|
||||
{
|
||||
return ( ( offsetInOtherBuffer & OWNS_BUFFER_FLAG ) != 0 );
|
||||
}
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN( idJointBuffer );
|
||||
DISALLOW_COPY_AND_ASSIGN( idUniformBuffer );
|
||||
};
|
||||
|
||||
#endif // !__BUFFEROBJECT_H__
@@ -87,8 +87,8 @@ idGuiModel::BeginFrame
*/
void idGuiModel::BeginFrame()
{
	vertexBlock = vertexCache.AllocVertex( NULL, ALIGN( MAX_VERTS * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
	indexBlock = vertexCache.AllocIndex( NULL, ALIGN( MAX_INDEXES * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
	vertexBlock = vertexCache.AllocVertex( NULL, MAX_VERTS );
	indexBlock = vertexCache.AllocIndex( NULL, MAX_INDEXES );
	vertexPointer = ( idDrawVert* )vertexCache.MappedVertexBuffer( vertexBlock );
	indexPointer = ( triIndex_t* )vertexCache.MappedIndexBuffer( indexBlock );
	numVerts = 0;
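
Note the call-site simplification here and in the two model hunks below: AllocVertex()/AllocIndex() now take element counts instead of pre-aligned byte sizes. Presumably the ALIGN( count * sizeof( element ), *_CACHE_ALIGN ) arithmetic moved inside the vertex cache; that code is not part of the visible hunks, so the sketch below is an assumption about its shape, not the actual implementation.

// Hypothetical sketch of what the count-based AllocVertex() likely does internally
// (names and forwarding are assumptions, not shown in this commit's hunks):
vertCacheHandle_t AllocVertexSketch( idVertexCache& cache, const void* data, int numVerts )
{
	// the byte-size/alignment math every caller used to repeat inline
	int bytes = ALIGN( numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN );
	return cache.ActuallyAlloc( data, bytes );	// hypothetical internal helper; real internals may differ
}
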
@@ -812,8 +812,8 @@ drawSurf_t* idRenderModelDecal::CreateDecalDrawSurf( const viewEntity_t* space,
	newTri->numVerts = maxVerts;
	newTri->numIndexes = maxIndexes;

	newTri->ambientCache = vertexCache.AllocVertex( NULL, ALIGN( maxVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
	newTri->indexCache = vertexCache.AllocIndex( NULL, ALIGN( maxIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
	newTri->ambientCache = vertexCache.AllocVertex( NULL, maxVerts );
	newTri->indexCache = vertexCache.AllocIndex( NULL, maxIndexes );

	idDrawVert* mappedVerts = ( idDrawVert* )vertexCache.MappedVertexBuffer( newTri->ambientCache );
	triIndex_t* mappedIndexes = ( triIndex_t* )vertexCache.MappedIndexBuffer( newTri->indexCache );
@@ -706,8 +706,8 @@ drawSurf_t* idRenderModelOverlay::CreateOverlayDrawSurf( const viewEntity_t* spa
	srfTriangles_t* newTri = ( srfTriangles_t* )R_ClearedFrameAlloc( sizeof( *newTri ), FRAME_ALLOC_SURFACE_TRIANGLES );
	newTri->staticModelWithJoints = ( staticModel->jointsInverted != NULL ) ? const_cast< idRenderModelStatic* >( staticModel ) : NULL; // allow GPU skinning

	newTri->ambientCache = vertexCache.AllocVertex( NULL, ALIGN( maxVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
	newTri->indexCache = vertexCache.AllocIndex( NULL, ALIGN( maxIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
	newTri->ambientCache = vertexCache.AllocVertex( NULL, maxVerts );
	newTri->indexCache = vertexCache.AllocIndex( NULL, maxIndexes );

	idDrawVert* mappedVerts = ( idDrawVert* )vertexCache.MappedVertexBuffer( newTri->ambientCache );
	triIndex_t* mappedIndexes = ( triIndex_t* )vertexCache.MappedIndexBuffer( newTri->indexCache );
neo/renderer/OpenGL/BufferObject_GL.cpp (new file, 695 lines)
@ -0,0 +1,695 @@
|
|||
/*
|
||||
===========================================================================
|
||||
|
||||
Doom 3 BFG Edition GPL Source Code
|
||||
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
|
||||
Copyright (C) 2013 Robert Beckebans
|
||||
Copyright (C) 2016-2017 Dustin Land
|
||||
|
||||
This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
|
||||
|
||||
Doom 3 BFG Edition Source Code is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
Doom 3 BFG Edition Source Code is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with Doom 3 BFG Edition Source Code. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
In addition, the Doom 3 BFG Edition Source Code is also subject to certain additional terms. You should have received a copy of these additional terms immediately following the terms and conditions of the GNU General Public License which accompanied the Doom 3 BFG Edition Source Code. If not, please request a copy in writing from id Software at the address below.
|
||||
|
||||
If you have questions concerning this license or the applicable additional terms, you may contact in writing id Software LLC, c/o ZeniMax Media Inc., Suite 120, Rockville, Maryland 20850 USA.
|
||||
|
||||
===========================================================================
|
||||
*/
|
||||
#pragma hdrstop
|
||||
#include "precompiled.h"
|
||||
#include "../RenderCommon.h"
|
||||
|
||||
extern idCVar r_showBuffers;
|
||||
|
||||
|
||||
//static const GLenum bufferUsage = GL_STATIC_DRAW;
|
||||
static const GLenum bufferUsage = GL_DYNAMIC_DRAW;
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
================================================================================================
|
||||
|
||||
Buffer Objects
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
|
||||
/*
|
||||
========================
|
||||
UnbindBufferObjects
|
||||
========================
|
||||
*/
|
||||
void UnbindBufferObjects()
|
||||
{
|
||||
glBindBuffer( GL_ARRAY_BUFFER, 0 );
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, 0 );
|
||||
}
|
||||
|
||||
|
||||
|
||||
/*
|
||||
================================================================================================
|
||||
|
||||
idVertexBuffer
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::idVertexBuffer
|
||||
========================
|
||||
*/
|
||||
idVertexBuffer::idVertexBuffer()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = 0xFFFF;
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::AllocBufferObject
|
||||
========================
|
||||
*/
|
||||
bool idVertexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
|
||||
{
|
||||
assert( apiObject == 0xFFFF );
|
||||
assert_16_byte_aligned( data );
|
||||
|
||||
if( allocSize <= 0 )
|
||||
{
|
||||
idLib::Error( "idVertexBuffer::AllocBufferObject: allocSize = %i", allocSize );
|
||||
}
|
||||
|
||||
size = allocSize;
|
||||
usage = _usage;
|
||||
|
||||
bool allocationFailed = false;
|
||||
|
||||
int numBytes = GetAllocedSize();
|
||||
|
||||
// clear out any previous error
|
||||
GL_CheckErrors();
|
||||
|
||||
glGenBuffers( 1, ( GLuint* ) &apiObject );
|
||||
if( apiObject == 0xFFFF )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::AllocBufferObject: failed" );
|
||||
}
|
||||
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
|
||||
|
||||
// these are rewritten every frame
|
||||
glBufferDataARB( GL_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
|
||||
|
||||
GLenum err = glGetError();
|
||||
if( err == GL_OUT_OF_MEMORY )
|
||||
{
|
||||
idLib::Warning( "idVertexBuffer::AllocBufferObject: allocation failed" );
|
||||
allocationFailed = true;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "vertex buffer alloc %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if( data != NULL )
|
||||
{
|
||||
Update( data, allocSize );
|
||||
}
|
||||
|
||||
return !allocationFailed;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::FreeBufferObject
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::FreeBufferObject()
|
||||
{
|
||||
if( IsMapped() )
|
||||
{
|
||||
UnmapBuffer();
|
||||
}
|
||||
|
||||
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
|
||||
if( OwnsBuffer() == false )
|
||||
{
|
||||
ClearWithoutFreeing();
|
||||
return;
|
||||
}
|
||||
|
||||
if( apiObject == 0xFFFF )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "vertex buffer free %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
glDeleteBuffers( 1, ( GLuint* )&apiObject );
|
||||
|
||||
ClearWithoutFreeing();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::Update
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::Update( const void* data, int updateSize, int offset ) const
|
||||
{
|
||||
assert( apiObject != 0xFFFF );
|
||||
assert_16_byte_aligned( data );
|
||||
assert( ( GetOffset() & 15 ) == 0 );
|
||||
|
||||
if( updateSize > GetSize() )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
|
||||
}
|
||||
|
||||
int numBytes = ( updateSize + 15 ) & ~15;
|
||||
|
||||
if( usage == BU_DYNAMIC )
|
||||
{
|
||||
CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
|
||||
}
|
||||
else
|
||||
{
|
||||
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
|
||||
glBufferSubData( GL_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptrARB )numBytes, data );
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::MapBuffer
|
||||
========================
|
||||
*/
|
||||
void* idVertexBuffer::MapBuffer( bufferMapType_t mapType )
|
||||
{
|
||||
assert( apiObject != 0xFFFF );
|
||||
assert( IsMapped() == false );
|
||||
|
||||
buffer = NULL;
|
||||
|
||||
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
|
||||
if( mapType == BM_READ )
|
||||
{
|
||||
buffer = glMapBufferRange( GL_ARRAY_BUFFER_ARB, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
}
|
||||
else if( mapType == BM_WRITE )
|
||||
{
|
||||
// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
|
||||
buffer = glMapBufferRange( GL_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
|
||||
if( buffer != NULL )
|
||||
{
|
||||
buffer = ( byte* )buffer + GetOffset();
|
||||
}
|
||||
// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
|
||||
}
|
||||
else
|
||||
{
|
||||
assert( false );
|
||||
}
|
||||
|
||||
SetMapped();
|
||||
|
||||
if( buffer == NULL )
|
||||
{
|
||||
idLib::FatalError( "idVertexBuffer::MapBuffer: failed" );
|
||||
}
|
||||
return buffer;
|
||||
}
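
For context, the typical caller-side pattern for these buffers is map once, write, then unmap before drawing. GL_MAP_UNSYNCHRONIZED_BIT skips driver synchronization, which relies on the engine's frame-based vertex cache rotating buffers so the GPU is no longer reading the range being rewritten. A hedged usage sketch, engine context assumed:

// Sketch only: per-frame dynamic vertex data, engine types assumed.
static void FillDynamicVertsSketch()
{
	idVertexBuffer dynamicVerts;
	dynamicVerts.AllocBufferObject( NULL, 1024 * sizeof( idDrawVert ), BU_DYNAMIC );

	idDrawVert* verts = dynamicVerts.MapVertexBuffer( BM_WRITE );	// glMapBufferRange, write-only, unsynchronized
	// ... fill verts[0 .. numVerts-1] for this frame ...
	dynamicVerts.UnmapBuffer();										// glUnmapBuffer before issuing draws
}
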
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::UnmapBuffer
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::UnmapBuffer()
|
||||
{
|
||||
assert( apiObject != 0xFFFF );
|
||||
assert( IsMapped() );
|
||||
|
||||
glBindBuffer( GL_ARRAY_BUFFER, apiObject );
|
||||
if( !glUnmapBuffer( GL_ARRAY_BUFFER ) )
|
||||
{
|
||||
idLib::Printf( "idVertexBuffer::UnmapBuffer failed\n" );
|
||||
}
|
||||
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idVertexBuffer::ClearWithoutFreeing
|
||||
========================
|
||||
*/
|
||||
void idVertexBuffer::ClearWithoutFreeing()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = 0xFFFF;
|
||||
}
|
||||
|
||||
/*
|
||||
================================================================================================
|
||||
|
||||
idIndexBuffer
|
||||
|
||||
================================================================================================
|
||||
*/
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::idIndexBuffer
|
||||
========================
|
||||
*/
|
||||
idIndexBuffer::idIndexBuffer()
|
||||
{
|
||||
size = 0;
|
||||
offsetInOtherBuffer = OWNS_BUFFER_FLAG;
|
||||
apiObject = 0xFFFF;
|
||||
SetUnmapped();
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::AllocBufferObject
|
||||
========================
|
||||
*/
|
||||
bool idIndexBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
|
||||
{
|
||||
assert( apiObject == 0xFFFF );
|
||||
assert_16_byte_aligned( data );
|
||||
|
||||
if( allocSize <= 0 )
|
||||
{
|
||||
idLib::Error( "idIndexBuffer::AllocBufferObject: allocSize = %i", allocSize );
|
||||
}
|
||||
|
||||
size = allocSize;
|
||||
usage = _usage;
|
||||
|
||||
bool allocationFailed = false;
|
||||
|
||||
int numBytes = GetAllocedSize();
|
||||
|
||||
|
||||
// clear out any previous error
|
||||
GL_CheckErrors();
|
||||
|
||||
glGenBuffersARB( 1, ( GLuint* )&apiObject );
|
||||
if( apiObject == 0xFFFF )
|
||||
{
|
||||
GLenum error = glGetError();
|
||||
idLib::FatalError( "idIndexBuffer::AllocBufferObject: failed - GL_Error %d", error );
|
||||
}
|
||||
glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
|
||||
|
||||
// these are rewritten every frame
|
||||
glBufferData( GL_ELEMENT_ARRAY_BUFFER, numBytes, NULL, bufferUsage );
|
||||
|
||||
GLenum err = glGetError();
|
||||
if( err == GL_OUT_OF_MEMORY )
|
||||
{
|
||||
idLib::Warning( "idIndexBuffer:AllocBufferObject: allocation failed" );
|
||||
allocationFailed = true;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "index buffer alloc %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
// copy the data
|
||||
if( data != NULL )
|
||||
{
|
||||
Update( data, allocSize );
|
||||
}
|
||||
|
||||
return !allocationFailed;
|
||||
}
|
||||
|
||||
/*
|
||||
========================
|
||||
idIndexBuffer::FreeBufferObject
|
||||
========================
|
||||
*/
|
||||
void idIndexBuffer::FreeBufferObject()
|
||||
{
|
||||
if( IsMapped() )
|
||||
{
|
||||
UnmapBuffer();
|
||||
}
|
||||
|
||||
// if this is a sub-allocation inside a larger buffer, don't actually free anything.
|
||||
if( OwnsBuffer() == false )
|
||||
{
|
||||
ClearWithoutFreeing();
|
||||
return;
|
||||
}
|
||||
|
||||
if( apiObject == 0xFFFF )
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
if( r_showBuffers.GetBool() )
|
||||
{
|
||||
idLib::Printf( "index buffer free %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
|
||||
}
|
||||
|
||||
glDeleteBuffers( 1, ( GLuint* )&apiObject );
|
||||
|
||||
ClearWithoutFreeing();
|
||||
}
/*
========================
idIndexBuffer::Update
========================
*/
void idIndexBuffer::Update( const void* data, int updateSize, int offset ) const
{
	assert( apiObject != 0xFFFF );
	assert_16_byte_aligned( data );
	assert( ( GetOffset() & 15 ) == 0 );

	if( updateSize > GetSize() )
	{
		idLib::FatalError( "idIndexBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
	}

	int numBytes = ( updateSize + 15 ) & ~15;

	if( usage == BU_DYNAMIC )
	{
		CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
	}
	else
	{
		glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
		glBufferSubData( GL_ELEMENT_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptrARB )numBytes, data );
	}
}
/*
========================
idIndexBuffer::MapBuffer
========================
*/
void* idIndexBuffer::MapBuffer( bufferMapType_t mapType )
{
	assert( apiObject != 0xFFFF );
	assert( IsMapped() == false );

	buffer = NULL;

	glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
	if( mapType == BM_READ )
	{
		//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_READ_ONLY_ARB );
		buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_READ_BIT | GL_MAP_UNSYNCHRONIZED_BIT );
		if( buffer != NULL )
		{
			buffer = ( byte* )buffer + GetOffset();
		}
	}
	else if( mapType == BM_WRITE )
	{
		//buffer = glMapBufferARB( GL_ELEMENT_ARRAY_BUFFER_ARB, GL_WRITE_ONLY_ARB );

		// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
		buffer = glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
		if( buffer != NULL )
		{
			buffer = ( byte* )buffer + GetOffset();
		}
		// assert( IsWriteCombined( buffer ) ); // commented out because it spams the console
	}
	else
	{
		assert( false );
	}

	SetMapped();

	if( buffer == NULL )
	{
		idLib::FatalError( "idIndexBuffer::MapBuffer: failed" );
	}
	return buffer;
}
/*
========================
idIndexBuffer::UnmapBuffer
========================
*/
void idIndexBuffer::UnmapBuffer()
{
	assert( apiObject != 0xFFFF );
	assert( IsMapped() );

	glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, apiObject );
	if( !glUnmapBuffer( GL_ELEMENT_ARRAY_BUFFER ) )
	{
		idLib::Printf( "idIndexBuffer::UnmapBuffer failed\n" );
	}

	buffer = NULL;

	SetUnmapped();
}
/*
========================
idIndexBuffer::ClearWithoutFreeing
========================
*/
void idIndexBuffer::ClearWithoutFreeing()
{
	size = 0;
	offsetInOtherBuffer = OWNS_BUFFER_FLAG;
	apiObject = 0xFFFF;
}
/*
================================================================================================

	idUniformBuffer

================================================================================================
*/

/*
========================
idUniformBuffer::idUniformBuffer
========================
*/
idUniformBuffer::idUniformBuffer()
{
	size = 0;
	offsetInOtherBuffer = OWNS_BUFFER_FLAG;
	apiObject = 0xFFFF;
	SetUnmapped();
}
/*
========================
idUniformBuffer::AllocBufferObject
========================
*/
bool idUniformBuffer::AllocBufferObject( const void* data, int allocSize, bufferUsageType_t _usage )
{
	assert( apiObject == 0xFFFF );
	assert_16_byte_aligned( data );

	if( allocSize <= 0 )
	{
		idLib::Error( "idUniformBuffer::AllocBufferObject: allocSize = %i", allocSize );
	}

	size = allocSize;
	usage = _usage;

	bool allocationFailed = false;

	const int numBytes = GetAllocedSize();

	glGenBuffers( 1, ( GLuint* )&apiObject );
	glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
	glBufferData( GL_UNIFORM_BUFFER, numBytes, NULL, GL_STREAM_DRAW_ARB );
	glBindBuffer( GL_UNIFORM_BUFFER, 0 );

	if( r_showBuffers.GetBool() )
	{
		idLib::Printf( "joint buffer alloc %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
	}

	// copy the data
	if( data != NULL )
	{
		Update( data, allocSize );
	}

	return !allocationFailed;
}
/*
========================
idUniformBuffer::FreeBufferObject
========================
*/
void idUniformBuffer::FreeBufferObject()
{
	if( IsMapped() )
	{
		UnmapBuffer();
	}

	// if this is a sub-allocation inside a larger buffer, don't actually free anything.
	if( OwnsBuffer() == false )
	{
		ClearWithoutFreeing();
		return;
	}

	if( apiObject == 0xFFFF )
	{
		return;
	}

	if( r_showBuffers.GetBool() )
	{
		idLib::Printf( "joint buffer free %p, api %p (%i bytes)\n", this, GetAPIObject(), GetSize() );
	}

	glBindBuffer( GL_UNIFORM_BUFFER, 0 );
	glDeleteBuffers( 1, ( GLuint* )&apiObject );

	ClearWithoutFreeing();
}
/*
========================
idUniformBuffer::Update
========================
*/
void idUniformBuffer::Update( const void* data, int updateSize, int offset ) const
{
	assert( apiObject != 0xFFFF );
	assert_16_byte_aligned( data );
	assert( ( GetOffset() & 15 ) == 0 );

	if( updateSize > GetSize() )
	{
		idLib::FatalError( "idUniformBuffer::Update: size overrun, %i > %i\n", updateSize, GetSize() );
	}

	const int numBytes = ( updateSize + 15 ) & ~15;

	if( usage == BU_DYNAMIC )
	{
		CopyBuffer( ( byte* )buffer + offset, ( const byte* )data, numBytes );
	}
	else
	{
		glBindBuffer( GL_ARRAY_BUFFER, apiObject );
		glBufferSubData( GL_ARRAY_BUFFER, GetOffset() + offset, ( GLsizeiptr )numBytes, data );
	}
}
/*
========================
idUniformBuffer::MapBuffer
========================
*/
void* idUniformBuffer::MapBuffer( bufferMapType_t mapType )
{
	assert( IsMapped() == false );
	assert( mapType == BM_WRITE );
	assert( apiObject != 0xFFFF );

	int numBytes = GetAllocedSize();

	buffer = NULL;

	glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
	numBytes = numBytes;
	assert( GetOffset() == 0 );

	// RB: removed GL_MAP_INVALIDATE_RANGE_BIT as it breaks with an optimization in the Nvidia WHQL drivers >= 344.11
	buffer = glMapBufferRange( GL_UNIFORM_BUFFER, 0, GetAllocedSize(), GL_MAP_WRITE_BIT /*| GL_MAP_INVALIDATE_RANGE_BIT*/ | GL_MAP_UNSYNCHRONIZED_BIT );
	if( buffer != NULL )
	{
		buffer = ( byte* )buffer + GetOffset();
	}

	SetMapped();

	if( buffer == NULL )
	{
		idLib::FatalError( "idUniformBuffer::MapBuffer: failed" );
	}
	return ( float* ) buffer;
}
/*
========================
idUniformBuffer::UnmapBuffer
========================
*/
void idUniformBuffer::UnmapBuffer()
{
	assert( apiObject != 0xFFFF );
	assert( IsMapped() );

	glBindBuffer( GL_UNIFORM_BUFFER, apiObject );
	if( !glUnmapBuffer( GL_UNIFORM_BUFFER ) )
	{
		idLib::Printf( "idUniformBuffer::UnmapBuffer failed\n" );
	}

	buffer = NULL;

	SetUnmapped();
}
/*
========================
idUniformBuffer::ClearWithoutFreeing
========================
*/
void idUniformBuffer::ClearWithoutFreeing()
{
	size = 0;
	offsetInOtherBuffer = OWNS_BUFFER_FLAG;
	apiObject = 0xFFFF;
}
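Editor's note, illustrative only and not part of the commit: both Update() methods above round the write size up to a 16-byte multiple before handing it to CopyBuffer or glBufferSubData. A minimal standalone sketch of that rounding (the helper name RoundUpTo16 is hypothetical):

// Round a byte count up to the next multiple of 16, as done in idIndexBuffer::Update
// and idUniformBuffer::Update. E.g. 100 -> 112, 112 -> 112, 113 -> 128.
inline int RoundUpTo16( int updateSize )
{
	return ( updateSize + 15 ) & ~15;
}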
@@ -193,7 +193,7 @@ void idRenderBackend::DrawElementsWithCounters( const drawSurf_t* surf )

	if( surf->jointCache )
	{
		idJointBuffer jointBuffer;
		idUniformBuffer jointBuffer;
		if( !vertexCache.GetJointBuffer( surf->jointCache, &jointBuffer ) )
		{
			idLib::Warning( "RB_DrawElementsWithCounters, jointBuffer == NULL" );

@@ -202,10 +202,10 @@ void idRenderBackend::DrawElementsWithCounters( const drawSurf_t* surf )
		assert( ( jointBuffer.GetOffset() & ( glConfig.uniformBufferOffsetAlignment - 1 ) ) == 0 );

		// RB: 64 bit fixes, changed GLuint to GLintptr
		const GLintptr ubo = reinterpret_cast< GLintptr >( jointBuffer.GetAPIObject() );
		const GLintptr ubo = jointBuffer.GetAPIObject();
		// RB end

		glBindBufferRange( GL_UNIFORM_BUFFER, 0, ubo, jointBuffer.GetOffset(), jointBuffer.GetNumJoints() * sizeof( idJointMat ) );
		glBindBufferRange( GL_UNIFORM_BUFFER, 0, ubo, jointBuffer.GetOffset(), jointBuffer.GetSize() );
	}

	renderProgManager.CommitUniforms();
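The assertion in the hunk above relies on glConfig.uniformBufferOffsetAlignment being a power of two: masking the offset with (alignment - 1) checks that it is an exact multiple. A standalone sketch of that check (editor's illustration, not from the commit; the helper name IsAligned is hypothetical):

// Returns true when 'offset' is a multiple of the power-of-two 'alignment',
// e.g. IsAligned( 512, 256 ) == true, IsAligned( 300, 256 ) == false.
inline bool IsAligned( int offset, int alignment )
{
	return ( offset & ( alignment - 1 ) ) == 0;
}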
@@ -2541,7 +2541,7 @@ void idRenderBackend::StencilShadowPass( const drawSurf_t* drawSurfs, const view
	{
		assert( renderProgManager.ShaderUsesJoints() );

		idJointBuffer jointBuffer;
		idUniformBuffer jointBuffer;
		if( !vertexCache.GetJointBuffer( drawSurf->jointCache, &jointBuffer ) )
		{
			idLib::Warning( "RB_DrawElementsWithCounters, jointBuffer == NULL" );

@@ -2549,8 +2549,8 @@ void idRenderBackend::StencilShadowPass( const drawSurf_t* drawSurfs, const view
		}
		assert( ( jointBuffer.GetOffset() & ( glConfig.uniformBufferOffsetAlignment - 1 ) ) == 0 );

		const GLintptr ubo = reinterpret_cast< GLintptr >( jointBuffer.GetAPIObject() );
		glBindBufferRange( GL_UNIFORM_BUFFER, 0, ubo, jointBuffer.GetOffset(), jointBuffer.GetNumJoints() * sizeof( idJointMat ) );
		const GLintptr ubo = jointBuffer.GetAPIObject();
		glBindBufferRange( GL_UNIFORM_BUFFER, 0, ubo, jointBuffer.GetOffset(), jointBuffer.GetSize() );

		if( ( vertexLayout != LAYOUT_DRAW_SHADOW_VERT_SKINNED ) || ( currentVertexBuffer != ( GLintptr )vertexBuffer->GetAPIObject() ) || !r_useStateCaching.GetBool() )
		{
@@ -914,7 +914,7 @@ void R_InitOpenGL()
	r_initialized = true;

	// allocate the vertex array range or vertex objects
	vertexCache.Init();
	vertexCache.Init( glConfig.uniformBufferOffsetAlignment );

	// allocate the frame data, which may be more if smp is enabled
	R_InitFrameData();
@@ -4,6 +4,7 @@
Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2013-2014 Robert Beckebans
Copyright (C) 2016-2017 Dustin Land

This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
@@ -101,14 +102,15 @@ static void UnmapGeoBufferSet( geoBufferSet_t& gbs )
AllocGeoBufferSet
==============
*/
static void AllocGeoBufferSet( geoBufferSet_t& gbs, const int vertexBytes, const int indexBytes, const int jointBytes )
static void AllocGeoBufferSet( geoBufferSet_t& gbs, const int vertexBytes, const int indexBytes, const int jointBytes, bufferUsageType_t usage )
{
	gbs.vertexBuffer.AllocBufferObject( NULL, vertexBytes );
	gbs.indexBuffer.AllocBufferObject( NULL, indexBytes );
	if( jointBytes != 0 )
	gbs.vertexBuffer.AllocBufferObject( NULL, vertexBytes, usage );
	gbs.indexBuffer.AllocBufferObject( NULL, indexBytes, usage );
	if( jointBytes > 0 )
	{
		gbs.jointBuffer.AllocBufferObject( NULL, jointBytes / sizeof( idJointMat ) );
		gbs.jointBuffer.AllocBufferObject( NULL, jointBytes, usage );
	}

	ClearGeoBufferSet( gbs );
}
@@ -117,22 +119,28 @@ static void AllocGeoBufferSet( geoBufferSet_t& gbs, const int vertexBytes, const
idVertexCache::Init
==============
*/
void idVertexCache::Init( bool restart )
void idVertexCache::Init( int _uniformBufferOffsetAlignment )
{
	currentFrame = 0;
	listNum = 0;

	uniformBufferOffsetAlignment = _uniformBufferOffsetAlignment;

	mostUsedVertex = 0;
	mostUsedIndex = 0;
	mostUsedJoint = 0;

	for( int i = 0; i < VERTCACHE_NUM_FRAMES; i++ )
	for( int i = 0; i < NUM_FRAME_DATA; i++ )
	{
		AllocGeoBufferSet( frameData[i], VERTCACHE_VERTEX_MEMORY_PER_FRAME, VERTCACHE_INDEX_MEMORY_PER_FRAME, VERTCACHE_JOINT_MEMORY_PER_FRAME );
		AllocGeoBufferSet( frameData[i], VERTCACHE_VERTEX_MEMORY_PER_FRAME, VERTCACHE_INDEX_MEMORY_PER_FRAME, VERTCACHE_JOINT_MEMORY_PER_FRAME, BU_DYNAMIC );
	}
	AllocGeoBufferSet( staticData, STATIC_VERTEX_MEMORY, STATIC_INDEX_MEMORY, 0 );
#if 1
	AllocGeoBufferSet( staticData, STATIC_VERTEX_MEMORY, STATIC_INDEX_MEMORY, 0, BU_STATIC );
#else
	AllocGeoBufferSet( staticData, STATIC_VERTEX_MEMORY, STATIC_INDEX_MEMORY, 0, BU_DYNAMIC );
#endif

	MapGeoBufferSet( frameData[listNum] );
	MapGeoBufferSet( frameData[ listNum ] );
}

/*
@@ -142,7 +150,7 @@ idVertexCache::Shutdown
*/
void idVertexCache::Shutdown()
{
	for( int i = 0; i < VERTCACHE_NUM_FRAMES; i++ )
	for( int i = 0; i < NUM_FRAME_DATA; i++ )
	{
		frameData[i].vertexBuffer.FreeBufferObject();
		frameData[i].indexBuffer.FreeBufferObject();
@@ -158,7 +166,7 @@ idVertexCache::PurgeAll
void idVertexCache::PurgeAll()
{
	Shutdown();
	Init( true );
	Init( uniformBufferOffsetAlignment );
}

/*
@@ -194,52 +202,80 @@ vertCacheHandle_t idVertexCache::ActuallyAlloc( geoBufferSet_t& vcs, const void*

	assert( ( bytes & 15 ) == 0 );

	// thread safe interlocked adds
	byte** base = NULL;
	int endPos = 0;
	if( type == CACHE_INDEX )
	int offset = 0;

	switch( type )
	{
		base = &vcs.mappedIndexBase;
		endPos = vcs.indexMemUsed.Add( bytes );
		if( endPos > vcs.indexBuffer.GetAllocedSize() )
		case CACHE_INDEX:
		{
			idLib::Error( "Out of index cache" );
			endPos = vcs.indexMemUsed.Add( bytes );
			if( endPos > vcs.indexBuffer.GetAllocedSize() )
			{
				idLib::Error( "Out of index cache" );
			}

			offset = endPos - bytes;

			if( data != NULL )
			{
				if( vcs.indexBuffer.GetUsage() == BU_DYNAMIC )
				{
					MapGeoBufferSet( vcs );
				}
				vcs.indexBuffer.Update( data, bytes, offset );
			}

			break;
		}
	}
	else if( type == CACHE_VERTEX )
	{
		base = &vcs.mappedVertexBase;
		endPos = vcs.vertexMemUsed.Add( bytes );
		if( endPos > vcs.vertexBuffer.GetAllocedSize() )
		case CACHE_VERTEX:
		{
			idLib::Error( "Out of vertex cache" );
			endPos = vcs.vertexMemUsed.Add( bytes );
			if( endPos > vcs.vertexBuffer.GetAllocedSize() )
			{
				idLib::Error( "Out of vertex cache" );
			}

			offset = endPos - bytes;

			if( data != NULL )
			{
				if( vcs.vertexBuffer.GetUsage() == BU_DYNAMIC )
				{
					MapGeoBufferSet( vcs );
				}
				vcs.vertexBuffer.Update( data, bytes, offset );
			}

			break;
		}
	}
	else if( type == CACHE_JOINT )
	{
		base = &vcs.mappedJointBase;
		endPos = vcs.jointMemUsed.Add( bytes );
		if( endPos > vcs.jointBuffer.GetAllocedSize() )
		case CACHE_JOINT:
		{
			idLib::Error( "Out of joint buffer cache" );
			endPos = vcs.jointMemUsed.Add( bytes );
			if( endPos > vcs.jointBuffer.GetAllocedSize() )
			{
				idLib::Error( "Out of joint buffer cache" );
			}

			offset = endPos - bytes;

			if( data != NULL )
			{
				if( vcs.jointBuffer.GetUsage() == BU_DYNAMIC )
				{
					MapGeoBufferSet( vcs );
				}
				vcs.jointBuffer.Update( data, bytes, offset );
			}

			break;
		}
	}
	else
	{
		assert( false );
		default:
			assert( false );
	}

	vcs.allocations++;

	int offset = endPos - bytes;

	// Actually perform the data transfer
	if( data != NULL )
	{
		MapGeoBufferSet( vcs );
		CopyBuffer( *base + offset, ( const byte* )data, bytes );
	}

	vertCacheHandle_t handle = ( ( uint64 )( currentFrame & VERTCACHE_FRAME_MASK ) << VERTCACHE_FRAME_SHIFT ) |
							   ( ( uint64 )( offset & VERTCACHE_OFFSET_MASK ) << VERTCACHE_OFFSET_SHIFT ) |
							   ( ( uint64 )( bytes & VERTCACHE_SIZE_MASK ) << VERTCACHE_SIZE_SHIFT );

@@ -250,6 +286,112 @@ vertCacheHandle_t idVertexCache::ActuallyAlloc( geoBufferSet_t& vcs, const void*
	return handle;
}
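For reference, the handle built in ActuallyAlloc can be decoded with the same shift/mask constants used throughout this file; a small sketch (editor's illustration, not part of the commit; the function name UnpackVertCacheHandle is hypothetical, the VERTCACHE_* constants are the engine's):

// Decode the frame number, byte offset and byte size packed into a vertCacheHandle_t.
inline void UnpackVertCacheHandle( vertCacheHandle_t handle, int& frameNum, int& offset, int& bytes )
{
	frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
	offset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
	bytes = ( int )( handle >> VERTCACHE_SIZE_SHIFT ) & VERTCACHE_SIZE_MASK;
}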
/*
==============
idVertexCache::AllocVertex
==============
*/
vertCacheHandle_t idVertexCache::AllocVertex( const void* data, int num, size_t size /*= sizeof( idDrawVert ) */ )
{
	return ActuallyAlloc( frameData[ listNum ], data, ALIGN( num * size, VERTEX_CACHE_ALIGN ), CACHE_VERTEX );
}

/*
==============
idVertexCache::AllocIndex
==============
*/
vertCacheHandle_t idVertexCache::AllocIndex( const void* data, int num, size_t size /*= sizeof( triIndex_t ) */ )
{
	return ActuallyAlloc( frameData[ listNum ], data, ALIGN( num * size, INDEX_CACHE_ALIGN ), CACHE_INDEX );
}

/*
==============
idVertexCache::AllocJoint
==============
*/
vertCacheHandle_t idVertexCache::AllocJoint( const void* data, int num, size_t size /*= sizeof( idJointMat ) */ )
{
	return ActuallyAlloc( frameData[ listNum ], data, ALIGN( num * size, uniformBufferOffsetAlignment ), CACHE_JOINT );
}
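AllocJoint rounds each allocation up to the UBO offset alignment reported by the driver, so every joint handle is a legal glBindBufferRange offset. A worked example of that rounding (editor's illustration, not from the commit; the 256-byte alignment, 48-byte joint size, and the helper name AlignUp are assumed sample values):

// ALIGN-style round-up of x to the next multiple of the power-of-two a.
// With 32 joints of 48 bytes and a 256-byte alignment: 32 * 48 = 1536, AlignUp( 1536, 256 ) = 1536;
// with 33 joints: 33 * 48 = 1584, AlignUp( 1584, 256 ) = 1792.
inline size_t AlignUp( size_t x, size_t a )
{
	return ( x + a - 1 ) & ~( a - 1 );
}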
/*
==============
idVertexCache::AllocStaticVertex
==============
*/
vertCacheHandle_t idVertexCache::AllocStaticVertex( const void* data, int bytes )
{
	if( staticData.vertexMemUsed.GetValue() + bytes > STATIC_VERTEX_MEMORY )
	{
		idLib::FatalError( "AllocStaticVertex failed, increase STATIC_VERTEX_MEMORY" );
	}
	return ActuallyAlloc( staticData, data, bytes, CACHE_VERTEX );
}

/*
==============
idVertexCache::AllocStaticIndex
==============
*/
vertCacheHandle_t idVertexCache::AllocStaticIndex( const void* data, int bytes )
{
	if( staticData.indexMemUsed.GetValue() + bytes > STATIC_INDEX_MEMORY )
	{
		idLib::FatalError( "AllocStaticIndex failed, increase STATIC_INDEX_MEMORY" );
	}
	return ActuallyAlloc( staticData, data, bytes, CACHE_INDEX );
}
/*
==============
idVertexCache::MappedVertexBuffer
==============
*/
byte* idVertexCache::MappedVertexBuffer( vertCacheHandle_t handle )
{
	release_assert( !CacheIsStatic( handle ) );
	const uint64 offset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
	const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
	release_assert( frameNum == ( currentFrame & VERTCACHE_FRAME_MASK ) );
	return frameData[ listNum ].mappedVertexBase + offset;
}

/*
==============
idVertexCache::MappedIndexBuffer
==============
*/
byte* idVertexCache::MappedIndexBuffer( vertCacheHandle_t handle )
{
	release_assert( !CacheIsStatic( handle ) );
	const uint64 offset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
	const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
	release_assert( frameNum == ( currentFrame & VERTCACHE_FRAME_MASK ) );
	return frameData[ listNum ].mappedIndexBase + offset;
}
/*
==============
idVertexCache::CacheIsCurrent
==============
*/
bool idVertexCache::CacheIsCurrent( const vertCacheHandle_t handle )
{
	const int isStatic = handle & VERTCACHE_STATIC;
	if( isStatic )
	{
		return true;
	}
	const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
	if( frameNum != ( currentFrame & VERTCACHE_FRAME_MASK ) )
	{
		return false;
	}
	return true;
}
/*
==============
idVertexCache::GetVertexBuffer

@@ -270,7 +412,7 @@ bool idVertexCache::GetVertexBuffer( vertCacheHandle_t handle, idVertexBuffer* v
	{
		return false;
	}
	vb->Reference( frameData[drawListNum].vertexBuffer, offset, size );
	vb->Reference( frameData[ drawListNum ].vertexBuffer, offset, size );
	return true;
}
@@ -294,7 +436,7 @@ bool idVertexCache::GetIndexBuffer( vertCacheHandle_t handle, idIndexBuffer* ib
	{
		return false;
	}
	ib->Reference( frameData[drawListNum].indexBuffer, offset, size );
	ib->Reference( frameData[ drawListNum ].indexBuffer, offset, size );
	return true;
}
@@ -303,23 +445,22 @@ bool idVertexCache::GetIndexBuffer( vertCacheHandle_t handle, idIndexBuffer* ib
idVertexCache::GetJointBuffer
==============
*/
bool idVertexCache::GetJointBuffer( vertCacheHandle_t handle, idJointBuffer* jb )
bool idVertexCache::GetJointBuffer( vertCacheHandle_t handle, idUniformBuffer* jb )
{
	const int isStatic = handle & VERTCACHE_STATIC;
	const uint64 numBytes = ( int )( handle >> VERTCACHE_SIZE_SHIFT ) & VERTCACHE_SIZE_MASK;
	const uint64 jointOffset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
	const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
	const uint64 numJoints = numBytes / sizeof( idJointMat );
	if( isStatic )
	{
		jb->Reference( staticData.jointBuffer, jointOffset, numJoints );
		jb->Reference( staticData.jointBuffer, jointOffset, numBytes );
		return true;
	}
	if( frameNum != ( ( currentFrame - 1 ) & VERTCACHE_FRAME_MASK ) )
	{
		return false;
	}
	jb->Reference( frameData[drawListNum].jointBuffer, jointOffset, numJoints );
	jb->Reference( frameData[ drawListNum ].jointBuffer, jointOffset, numBytes );
	return true;
}
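After this change the handle's size field is passed through in bytes, which is exactly what the GL back end feeds into glBindBufferRange (see the DrawElementsWithCounters hunk earlier). A condensed usage sketch of that back-end side (editor's illustration, not a verbatim excerpt from the commit):

// Bind the joint UBO range referenced by a draw surface's jointCache handle.
idUniformBuffer jointBuffer;
if( vertexCache.GetJointBuffer( surf->jointCache, &jointBuffer ) )
{
	// GetOffset() is already a multiple of glConfig.uniformBufferOffsetAlignment.
	const GLintptr ubo = jointBuffer.GetAPIObject();
	glBindBufferRange( GL_UNIFORM_BUFFER, 0, ubo, jointBuffer.GetOffset(), jointBuffer.GetSize() );
}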
@@ -330,17 +471,17 @@ idVertexCache::BeginBackEnd
*/
void idVertexCache::BeginBackEnd()
{
	mostUsedVertex = Max( mostUsedVertex, frameData[listNum].vertexMemUsed.GetValue() );
	mostUsedIndex = Max( mostUsedIndex, frameData[listNum].indexMemUsed.GetValue() );
	mostUsedJoint = Max( mostUsedJoint, frameData[listNum].jointMemUsed.GetValue() );
	mostUsedVertex = Max( mostUsedVertex, frameData[ listNum ].vertexMemUsed.GetValue() );
	mostUsedIndex = Max( mostUsedIndex, frameData[ listNum ].indexMemUsed.GetValue() );
	mostUsedJoint = Max( mostUsedJoint, frameData[ listNum ].jointMemUsed.GetValue() );

	if( r_showVertexCache.GetBool() )
	{
		idLib::Printf( "%08d: %d allocations, %dkB vertex, %dkB index, %ikB joint : %dkB vertex, %dkB index, %ikB joint\n",
					   currentFrame, frameData[listNum].allocations,
					   frameData[listNum].vertexMemUsed.GetValue() / 1024,
					   frameData[listNum].indexMemUsed.GetValue() / 1024,
					   frameData[listNum].jointMemUsed.GetValue() / 1024,
					   currentFrame, frameData[ listNum ].allocations,
					   frameData[ listNum ].vertexMemUsed.GetValue() / 1024,
					   frameData[ listNum ].indexMemUsed.GetValue() / 1024,
					   frameData[ listNum ].jointMemUsed.GetValue() / 1024,
					   mostUsedVertex / 1024,
					   mostUsedIndex / 1024,
					   mostUsedJoint / 1024 );

@@ -348,7 +489,7 @@ void idVertexCache::BeginBackEnd()

	// unmap the current frame so the GPU can read it
	const int startUnmap = Sys_Milliseconds();
	UnmapGeoBufferSet( frameData[listNum] );
	UnmapGeoBufferSet( frameData[ listNum ] );
	UnmapGeoBufferSet( staticData );
	const int endUnmap = Sys_Milliseconds();
	if( endUnmap - startUnmap > 1 )
@@ -360,28 +501,15 @@ void idVertexCache::BeginBackEnd()
	// prepare the next frame for writing to by the CPU
	currentFrame++;

	listNum = currentFrame % VERTCACHE_NUM_FRAMES;
	listNum = currentFrame % NUM_FRAME_DATA;
	const int startMap = Sys_Milliseconds();
	MapGeoBufferSet( frameData[listNum] );
	MapGeoBufferSet( frameData[ listNum ] );
	const int endMap = Sys_Milliseconds();
	if( endMap - startMap > 1 )
	{
		idLib::PrintfIf( r_showVertexCacheTimings.GetBool(), "idVertexCache::map took %i msec\n", endMap - startMap );
	}

	ClearGeoBufferSet( frameData[listNum] );

#if 0
	const int startBind = Sys_Milliseconds();
	glBindBuffer( GL_ARRAY_BUFFER, ( GLuint )frameData[drawListNum].vertexBuffer.GetAPIObject() );
	glBindBuffer( GL_ELEMENT_ARRAY_BUFFER, ( GLuint )frameData[drawListNum].indexBuffer.GetAPIObject() );
	const int endBind = Sys_Milliseconds();
	if( endBind - startBind > 1 )
	{
		idLib::Printf( "idVertexCache::bind took %i msec\n", endBind - startBind );
	}
#endif

	ClearGeoBufferSet( frameData[ listNum ] );
}
@@ -3,6 +3,7 @@

Doom 3 BFG Edition GPL Source Code
Copyright (C) 1993-2012 id Software LLC, a ZeniMax Media company.
Copyright (C) 2016-2017 Dustin Land

This file is part of the Doom 3 BFG Edition GPL Source Code ("Doom 3 BFG Edition Source Code").
@@ -25,19 +26,17 @@ If you have questions concerning this license or the applicable additional terms

===========================================================================
*/
#ifndef __VERTEXCACHE2_H__
#define __VERTEXCACHE2_H__
#ifndef __VERTEXCACHE_H__
#define __VERTEXCACHE_H__

const int VERTCACHE_INDEX_MEMORY_PER_FRAME = 31 * 1024 * 1024;
const int VERTCACHE_VERTEX_MEMORY_PER_FRAME = 31 * 1024 * 1024;
const int VERTCACHE_JOINT_MEMORY_PER_FRAME = 256 * 1024;

const int VERTCACHE_NUM_FRAMES = 2;

// there are a lot more static indexes than vertexes, because interactions are just new
// index lists that reference existing vertexes
const int STATIC_INDEX_MEMORY = 31 * 1024 * 1024;
const int STATIC_VERTEX_MEMORY = 62 * 1024 * 1024;	// make sure it fits in VERTCACHE_OFFSET_MASK!
const int STATIC_VERTEX_MEMORY = 31 * 1024 * 1024;	// make sure it fits in VERTCACHE_OFFSET_MASK!

// vertCacheHandle_t packs size, offset, and frame number into 64 bits
typedef uint64 vertCacheHandle_t;
@@ -64,7 +63,7 @@ struct geoBufferSet_t
{
	idIndexBuffer indexBuffer;
	idVertexBuffer vertexBuffer;
	idJointBuffer jointBuffer;
	idUniformBuffer jointBuffer;
	byte* mappedVertexBase;
	byte* mappedIndexBase;
	byte* mappedJointBase;
@@ -77,7 +76,7 @@ struct geoBufferSet_t
class idVertexCache
{
public:
	void Init( bool restart = false );
	void Init( int uniformBufferOffsetAlignment );
	void Shutdown();
	void PurgeAll();
@@ -85,73 +84,21 @@ public:
	void FreeStaticData();

	// this data is only valid for one frame of rendering
	vertCacheHandle_t AllocVertex( const void* data, int bytes )
	{
		return ActuallyAlloc( frameData[listNum], data, bytes, CACHE_VERTEX );
	}
	vertCacheHandle_t AllocIndex( const void* data, int bytes )
	{
		return ActuallyAlloc( frameData[listNum], data, bytes, CACHE_INDEX );
	}
	vertCacheHandle_t AllocJoint( const void* data, int bytes )
	{
		return ActuallyAlloc( frameData[listNum], data, bytes, CACHE_JOINT );
	}
	vertCacheHandle_t AllocVertex( const void* data, int num, size_t size = sizeof( idDrawVert ) );
	vertCacheHandle_t AllocIndex( const void* data, int num, size_t size = sizeof( triIndex_t ) );
	vertCacheHandle_t AllocJoint( const void* data, int num, size_t size = sizeof( idJointMat ) );

	// this data is valid until the next map load
	vertCacheHandle_t AllocStaticVertex( const void* data, int bytes )
	{
		if( staticData.vertexMemUsed.GetValue() + bytes > STATIC_VERTEX_MEMORY )
		{
			idLib::FatalError( "AllocStaticVertex failed, increase STATIC_VERTEX_MEMORY" );
		}
		return ActuallyAlloc( staticData, data, bytes, CACHE_VERTEX );
	}
	vertCacheHandle_t AllocStaticIndex( const void* data, int bytes )
	{
		if( staticData.indexMemUsed.GetValue() + bytes > STATIC_INDEX_MEMORY )
		{
			idLib::FatalError( "AllocStaticIndex failed, increase STATIC_INDEX_MEMORY" );
		}
		return ActuallyAlloc( staticData, data, bytes, CACHE_INDEX );
	}
	vertCacheHandle_t AllocStaticVertex( const void* data, int bytes );
	vertCacheHandle_t AllocStaticIndex( const void* data, int bytes );

	byte* MappedVertexBuffer( vertCacheHandle_t handle )
	{
		release_assert( !CacheIsStatic( handle ) );
		const uint64 offset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
		const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
		release_assert( frameNum == ( currentFrame & VERTCACHE_FRAME_MASK ) );
		return frameData[ listNum ].mappedVertexBase + offset;
	}

	byte* MappedIndexBuffer( vertCacheHandle_t handle )
	{
		release_assert( !CacheIsStatic( handle ) );
		const uint64 offset = ( int )( handle >> VERTCACHE_OFFSET_SHIFT ) & VERTCACHE_OFFSET_MASK;
		const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
		release_assert( frameNum == ( currentFrame & VERTCACHE_FRAME_MASK ) );
		return frameData[ listNum ].mappedIndexBase + offset;
	}
	byte* MappedVertexBuffer( vertCacheHandle_t handle );
	byte* MappedIndexBuffer( vertCacheHandle_t handle );

	// Returns false if it's been purged
	// This can only be called by the front end, the back end should only be looking at
	// vertCacheHandle_t that are already validated.
	bool CacheIsCurrent( const vertCacheHandle_t handle )
	{
		const int isStatic = handle & VERTCACHE_STATIC;
		if( isStatic )
		{
			return true;
		}
		const uint64 frameNum = ( int )( handle >> VERTCACHE_FRAME_SHIFT ) & VERTCACHE_FRAME_MASK;
		if( frameNum != ( currentFrame & VERTCACHE_FRAME_MASK ) )
		{
			return false;
		}
		return true;
	}

	bool CacheIsCurrent( const vertCacheHandle_t handle );
	static bool CacheIsStatic( const vertCacheHandle_t handle )
	{
		return ( handle & VERTCACHE_STATIC ) != 0;
@@ -160,17 +107,19 @@ public:
	// vb/ib is a temporary reference -- don't store it
	bool GetVertexBuffer( vertCacheHandle_t handle, idVertexBuffer* vb );
	bool GetIndexBuffer( vertCacheHandle_t handle, idIndexBuffer* ib );
	bool GetJointBuffer( vertCacheHandle_t handle, idJointBuffer* jb );
	bool GetJointBuffer( vertCacheHandle_t handle, idUniformBuffer* jb );

	void BeginBackEnd();

public:
	int currentFrame;	// for determining the active buffers
	int listNum;		// currentFrame % VERTCACHE_NUM_FRAMES
	int drawListNum;	// (currentFrame-1) % VERTCACHE_NUM_FRAMES
	int listNum;		// currentFrame % NUM_FRAME_DATA
	int drawListNum;	// (currentFrame-1) % NUM_FRAME_DATA

	geoBufferSet_t staticData;
	geoBufferSet_t frameData[VERTCACHE_NUM_FRAMES];
	geoBufferSet_t frameData[ NUM_FRAME_DATA ];

	int uniformBufferOffsetAlignment;

	// High water marks for the per-frame buffers
	int mostUsedVertex;
@@ -187,4 +136,4 @@ void CopyBuffer( byte* dst, const byte* src, int numBytes );

extern idVertexCache vertexCache;

#endif // __VERTEXCACHE2_H__
#endif // __VERTEXCACHE_H__
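With the declarations above, callers now pass an element count and optional element size instead of a pre-aligned byte count; the alignment happens inside idVertexCache. A condensed front-end sketch of the pattern used by the call-site hunks below (editor's illustration, not a verbatim excerpt):

// Per-frame geometry upload with the count-based API; element sizes default to
// sizeof( idDrawVert ), sizeof( triIndex_t ) and sizeof( idJointMat ).
tri->ambientCache = vertexCache.AllocVertex( tri->verts, tri->numVerts );
tri->indexCache = vertexCache.AllocIndex( tri->indexes, tri->numIndexes );
model->jointsInvertedBuffer = vertexCache.AllocJoint( model->jointsInverted, model->numInvertedJoints );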
@@ -348,7 +348,7 @@ void R_SetupDrawSurfJoints( drawSurf_t* drawSurf, const srfTriangles_t* tri, con
	if( !vertexCache.CacheIsCurrent( model->jointsInvertedBuffer ) )
	{
		const int alignment = glConfig.uniformBufferOffsetAlignment;
		model->jointsInvertedBuffer = vertexCache.AllocJoint( model->jointsInverted, ALIGN( model->numInvertedJoints * sizeof( idJointMat ), alignment ) );
		model->jointsInvertedBuffer = vertexCache.AllocJoint( model->jointsInverted, model->numInvertedJoints );
	}
	drawSurf->jointCache = model->jointsInvertedBuffer;
}
@@ -721,7 +721,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
		// make sure we have an ambient cache and all necessary normals / tangents
		if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
		{
			tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
			tri->indexCache = vertexCache.AllocIndex( tri->indexes, tri->numIndexes );
		}

		if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
@@ -736,7 +736,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
				//assert( false );	// this should no longer be hit
				// RB end
			}
			tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
			tri->ambientCache = vertexCache.AllocVertex( tri->verts, tri->numVerts );
		}

		// add the surface for drawing
@@ -778,11 +778,11 @@ void R_AddSingleModel( viewEntity_t* vEntity )
		// copy verts and indexes to this frame's hardware memory if they aren't already there
		if( !vertexCache.CacheIsCurrent( tri->ambientCache ) )
		{
			tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( tri->verts[0] ), VERTEX_CACHE_ALIGN ) );
			tri->ambientCache = vertexCache.AllocVertex( tri->verts, tri->numVerts );
		}
		if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
		{
			tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( tri->indexes[0] ), INDEX_CACHE_ALIGN ) );
			tri->indexCache = vertexCache.AllocIndex( tri->indexes, tri->numIndexes );
		}

		R_SetupDrawSurfJoints( baseDrawSurf, tri, shader );
@@ -878,7 +878,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
			// when using shadow volumes
			if( r_cullDynamicLightTriangles.GetBool() && !r_skipDynamicShadows.GetBool() && !r_useShadowMapping.GetBool() && shader->SurfaceCastsShadow() )
			{
				vertCacheHandle_t lightIndexCache = vertexCache.AllocIndex( NULL, ALIGN( lightDrawSurf->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
				vertCacheHandle_t lightIndexCache = vertexCache.AllocIndex( NULL, lightDrawSurf->numIndexes );
				if( vertexCache.CacheIsCurrent( lightIndexCache ) )
				{
					lightDrawSurf->indexCache = lightIndexCache;
@@ -1063,7 +1063,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
			// make sure we have an ambient cache and all necessary normals / tangents
			if( !vertexCache.CacheIsCurrent( tri->indexCache ) )
			{
				tri->indexCache = vertexCache.AllocIndex( tri->indexes, ALIGN( tri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
				tri->indexCache = vertexCache.AllocIndex( tri->indexes, tri->numIndexes );
			}

			// throw the entire source surface at it without any per-triangle culling
@@ -1083,7 +1083,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
					//assert( false );	// this should no longer be hit
					// RB end
				}
				tri->ambientCache = vertexCache.AllocVertex( tri->verts, ALIGN( tri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
				tri->ambientCache = vertexCache.AllocVertex( tri->verts, tri->numVerts );
			}

			shadowDrawSurf->ambientCache = tri->ambientCache;
@@ -1187,7 +1187,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
				// duplicates them with w set to 0 and 1 for the vertex program to project.
				// This is constant for any number of lights, the vertex program takes care
				// of projecting the verts to infinity for a particular light.
				tri->shadowCache = vertexCache.AllocVertex( NULL, ALIGN( tri->numVerts * 2 * sizeof( idShadowVert ), VERTEX_CACHE_ALIGN ) );
				tri->shadowCache = vertexCache.AllocVertex( NULL, tri->numVerts * 2, sizeof( idShadowVert ) );
				idShadowVert* shadowVerts = ( idShadowVert* )vertexCache.MappedVertexBuffer( tri->shadowCache );
				idShadowVert::CreateShadowCache( shadowVerts, tri->verts, tri->numVerts );
			}
@@ -1195,7 +1195,7 @@ void R_AddSingleModel( viewEntity_t* vEntity )
			const int maxShadowVolumeIndexes = tri->numSilEdges * 6 + tri->numIndexes * 2;

			shadowDrawSurf->numIndexes = 0;
			shadowDrawSurf->indexCache = vertexCache.AllocIndex( NULL, ALIGN( maxShadowVolumeIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
			shadowDrawSurf->indexCache = vertexCache.AllocIndex( NULL, maxShadowVolumeIndexes );
			shadowDrawSurf->shadowCache = tri->shadowCache;
			shadowDrawSurf->scissorRect = vLight->scissorRect;	// default to the light scissor and light depth bounds
			shadowDrawSurf->shadowVolumeState = SHADOWVOLUME_DONE;	// assume the shadow volume is done in case the index cache allocation failed
@@ -48,8 +48,8 @@ R_FinishDeform
*/
static drawSurf_t* R_FinishDeform( drawSurf_t* surf, srfTriangles_t* newTri, const idDrawVert* newVerts, const triIndex_t* newIndexes )
{
	newTri->ambientCache = vertexCache.AllocVertex( newVerts, ALIGN( newTri->numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
	newTri->indexCache = vertexCache.AllocIndex( newIndexes, ALIGN( newTri->numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
	newTri->ambientCache = vertexCache.AllocVertex( newVerts, newTri->numVerts );
	newTri->indexCache = vertexCache.AllocIndex( newIndexes, newTri->numIndexes );

	surf->frontEndGeo = newTri;
	surf->numIndexes = newTri->numIndexes;
@@ -1092,8 +1092,8 @@ static drawSurf_t* R_ParticleDeform( drawSurf_t* surf, bool useArea )
	newTri->bounds = stage->bounds;	// just always draw the particles
	newTri->numVerts = numVerts;
	newTri->numIndexes = numIndexes;
	newTri->ambientCache = vertexCache.AllocVertex( newVerts, ALIGN( numVerts * sizeof( idDrawVert ), VERTEX_CACHE_ALIGN ) );
	newTri->indexCache = vertexCache.AllocIndex( newIndexes, ALIGN( numIndexes * sizeof( triIndex_t ), INDEX_CACHE_ALIGN ) );
	newTri->ambientCache = vertexCache.AllocVertex( newVerts, numVerts );
	newTri->indexCache = vertexCache.AllocIndex( newIndexes, numIndexes );

	drawSurf_t* drawSurf = ( drawSurf_t* )R_FrameAlloc( sizeof( *drawSurf ), FRAME_ALLOC_DRAW_SURFACE );
	drawSurf->frontEndGeo = newTri;
@@ -2141,11 +2141,11 @@ void R_InitDrawSurfFromTri( drawSurf_t& ds, srfTriangles_t& tri )
	}
	else if( !vertexCache.CacheIsCurrent( tri.ambientCache ) )
	{
		tri.ambientCache = vertexCache.AllocVertex( tri.verts, ALIGN( tri.numVerts * sizeof( tri.verts[0] ), VERTEX_CACHE_ALIGN ) );
		tri.ambientCache = vertexCache.AllocVertex( tri.verts, tri.numVerts );
	}
	if( !vertexCache.CacheIsCurrent( tri.indexCache ) )
	{
		tri.indexCache = vertexCache.AllocIndex( tri.indexes, ALIGN( tri.numIndexes * sizeof( tri.indexes[0] ), INDEX_CACHE_ALIGN ) );
		tri.indexCache = vertexCache.AllocIndex( tri.indexes, tri.numIndexes );
	}

	ds.numIndexes = tri.numIndexes;