Use aligned memory allocation for GL surface

git-svn-id: https://svn.eduke32.com/eduke32@6928 1a8010ca-5511-0410-912e-c29ae57300e0
terminx 2018-06-24 00:55:17 +00:00
parent 983781b56d
commit cbf6dc3c42


@@ -46,10 +46,10 @@ static GLuint compileShader(GLenum shaderType, const char* const source)
         glGetShaderiv(shaderID, GL_INFO_LOG_LENGTH, &logLength);
         if (logLength > 0)
         {
-            char *infoLog = (char*) malloc(logLength);
+            char *infoLog = (char*) Bmalloc(logLength);
             glGetShaderInfoLog(shaderID, logLength, &logLength, infoLog);
             OSD_Printf("Log:\n%s\n", infoLog);
-            free(infoLog);
+            Bfree(infoLog);
         }
     }
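
Bmalloc and Bfree are the Build/EDuke32 portability wrappers around the C runtime allocator; routing the shader-log allocation through them keeps all heap traffic on the engine's own wrappers. As a minimal sketch, assuming the wrappers simply forward to malloc/free (the real definitions live in the engine's compat header, so the forms below are an assumption, not the project's exact code):

#include <stdlib.h>

/* Illustrative only: thin pass-through wrappers in the style of a compat header. */
#define Bmalloc(size) malloc(size)
#define Bfree(ptr)    free(ptr)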
@@ -64,7 +64,7 @@ bool glsurface_initialize(vec2_t inputBufferResolution)
     glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
     bufferRes = inputBufferResolution;
-    buffer = malloc(bufferRes.x*bufferRes.y);
+    buffer = Xaligned_alloc(16, bufferRes.x * bufferRes.y);
     glGenBuffers(1, &quadVertsID);
     glBindBuffer(GL_ARRAY_BUFFER, quadVertsID);
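
Xaligned_alloc is the X-prefixed (exit-on-failure) aligned allocator, so the call site needs no NULL check, and the software-rendering buffer comes back 16-byte aligned, which suits SIMD copies into it. A minimal sketch of such an allocator, assuming a posix_memalign/_aligned_malloc backend and abort-on-failure semantics (the name and backend selection here are assumptions, not the engine's exact code):

#include <stdio.h>
#include <stdlib.h>
#ifdef _WIN32
#include <malloc.h>
#endif

/* Illustrative exit-on-failure aligned allocator. */
static void *xaligned_alloc_sketch(size_t alignment, size_t size)
{
    void *ptr;
#ifdef _WIN32
    /* Windows: memory from _aligned_malloc must later go through _aligned_free. */
    ptr = _aligned_malloc(size, alignment);
#else
    /* POSIX: alignment must be a power of two multiple of sizeof(void *). */
    if (posix_memalign(&ptr, alignment, size) != 0)
        ptr = NULL;
#endif
    if (ptr == NULL)
    {
        fputs("xaligned_alloc_sketch: out of memory\n", stderr);
        exit(EXIT_FAILURE);
    }
    return ptr;
}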
@@ -168,8 +168,7 @@ void glsurface_destroy()
     if (!buffer)
         return;
-    free(buffer);
-    buffer = 0;
+    ALIGNED_FREE_AND_NULL(buffer);
     glDeleteBuffers(1, &quadVertsID);
     quadVertsID = 0;
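
Because the buffer now comes from the aligned allocator, it can no longer be released with a plain free() on every platform (on Windows, _aligned_malloc memory must be freed with _aligned_free), and ALIGNED_FREE_AND_NULL also nulls the pointer so the !buffer early-out above keeps glsurface_destroy idempotent on repeated calls. A sketch of how such a macro is commonly written, assuming an aligned-free counterpart (Baligned_free is an assumed name used here for illustration):

/* Illustrative: release an aligned allocation and null the pointer in one step.
   The do/while (0) wrapper makes the macro statement-safe after if/else. */
#define ALIGNED_FREE_AND_NULL(ptr) do { \
    Baligned_free(ptr);                 \
    (ptr) = NULL;                       \
} while (0)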