/*
===========================================================================
Copyright (C) 1999-2005 Id Software, Inc.

This file is part of Quake III Arena source code.

Quake III Arena source code is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.

Quake III Arena source code is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General
Public License for more details.

You should have received a copy of the GNU General Public License along
with Quake III Arena source code; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
===========================================================================
*/
// tr_map.c

#include "tr_local.h"

#define JSON_IMPLEMENTATION
#include "../qcommon/json.h"
#undef JSON_IMPLEMENTATION

/*
Loads and prepares a map file for scene rendering.

A single entry point:
void RE_LoadWorldMap( const char *name );
*/

// The world currently being loaded; published to tr.world once complete.
static	world_t		s_worldData;
// Base of the raw BSP file in memory; all lump offsets are relative to this.
static	byte		*fileBase;

static int			c_gridVerts;

//===============================================================================

// Convert an HSV color to RGB.  Used only by the r_lightmap 2 debug path,
// which passes h in [0,1], so h*5 yields a sector index i in 0..5.
// NOTE(review): conventional HSV uses 6 sectors (h*6); using 5 keeps h==1.0
// inside the switch (i==5) so rgb[] is always written for h in [0,1].
static	void HSVtoRGB( float h, float s, float v, float rgb[3] )
{
	int i;
	float f;
	float p, q, t;

	h *= 5;

	i = floor( h );
	f = h - i;

	p = v * ( 1 - s );
	q = v * ( 1 - s * f );
	t = v * ( 1 - s * ( 1 - f ) );

	switch ( i )
	{
	case 0:
		rgb[0] = v;
		rgb[1] = t;
		rgb[2] = p;
		break;
	case 1:
		rgb[0] = q;
		rgb[1] = v;
		rgb[2] = p;
		break;
	case 2:
		rgb[0] = p;
		rgb[1] = v;
		rgb[2] = t;
		break;
	case 3:
		rgb[0] = p;
		rgb[1] = q;
		rgb[2] = v;
		break;
	case 4:
		rgb[0] = t;
		rgb[1] = p;
		rgb[2] = v;
		break;
	case 5:
		rgb[0] = v;
		rgb[1] = p;
		rgb[2] = q;
		break;
	}
}

/*
===============
R_ColorShiftLightingBytes

Apply the map overbright shift to an 8-bit RGBA color.  If shifting pushes
any channel past 255, the color is rescaled so the largest channel is 255
(normalize by color) instead of clamping each channel (saturate to white),
which would wash out hues.  Alpha is passed through unchanged.
===============
*/
static	void R_ColorShiftLightingBytes( const byte in[4], byte out[4] ) {
	int		shift, r, g, b;

	// shift the color data based on overbright range
	shift = r_mapOverBrightBits->integer - tr.overbrightBits;
	// NOTE(review): if tr.overbrightBits ever exceeds r_mapOverBrightBits,
	// shift goes negative and "<< shift" is undefined behavior — confirm the
	// cvar setup guarantees shift >= 0.

	r = in[0] << shift;
	g = in[1] << shift;
	b = in[2] << shift;

	// normalize by color instead of saturating to white
	// (r|g|b works here because all three values are non-negative)
	if ( ( r | g | b ) > 255 ) {
		int		max;

		max = r > g ? r : g;
		max = max > b ? max : b;
		r = r * 255 / max;
		g = g * 255 / max;
		b = b * 255 / max;
	}

	out[0] = r;
	out[1] = g;
	out[2] = b;
	out[3] = in[3];
}

/*
===============
R_ColorShiftLightingFloats

Float variant of R_ColorShiftLightingBytes: scales an RGBA color (input in
0..255 units) by the overbright factor and into 0..1 range, normalizing by
the largest channel when any channel exceeds 1.  Alpha passes through.
===============
*/
static void R_ColorShiftLightingFloats(const float in[4], float out[4])
{
	float	r, g, b;
	// combined overbright multiplier and 255 -> 1.0 rescale
	float   scale = (1 << (r_mapOverBrightBits->integer - tr.overbrightBits)) / 255.0f;

	r = in[0] * scale;
	g = in[1] * scale;
	b = in[2] * scale;

	// normalize by color instead of saturating to white
	if ( r > 1 || g > 1 || b > 1 ) {
		float	max;

		max = r > g ? r : g;
		max = max > b ? max : b;
		r = r / max;
		g = g / max;
		b = b / max;
	}

	out[0] = r;
	out[1] = g;
	out[2] = b;
	out[3] = in[3];
}

// Encode an HDR color into 8-bit RGBM (shared multiplier in alpha).
// Modified from http://graphicrants.blogspot.jp/2009/04/rgbm-color-encoding.html
void ColorToRGBM(const vec3_t color, unsigned char rgbm[4])
{
	vec3_t          sample;
	float			maxComponent;

	VectorCopy(color, sample);

	// the multiplier is the largest channel, clamped to [1/255, 1]
	maxComponent = MAX(sample[0], sample[1]);
	maxComponent = MAX(maxComponent, sample[2]);
	maxComponent = CLAMP(maxComponent, 1.0f/255.0f, 1.0f);

	rgbm[3] = (unsigned char) ceil(maxComponent * 255.0f);
	maxComponent = 255.0f / rgbm[3];

	VectorScale(sample, maxComponent, sample);

	rgbm[0] = (unsigned char) (sample[0] * 255);
	rgbm[1] = (unsigned char) (sample[1] * 255);
	rgbm[2] = (unsigned char) (sample[2] * 255);
}

// Quantize a 0..1 float color to 16 bits per channel (round to nearest).
static void ColorToRGB16(const vec3_t color, uint16_t rgb16[3])
{
	rgb16[0] = color[0] * 65535.0f + 0.5f;
	rgb16[1] = color[1] * 65535.0f + 0.5f;
	rgb16[2] = color[2] * 65535.0f + 0.5f;
}

/*
===============
R_LoadLightmaps

Upload the BSP lightmap lump (l) as GL textures.  The surface lump (surfs)
is scanned to detect deluxe mapping (interleaved normal maps: every second
lightmap).  Depending on cvars this will:
  - skip lightmaps entirely for vertex-lit rendering,
  - pack many 128x128 lightmaps into fat pages (r_mergeLightmaps),
  - load external floating-point .hdr lightmaps into RGBA16 textures (r_hdr),
  - or colorize by intensity as a debug view (r_lightmap 2).
===============
*/
#define	DEFAULT_LIGHTMAP_SIZE	128
static	void R_LoadLightmaps( const lump_t *l, const lump_t *surfs ) {
	imgFlags_t  imgFlags = IMGFLAG_NOLIGHTSCALE | IMGFLAG_NO_COMPRESSION | IMGFLAG_CLAMPTOEDGE;
	byte		*buf, *buf_p;
	dsurface_t  *surf;
	int			len;
	byte		*image;
	int			i, j, numLightmaps, textureInternalFormat = 0;
	int			numLightmapsPerPage = 16;
	float maxIntensity = 0;
	double sumIntensity = 0;	// NOTE(review): accumulated below but never reported

	// if we are in r_vertexLight mode, we don't need the lightmaps at all
	if ( ( r_vertexLight->integer && tr.vertexLightingAllowed ) || glConfig.hardwareType == GLHW_PERMEDIA2 ) {
		return;
	}

	len = l->filelen;
	if ( !len ) {
		return;
	}
	buf = fileBase + l->fileofs;

	// we are about to upload textures
	R_IssuePendingRenderCommands();

	tr.lightmapSize = DEFAULT_LIGHTMAP_SIZE;
	// on-disk lightmaps are tightly packed 24-bit RGB pages
	numLightmaps = len / (tr.lightmapSize * tr.lightmapSize * 3);

	// check for deluxe mapping: with deluxe maps every surface references an
	// even lightmap index (odd indices are the deluxe maps); any surface with
	// an odd index means the map was not built with deluxe mapping
	if (numLightmaps <= 1)
	{
		tr.worldDeluxeMapping = qfalse;
	}
	else
	{
		tr.worldDeluxeMapping = qtrue;
		// NOTE(review): i is int but surfs->filelen / sizeof(dsurface_t) is
		// size_t — signed/unsigned comparison; harmless for sane lump sizes.
		for( i = 0, surf = (dsurface_t *)(fileBase + surfs->fileofs);
			i < surfs->filelen / sizeof(dsurface_t); i++, surf++ ) {
			int lightmapNum = LittleLong( surf->lightmapNum );

			if ( lightmapNum >= 0 && (lightmapNum & 1) != 0 ) {
				tr.worldDeluxeMapping = qfalse;
				break;
			}
		}
	}

	// staging buffer for one lightmap page; * 2 leaves room for
	// 16-bit-per-channel (RGBA16) pixels in the HDR path
	image = ri.Malloc(tr.lightmapSize * tr.lightmapSize * 4 * 2);

	if (tr.worldDeluxeMapping)
		numLightmaps >>= 1;

	// Use fat lightmaps of an appropriate size.
	if (r_mergeLightmaps->integer)
	{
		int maxLightmapsPerAxis = glConfig.maxTextureSize / tr.lightmapSize;
		int lightmapCols = 4, lightmapRows = 4;

		// Increase width at first, then height.
		while (lightmapCols * lightmapRows < numLightmaps && lightmapCols != maxLightmapsPerAxis)
			lightmapCols <<= 1;

		while (lightmapCols * lightmapRows < numLightmaps && lightmapRows != maxLightmapsPerAxis)
			lightmapRows <<= 1;

		tr.fatLightmapCols  = lightmapCols;
		tr.fatLightmapRows  = lightmapRows;
		numLightmapsPerPage = lightmapCols * lightmapRows;

		// round up: number of fat pages needed to hold all lightmaps
		tr.numLightmaps = (numLightmaps + (numLightmapsPerPage - 1)) / numLightmapsPerPage;
	}
	else
	{
		tr.numLightmaps = numLightmaps;
	}

	tr.lightmaps = ri.Hunk_Alloc( tr.numLightmaps * sizeof(image_t *), h_low );

	if (tr.worldDeluxeMapping)
		tr.deluxemaps = ri.Hunk_Alloc( tr.numLightmaps * sizeof(image_t *), h_low );

	textureInternalFormat = GL_RGBA8;
	if (r_hdr->integer)
	{
		// Check for the first hdr lightmap, if it exists, use GL_RGBA16 for textures.
		char filename[MAX_QPATH];

		Com_sprintf(filename, sizeof(filename), "maps/%s/lm_0000.hdr", s_worldData.baseName);
		if (ri.FS_FileExists(filename))
			textureInternalFormat = GL_RGBA16;
	}

	// pre-create the (initially empty) fat pages; individual lightmaps are
	// uploaded into them below with R_UpdateSubImage
	if (r_mergeLightmaps->integer)
	{
		int width = tr.fatLightmapCols * tr.lightmapSize;
		int height = tr.fatLightmapRows * tr.lightmapSize;

		for (i = 0; i < tr.numLightmaps; i++)
		{
			tr.lightmaps[i] = R_CreateImage(va("_fatlightmap%d", i), NULL, width, height, IMGTYPE_COLORALPHA, imgFlags, textureInternalFormat);

			if (tr.worldDeluxeMapping)
				tr.deluxemaps[i] = R_CreateImage(va("_fatdeluxemap%d", i), NULL, width, height, IMGTYPE_DELUXE, imgFlags, 0);
		}
	}

	for(i = 0; i < numLightmaps; i++)
	{
		int xoff = 0, yoff = 0;
		int lightmapnum = i;
		// expand the 24 bit on-disk to 32 bit

		if (r_mergeLightmaps->integer)
		{
			// position of this lightmap within its fat page
			int lightmaponpage = i % numLightmapsPerPage;
			xoff = (lightmaponpage % tr.fatLightmapCols) * tr.lightmapSize;
			yoff = (lightmaponpage / tr.fatLightmapCols) * tr.lightmapSize;

			lightmapnum /= numLightmapsPerPage;
		}

		// if (tr.worldLightmapping)
		{
			char filename[MAX_QPATH];
			byte *hdrLightmap = NULL;
			int size = 0;

			// look for hdr lightmaps
			if (textureInternalFormat == GL_RGBA16)
			{
				// with deluxe mapping the external files are numbered by the
				// original (interleaved) index, hence the * 2
				Com_sprintf( filename, sizeof( filename ), "maps/%s/lm_%04d.hdr", s_worldData.baseName, i * (tr.worldDeluxeMapping ? 2 : 1) );
				//ri.Printf(PRINT_ALL, "looking for %s\n", filename);

				size = ri.FS_ReadFile(filename, (void **)&hdrLightmap);
			}

			if (hdrLightmap)
			{
				byte *p = hdrLightmap, *end = hdrLightmap + size;
				//ri.Printf(PRINT_ALL, "found!\n");

				/* FIXME: don't just skip over this header and actually parse it */
				// skip to the blank line that ends the header block...
				while (p < end && !(*p == '\n' && *(p+1) == '\n'))
					p++;

				p += 2;

				// ...then skip the resolution line that follows it
				while (p < end && !(*p == '\n'))
					p++;

				p++;

				if (p >= end)
					ri.Error(ERR_DROP, "Bad header for %s!", filename);

				buf_p = p;

#if 0 // HDRFILE_RGBE
				if ((int)(end - hdrLightmap) != tr.lightmapSize * tr.lightmapSize * 4)
					ri.Error(ERR_DROP, "Bad size for %s (%i)!", filename, size);
#else // HDRFILE_FLOAT
				// 3 little-endian floats per texel
				if ((int)(end - hdrLightmap) != tr.lightmapSize * tr.lightmapSize * 12)
					ri.Error(ERR_DROP, "Bad size for %s (%i)!", filename, size);
#endif
			}
			else
			{
				// point at this lightmap's 24-bit page in the BSP lump,
				// skipping interleaved deluxe pages when present
				int imgOffset = tr.worldDeluxeMapping ? i * 2 : i;
				buf_p = buf + imgOffset * tr.lightmapSize * tr.lightmapSize * 3;
			}

			for ( j = 0 ; j < tr.lightmapSize * tr.lightmapSize; j++ )
			{
				if (hdrLightmap)
				{
					vec4_t color;

#if 0 // HDRFILE_RGBE
					float exponent = exp2(buf_p[j*4+3] - 128);

					color[0] = buf_p[j*4+0] * exponent;
					color[1] = buf_p[j*4+1] * exponent;
					color[2] = buf_p[j*4+2] * exponent;
#else // HDRFILE_FLOAT
					memcpy(color, &buf_p[j*12], 12);

					color[0] = LittleFloat(color[0]);
					color[1] = LittleFloat(color[1]);
					color[2] = LittleFloat(color[2]);
#endif
					color[3] = 1.0f;

					R_ColorShiftLightingFloats(color, color);

					// write a 16-bit-per-channel texel (8 bytes each)
					ColorToRGB16(color, (uint16_t *)(&image[j * 8]));
					((uint16_t *)(&image[j * 8]))[3] = 65535;
				}
				else if (textureInternalFormat == GL_RGBA16)
				{
					vec4_t color;

					//hack: convert LDR lightmap to HDR one
					color[0] = MAX(buf_p[j*3+0], 0.499f);
					color[1] = MAX(buf_p[j*3+1], 0.499f);
					color[2] = MAX(buf_p[j*3+2], 0.499f);

					// if under an arbitrary value (say 12) grey it out
					// this prevents weird splotches in dimly lit areas
					if (color[0] + color[1] + color[2] < 12.0f)
					{
						float avg = (color[0] + color[1] + color[2]) * 0.3333f;
						color[0] = avg;
						color[1] = avg;
						color[2] = avg;
					}
					color[3] = 1.0f;

					R_ColorShiftLightingFloats(color, color);

					ColorToRGB16(color, (uint16_t *)(&image[j * 8]));
					((uint16_t *)(&image[j * 8]))[3] = 65535;
				}
				else
				{
					if ( r_lightmap->integer == 2 )
					{	// color code by intensity as development tool	(FIXME: check range)
						float r = buf_p[j*3+0];
						float g = buf_p[j*3+1];
						float b = buf_p[j*3+2];
						float intensity;
						float out[3] = {0.0, 0.0, 0.0};

						intensity = 0.33f * r + 0.685f * g + 0.063f * b;

						if ( intensity > 255 )
							intensity = 1.0f;
						else
							intensity /= 255.0f;

						if ( intensity > maxIntensity )
							maxIntensity = intensity;

						HSVtoRGB( intensity, 1.00, 0.50, out );

						image[j*4+0] = out[0] * 255;
						image[j*4+1] = out[1] * 255;
						image[j*4+2] = out[2] * 255;
						image[j*4+3] = 255;

						sumIntensity += intensity;
					}
					else
					{
						// normal path: overbright-shift the 24-bit texel
						R_ColorShiftLightingBytes( &buf_p[j*3], &image[j*4] );
						image[j*4+3] = 255;
					}
				}
			}

			if (r_mergeLightmaps->integer)
				R_UpdateSubImage(tr.lightmaps[lightmapnum], image, xoff, yoff, tr.lightmapSize, tr.lightmapSize, textureInternalFormat);
			else
				tr.lightmaps[i] = R_CreateImage(va("*lightmap%d", i), image, tr.lightmapSize, tr.lightmapSize, IMGTYPE_COLORALPHA, imgFlags, textureInternalFormat );

			if (hdrLightmap)
				ri.FS_FreeFile(hdrLightmap);
		}

		if (tr.worldDeluxeMapping)
		{
			// deluxe map is the odd page following this lightmap's even page
			buf_p = buf + (i * 2 + 1) * tr.lightmapSize * tr.lightmapSize * 3;

			for ( j = 0 ; j < tr.lightmapSize * tr.lightmapSize; j++ ) {
				image[j*4+0] = buf_p[j*3+0];
				image[j*4+1] = buf_p[j*3+1];
				image[j*4+2] = buf_p[j*3+2];

				// make 0,0,0 into 127,127,127
				// (0,0,0 decodes to a degenerate normal; 127,127,127 is "straight up")
				if ((image[j*4+0] == 0) && (image[j*4+1] == 0) && (image[j*4+2] == 0))
				{
					image[j*4+0] = image[j*4+1] = image[j*4+2] = 127;
				}

				image[j*4+3] = 255;
			}

			if (r_mergeLightmaps->integer)
				R_UpdateSubImage(tr.deluxemaps[lightmapnum], image, xoff, yoff, tr.lightmapSize, tr.lightmapSize, GL_RGBA8 );
			else
				tr.deluxemaps[i] = R_CreateImage(va("*deluxemap%d", i), image, tr.lightmapSize, tr.lightmapSize, IMGTYPE_DELUXE, imgFlags, 0 );
		}
	}

	if ( r_lightmap->integer == 2 )	{
		ri.Printf( PRINT_ALL, "Brightest lightmap value: %d\n", ( int ) ( maxIntensity * 255 ) );
	}

	ri.Free(image);
}

/*
===============
FatPackU

Remap a lightmap U coordinate into the fat-lightmap page that holds the
given (pre-deluxe-interleave) lightmap index.  Passes the coordinate
through unchanged when fat lightmaps are not in use.
===============
*/
static float FatPackU(float input, int lightmapnum)
{
	if (lightmapnum < 0)
		return input;

	if (tr.worldDeluxeMapping)
		lightmapnum >>= 1;

	if (tr.fatLightmapCols > 0)
	{
		// column of this lightmap within its page
		lightmapnum %= (tr.fatLightmapCols * tr.fatLightmapRows);
		return (input + (lightmapnum % tr.fatLightmapCols)) / (float)(tr.fatLightmapCols);
	}

	return input;
}

/*
===============
FatPackV

V-coordinate counterpart of FatPackU (row within the fat page).
===============
*/
static float FatPackV(float input, int lightmapnum)
{
	if (lightmapnum < 0)
		return input;

	if (tr.worldDeluxeMapping)
		lightmapnum >>= 1;

	if (tr.fatLightmapCols > 0)
	{
		// row of this lightmap within its page
		lightmapnum %= (tr.fatLightmapCols * tr.fatLightmapRows);
		return (input + (lightmapnum / tr.fatLightmapCols)) / (float)(tr.fatLightmapRows);
	}

	return input;
}

/*
===============
FatLightmap

Translate an on-disk lightmap index into the index of the fat page image
that contains it (identity when fat lightmaps are disabled).  Negative
indices (LIGHTMAP_BY_VERTEX etc.) pass through untouched.
===============
*/
static int FatLightmap(int lightmapnum)
{
	if (lightmapnum < 0)
		return lightmapnum;

	if (tr.worldDeluxeMapping)
		lightmapnum >>= 1;

	if (tr.fatLightmapCols > 0)
		return lightmapnum / (tr.fatLightmapCols * tr.fatLightmapRows);

	return lightmapnum;
}

/*
=================
RE_SetWorldVisData

This is called by the clipmodel subsystem so we can
share the 1.8 megs of space in big maps...
=================
*/
void		RE_SetWorldVisData( const byte *vis ) {
	tr.externalVisData = vis;
}

/*
=================
R_LoadVisibility

Load the PVS lump.  Uses the vis data already loaded by the collision
model (tr.externalVisData) when available, otherwise copies it onto the
hunk.  The first two ints of the lump are cluster count and bytes per
cluster; the bit vectors follow.
=================
*/
static	void R_LoadVisibility( const lump_t *l ) {
	int		len;
	byte	*buf;

	len = l->filelen;
	if ( !len ) {
		return;
	}
	buf = fileBase + l->fileofs;

	s_worldData.numClusters = LittleLong( ((int *)buf)[0] );
	s_worldData.clusterBytes = LittleLong( ((int *)buf)[1] );

	// CM_Load should have given us the vis data to share, so
	// we don't need to allocate another copy
	if ( tr.externalVisData ) {
		s_worldData.vis = tr.externalVisData;
	} else {
		byte	*dest;

		// skip the 8-byte header (the two ints read above)
		dest = ri.Hunk_Alloc( len - 8, h_low );
		Com_Memcpy( dest, buf + 8, len - 8 );
		s_worldData.vis = dest;
	}
}

//===============================================================================

/*
===============
ShaderForShaderNum

Resolve an on-disk shader index (still in file byte order) to a loaded
shader for the given lightmap index.  Vertex lighting and r_fullbright
override the lightmap selection; a shader that failed to parse falls back
to the default shader.
===============
*/
static shader_t *ShaderForShaderNum( int shaderNum, int lightmapNum ) {
	shader_t	*shader;
	dshader_t	*dsh;

	int _shaderNum = LittleLong( shaderNum );
	if ( _shaderNum < 0 || _shaderNum >= s_worldData.numShaders ) {
		ri.Error( ERR_DROP, "ShaderForShaderNum: bad num %i", _shaderNum );
	}
	dsh = &s_worldData.shaders[ _shaderNum ];

	if ( ( r_vertexLight->integer && tr.vertexLightingAllowed ) || glConfig.hardwareType == GLHW_PERMEDIA2 ) {
		lightmapNum = LIGHTMAP_BY_VERTEX;
	}

	if ( r_fullbright->integer ) {
		lightmapNum = LIGHTMAP_WHITEIMAGE;
	}

	shader = R_FindShader( dsh->shader, lightmapNum, qtrue );

	// if the shader had errors, just use default shader
	if ( shader->defaultShader ) {
		return tr.defaultShader;
	}

	return shader;
}

// Convert one on-disk drawVert_t (little-endian) to an in-memory srfVert_t:
// byte-swaps position/UV/normal, remaps lightmap UVs into the fat page when
// realLightmapNum >= 0, packs the normal, and overbright-shifts the vertex
// color (optionally replaced by external HDR vertex colors).  When bounds
// is non-NULL the position is accumulated into it.
static void LoadDrawVertToSrfVert(srfVert_t *s, const drawVert_t *d, int realLightmapNum, float hdrVertColors[3], vec3_t *bounds)
{
	vec4_t v;

	s->xyz[0] = LittleFloat(d->xyz[0]);
	s->xyz[1] = LittleFloat(d->xyz[1]);
	s->xyz[2] = LittleFloat(d->xyz[2]);

	if (bounds)
		AddPointToBounds(s->xyz, bounds[0], bounds[1]);

	s->st[0] = LittleFloat(d->st[0]);
	s->st[1] = LittleFloat(d->st[1]);

	if (realLightmapNum >= 0)
	{
		s->lightmap[0] = FatPackU(LittleFloat(d->lightmap[0]), realLightmapNum);
		s->lightmap[1] = FatPackV(LittleFloat(d->lightmap[1]), realLightmapNum);
	}
	else
	{
		s->lightmap[0] = LittleFloat(d->lightmap[0]);
		s->lightmap[1] = LittleFloat(d->lightmap[1]);
	}

	v[0] = LittleFloat(d->normal[0]);
	v[1] = LittleFloat(d->normal[1]);
	v[2] = LittleFloat(d->normal[2]);

	R_VaoPackNormal(s->normal, v);

	if (hdrVertColors)
	{
		v[0] = hdrVertColors[0];
		v[1] = hdrVertColors[1];
		v[2] = hdrVertColors[2];
	}
	else
	{
		//hack: convert LDR vertex colors to HDR
		if (r_hdr->integer)
		{
			v[0] = MAX(d->color.rgba[0], 0.499f);
			v[1] = MAX(d->color.rgba[1], 0.499f);
			v[2] = MAX(d->color.rgba[2], 0.499f);
		}
		else
		{
			v[0] = d->color.rgba[0];
			v[1] = d->color.rgba[1];
			v[2] = d->color.rgba[2];
		}
	}
	v[3] = d->color.rgba[3] / 255.0f;

	R_ColorShiftLightingFloats(v, v);

	R_VaoPackColor(s->color, v);
}

/*
===============
ParseFace

Build an SF_FACE (planar) surface from an on-disk surface record: copies
and converts vertexes, validates and copies triangle indexes (dropping
degenerate triangles), derives the cull plane from the stored lightmap
vector, and computes per-triangle tangent spaces.
===============
*/
static void ParseFace( const dsurface_t *ds, const drawVert_t *verts, float *hdrVertColors, msurface_t *surf, int *indexes ) {
	int			i, j;
	srfBspSurface_t	*cv;
	glIndex_t  *tri;
	int			numVerts, numIndexes, badTriangles;
	int realLightmapNum;

	realLightmapNum = LittleLong( ds->lightmapNum );

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader value
	surf->shader = ShaderForShaderNum( ds->shaderNum, FatLightmap(realLightmapNum) );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	numVerts = LittleLong(ds->numVerts);
	if (numVerts > MAX_FACE_POINTS) {
		ri.Printf( PRINT_WARNING, "WARNING: MAX_FACE_POINTS exceeded: %i\n", numVerts);
		numVerts = MAX_FACE_POINTS;
		surf->shader = tr.defaultShader;
	}

	numIndexes = LittleLong(ds->numIndexes);

	// surf->data was pre-allocated by the caller
	//cv = ri.Hunk_Alloc(sizeof(*cv), h_low);
	cv = (void *)surf->data;
	cv->surfaceType = SF_FACE;

	cv->numIndexes = numIndexes;
	cv->indexes = ri.Hunk_Alloc(numIndexes * sizeof(cv->indexes[0]), h_low);

	cv->numVerts = numVerts;
	cv->verts = ri.Hunk_Alloc(numVerts * sizeof(cv->verts[0]), h_low);

	// copy vertexes
	surf->cullinfo.type = CULLINFO_PLANE | CULLINFO_BOX;
	ClearBounds(surf->cullinfo.bounds[0], surf->cullinfo.bounds[1]);
	verts += LittleLong(ds->firstVert);
	for(i = 0; i < numVerts; i++)
		LoadDrawVertToSrfVert(&cv->verts[i], &verts[i], realLightmapNum, hdrVertColors ? hdrVertColors + (ds->firstVert + i) * 3 : NULL, surf->cullinfo.bounds);

	// copy triangles
	badTriangles = 0;
	indexes += LittleLong(ds->firstIndex);
	for(i = 0, tri = cv->indexes; i < numIndexes; i += 3, tri += 3)
	{
		for(j = 0; j < 3; j++)
		{
			tri[j] = LittleLong(indexes[i + j]);

			if(tri[j] >= numVerts)
			{
				ri.Error(ERR_DROP, "Bad index in face surface");
			}
		}

		// degenerate triangle: back the write pointer up so the next
		// triangle overwrites it
		if ((tri[0] == tri[1]) || (tri[1] == tri[2]) || (tri[0] == tri[2]))
		{
			tri -= 3;
			badTriangles++;
		}
	}

	if (badTriangles)
	{
		ri.Printf(PRINT_WARNING, "Face has bad triangles, originally shader %s %d tris %d verts, now %d tris\n", surf->shader->name, numIndexes / 3, numVerts, numIndexes / 3 - badTriangles);
		cv->numIndexes -= badTriangles * 3;
	}

	// take the plane information from the lightmap vector
	for ( i = 0 ; i < 3 ; i++ ) {
		cv->cullPlane.normal[i] = LittleFloat( ds->lightmapVecs[2][i] );
	}
	cv->cullPlane.dist = DotProduct( cv->verts[0].xyz, cv->cullPlane.normal );
	SetPlaneSignbits( &cv->cullPlane );
	cv->cullPlane.type = PlaneTypeForNormal( cv->cullPlane.normal );
	surf->cullinfo.plane = cv->cullPlane;

	surf->data = (surfaceType_t *)cv;

	// Calculate tangent spaces
	{
		srfVert_t      *dv[3];

		for(i = 0, tri = cv->indexes; i < numIndexes; i += 3, tri += 3)
		{
			dv[0] = &cv->verts[tri[0]];
			dv[1] = &cv->verts[tri[1]];
			dv[2] = &cv->verts[tri[2]];

			R_CalcTangentVectors(dv);
		}
	}
}

/*
===============
ParseMesh

Build an SF_GRID (curved patch) surface: converts control points,
subdivides them into a render grid, and records the LOD origin/radius
(stored in lightmapVecs[0..1] as group bounds) used to keep patches in the
same group subdividing identically so they don't crack.
===============
*/
static void ParseMesh ( const dsurface_t *ds, const drawVert_t *verts, float *hdrVertColors, msurface_t *surf ) {
	srfBspSurface_t	*grid = (srfBspSurface_t *)surf->data;
	int				i;
	int				width, height, numPoints;
	srfVert_t points[MAX_PATCH_SIZE*MAX_PATCH_SIZE];
	vec3_t			bounds[2];
	vec3_t			tmpVec;
	static surfaceType_t	skipData = SF_SKIP;
	int realLightmapNum;

	realLightmapNum = LittleLong( ds->lightmapNum );

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader value
	surf->shader = ShaderForShaderNum( ds->shaderNum, FatLightmap(realLightmapNum) );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	// we may have a nodraw surface, because they might still need to
	// be around for movement clipping
	if ( s_worldData.shaders[ LittleLong( ds->shaderNum ) ].surfaceFlags & SURF_NODRAW ) {
		surf->data = &skipData;
		return;
	}

	width = LittleLong( ds->patchWidth );
	height = LittleLong( ds->patchHeight );

	if(width < 0 || width > MAX_PATCH_SIZE || height < 0 || height > MAX_PATCH_SIZE)
		ri.Error(ERR_DROP, "ParseMesh: bad size");

	verts += LittleLong( ds->firstVert );
	numPoints = width * height;
	for(i = 0; i < numPoints; i++)
		LoadDrawVertToSrfVert(&points[i], &verts[i], realLightmapNum, hdrVertColors ? hdrVertColors + (ds->firstVert + i) * 3 : NULL, NULL);

	// pre-tessellate
	R_SubdividePatchToGrid( grid, width, height, points );

	// copy the level of detail origin, which is the center
	// of the group of all curves that must subdivide the same
	// to avoid cracking
	for ( i = 0 ; i < 3 ; i++ ) {
		bounds[0][i] = LittleFloat( ds->lightmapVecs[0][i] );
		bounds[1][i] = LittleFloat( ds->lightmapVecs[1][i] );
	}
	VectorAdd( bounds[0], bounds[1], bounds[1] );
	VectorScale( bounds[1], 0.5f, grid->lodOrigin );
	VectorSubtract( bounds[0], grid->lodOrigin, tmpVec );
	grid->lodRadius = VectorLength( tmpVec );

	surf->cullinfo.type = CULLINFO_BOX | CULLINFO_SPHERE;
	VectorCopy(grid->cullBounds[0], surf->cullinfo.bounds[0]);
	VectorCopy(grid->cullBounds[1], surf->cullinfo.bounds[1]);
	VectorCopy(grid->cullOrigin, surf->cullinfo.localOrigin);
	surf->cullinfo.radius = grid->cullRadius;
}

/*
===============
ParseTriSurf

Build an SF_TRIANGLES surface (vertex-lit triangle soup).  Same vertex and
index processing as ParseFace but with no cull plane and always vertex
lighting (LIGHTMAP_BY_VERTEX).
===============
*/
static void ParseTriSurf( const dsurface_t *ds, const drawVert_t *verts, float *hdrVertColors, msurface_t *surf, int *indexes ) {
	srfBspSurface_t *cv;
	glIndex_t  *tri;
	int             i, j;
	int             numVerts, numIndexes, badTriangles;

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader
	surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	numVerts = LittleLong(ds->numVerts);
	numIndexes = LittleLong(ds->numIndexes);

	// surf->data was pre-allocated by the caller
	//cv = ri.Hunk_Alloc(sizeof(*cv), h_low);
	cv = (void *)surf->data;
	cv->surfaceType = SF_TRIANGLES;

	cv->numIndexes = numIndexes;
	cv->indexes = ri.Hunk_Alloc(numIndexes * sizeof(cv->indexes[0]), h_low);

	cv->numVerts = numVerts;
	cv->verts = ri.Hunk_Alloc(numVerts * sizeof(cv->verts[0]), h_low);

	surf->data = (surfaceType_t *) cv;

	// copy vertexes
	surf->cullinfo.type = CULLINFO_BOX;
	ClearBounds(surf->cullinfo.bounds[0], surf->cullinfo.bounds[1]);
	verts += LittleLong(ds->firstVert);
	for(i = 0; i < numVerts; i++)
		LoadDrawVertToSrfVert(&cv->verts[i], &verts[i], -1, hdrVertColors ? hdrVertColors + (ds->firstVert + i) * 3 : NULL, surf->cullinfo.bounds);

	// copy triangles
	badTriangles = 0;
	indexes += LittleLong(ds->firstIndex);
	for(i = 0, tri = cv->indexes; i < numIndexes; i += 3, tri += 3)
	{
		for(j = 0; j < 3; j++)
		{
			tri[j] = LittleLong(indexes[i + j]);

			if(tri[j] >= numVerts)
			{
				ri.Error(ERR_DROP, "Bad index in face surface");
			}
		}

		// drop degenerate triangles (see ParseFace)
		if ((tri[0] == tri[1]) || (tri[1] == tri[2]) || (tri[0] == tri[2]))
		{
			tri -= 3;
			badTriangles++;
		}
	}

	if (badTriangles)
	{
		ri.Printf(PRINT_WARNING, "Trisurf has bad triangles, originally shader %s %d tris %d verts, now %d tris\n", surf->shader->name, numIndexes / 3, numVerts, numIndexes / 3 - badTriangles);
		cv->numIndexes -= badTriangles * 3;
	}

	// Calculate tangent spaces
	{
		srfVert_t      *dv[3];

		for(i = 0, tri = cv->indexes; i < numIndexes; i += 3, tri += 3)
		{
			dv[0] = &cv->verts[tri[0]];
			dv[1] = &cv->verts[tri[1]];
			dv[2] = &cv->verts[tri[2]];

			R_CalcTangentVectors(dv);
		}
	}
}

/*
===============
ParseFlare

Build an SF_FLARE surface.  Origin, color, and normal come from the
lightmapOrigin/lightmapVecs fields of the on-disk record; flares are never
frustum-culled here (CULLINFO_NONE).
===============
*/
static void ParseFlare( const dsurface_t *ds, const drawVert_t *verts, msurface_t *surf, int *indexes ) {
	srfFlare_t		*flare;
	int				i;

	// get fog volume
	surf->fogIndex = LittleLong( ds->fogNum ) + 1;

	// get shader
	surf->shader = ShaderForShaderNum( ds->shaderNum, LIGHTMAP_BY_VERTEX );
	if ( r_singleShader->integer && !surf->shader->isSky ) {
		surf->shader = tr.defaultShader;
	}

	// surf->data was pre-allocated by the caller
	//flare = ri.Hunk_Alloc( sizeof( *flare ), h_low );
	flare = (void *)surf->data;
	flare->surfaceType = SF_FLARE;

	surf->data = (surfaceType_t *)flare;

	for ( i = 0 ; i < 3 ; i++ ) {
		flare->origin[i] = LittleFloat( ds->lightmapOrigin[i] );
		flare->color[i] = LittleFloat( ds->lightmapVecs[0][i] );
		flare->normal[i] = LittleFloat( ds->lightmapVecs[2][i] );
	}

	surf->cullinfo.type = CULLINFO_NONE;
}

/*
=================
R_MergedWidthPoints

returns qtrue if there are grid points merged on a width edge
(i.e. two distinct interior points of the given row coincide
within 0.1 units on every axis)
=================
*/
static int R_MergedWidthPoints( const srfBspSurface_t *grid, int offset) {
	int i, j;

	for (i = 1; i < grid->width-1; i++) {
		for (j = i + 1; j < grid->width-1; j++) {
			if ( fabs(grid->verts[i + offset].xyz[0] - grid->verts[j + offset].xyz[0]) > .1) continue;
			if ( fabs(grid->verts[i + offset].xyz[1] - grid->verts[j + offset].xyz[1]) > .1) continue;
			if ( fabs(grid->verts[i + offset].xyz[2] - grid->verts[j + offset].xyz[2]) > .1) continue;
			return qtrue;
		}
	}
	return qfalse;
}

/*
=================
R_MergedHeightPoints

returns qtrue if there are grid points merged on a height edge
(column counterpart of R_MergedWidthPoints; offset selects the column)
=================
*/
static int R_MergedHeightPoints(const srfBspSurface_t *grid, int offset) {
	int i, j;

	for (i = 1; i < grid->height-1; i++) {
		for (j = i + 1; j < grid->height-1; j++) {
			if ( fabs(grid->verts[grid->width * i + offset].xyz[0] - grid->verts[grid->width * j + offset].xyz[0]) > .1) continue;
			if ( fabs(grid->verts[grid->width * i + offset].xyz[1] - grid->verts[grid->width * j + offset].xyz[1]) > .1) continue;
			if ( fabs(grid->verts[grid->width * i + offset].xyz[2] - grid->verts[grid->width * j + offset].xyz[2]) > .1) continue;
			return qtrue;
		}
	}
	return qfalse;
}

/*
=================
R_FixSharedVertexLodError_r

Propagate per-vertex LOD error values from grid1 to every other grid in
the same LOD group (same lodOrigin and lodRadius) that shares edge points
with it, so shared edges tessellate identically and don't crack.  Compares
every boundary edge of grid1 (two width edges, then two height edges)
against every boundary edge of grid2; points matching within 0.1 units on
all axes copy grid1's LOD error across.  Recurses from any touched grid.

NOTE: never sync LoD through grid edges with merged points!

FIXME: write generalized version that also avoids cracks between a patch and one that meets half way?
=================
*/
static void R_FixSharedVertexLodError_r( int start, srfBspSurface_t *grid1 ) {
	int j, k, l, m, n, offset1, offset2, touch;
	srfBspSurface_t *grid2;

	for ( j = start; j < s_worldData.numsurfaces; j++ ) {
		//
		grid2 = (srfBspSurface_t *) s_worldData.surfaces[j].data;
		// if this surface is not a grid
		if ( grid2->surfaceType != SF_GRID ) continue;
		// if the LOD errors are already fixed for this patch
		if ( grid2->lodFixed == 2 ) continue;
		// grids in the same LOD group should have the exact same lod radius
		if ( grid1->lodRadius != grid2->lodRadius ) continue;
		// grids in the same LOD group should have the exact same lod origin
		if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) continue;
		if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) continue;
		if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) continue;
		//
		touch = qfalse;
		// compare the two width edges (top/bottom rows) of grid1...
		for (n = 0; n < 2; n++) {
			//
			if (n) offset1 = (grid1->height-1) * grid1->width;
			else offset1 = 0;
			if (R_MergedWidthPoints(grid1, offset1)) continue;
			for (k = 1; k < grid1->width-1; k++) {
				// ...against both width edges of grid2
				for (m = 0; m < 2; m++) {

					if (m) offset2 = (grid2->height-1) * grid2->width;
					else offset2 = 0;
					if (R_MergedWidthPoints(grid2, offset2)) continue;
					for ( l = 1; l < grid2->width-1; l++) {
					//
						if ( fabs(grid1->verts[k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0]) > .1) continue;
						if ( fabs(grid1->verts[k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1]) > .1) continue;
						if ( fabs(grid1->verts[k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2]) > .1) continue;
						// ok the points are equal and should have the same lod error
						grid2->widthLodError[l] = grid1->widthLodError[k];
						touch = qtrue;
					}
				}
				// ...and against both height edges (left/right columns) of grid2
				for (m = 0; m < 2; m++) {
					if (m) offset2 = grid2->width-1;
					else offset2 = 0;
					if (R_MergedHeightPoints(grid2, offset2)) continue;
					for ( l = 1; l < grid2->height-1; l++) {
					//
						if ( fabs(grid1->verts[k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0]) > .1) continue;
						if ( fabs(grid1->verts[k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1]) > .1) continue;
						if ( fabs(grid1->verts[k + offset1].xyz[2] - grid2->verts[grid2->width * l + offset2].xyz[2]) > .1) continue;
						// ok the points are equal and should have the same lod error
						grid2->heightLodError[l] = grid1->widthLodError[k];
						touch = qtrue;
					}
				}
			}
		}
		// same again for the two height edges of grid1
		for (n = 0; n < 2; n++) {
			//
			if (n) offset1 = grid1->width-1;
			else offset1 = 0;
			if (R_MergedHeightPoints(grid1, offset1)) continue;
			for (k = 1; k < grid1->height-1; k++) {
				for (m = 0; m < 2; m++) {
					if (m) offset2 = (grid2->height-1) * grid2->width;
					else offset2 = 0;
					if (R_MergedWidthPoints(grid2, offset2)) continue;
					for ( l = 1; l < grid2->width-1; l++) {
					//
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[l + offset2].xyz[0]) > .1) continue;
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[l + offset2].xyz[1]) > .1) continue;
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[l + offset2].xyz[2]) > .1) continue;
						// ok the points are equal and should have the same lod error
						grid2->widthLodError[l] = grid1->heightLodError[k];
						touch = qtrue;
					}
				}
				for (m = 0; m < 2; m++) {
					if (m) offset2 = grid2->width-1;
					else offset2 = 0;
					if (R_MergedHeightPoints(grid2, offset2)) continue;
					for ( l = 1; l < grid2->height-1; l++) {
					//
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[0] - grid2->verts[grid2->width * l + offset2].xyz[0]) > .1) continue;
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[1] - grid2->verts[grid2->width * l + offset2].xyz[1]) > .1) continue;
						if ( fabs(grid1->verts[grid1->width * k + offset1].xyz[2] - grid2->verts[grid2->width * l + offset2].xyz[2]) > .1) continue;
						// ok the points are equal and should have the same lod error
						grid2->heightLodError[l] = grid1->heightLodError[k];
						touch = qtrue;
					}
				}
			}
		}
		if (touch) {
			grid2->lodFixed = 2;
			R_FixSharedVertexLodError_r ( start, grid2 );
			//NOTE: this would be correct but makes things really slow
			//grid2->lodFixed = 1;
		}
	}
}

/*
=================
R_FixSharedVertexLodError

This function assumes that all patches in one group are nicely stitched together for the highest LoD.
If this is not the case this function will still do its job but won't fix the highest LoD cracks.
=================
*/
static void R_FixSharedVertexLodError( void ) {
	int i;
	srfBspSurface_t *grid1;

	for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
		//
		grid1 = (srfBspSurface_t *) s_worldData.surfaces[i].data;
		// if this surface is not a grid
		if ( grid1->surfaceType != SF_GRID )
			continue;
		//
		if ( grid1->lodFixed )
			continue;
		//
		grid1->lodFixed = 2;
		// recursively fix other patches in the same LOD group
		R_FixSharedVertexLodError_r( i + 1, grid1);
	}
}

/*
===============
R_StitchPatches

Stitch two neighboring patch grids together by inserting rows/columns so
their shared edge points line up exactly (0.1-unit tolerance).
===============
*/
static int R_StitchPatches( int grid1num, int grid2num ) {
	float *v1, *v2;
	srfBspSurface_t *grid1, *grid2;
	int k, l, m, n, offset1, offset2, row, column;

	grid1 = (srfBspSurface_t *) s_worldData.surfaces[grid1num].data;
	grid2 = (srfBspSurface_t *) s_worldData.surfaces[grid2num].data;
	for (n = 0; n < 2; n++) {
		//
		if (n) offset1 = (grid1->height-1) * grid1->width;
		else offset1 = 0;
		if (R_MergedWidthPoints(grid1, offset1))
			continue;
		for (k = 0; k < grid1->width-2; k += 2) {
			for (m = 0; m < 2; m++) {
				if ( grid2->width >= MAX_GRID_SIZE )
					break;
				if (m) offset2 = (grid2->height-1) * grid2->width;
				else offset2 = 0;
				for ( l = 0; l < grid2->width-1; l++) {
					//
					v1 = grid1->verts[k + offset1].xyz;
					v2 = grid2->verts[l + offset2].xyz;
					if ( fabs(v1[0] - v2[0]) > .1)
						continue;
					if ( fabs(v1[1] - v2[1]) > .1)
						continue;
					if ( fabs(v1[2] - v2[2]) > .1)
						continue;

					v1 = grid1->verts[k + 2 + offset1].xyz;
					v2 = grid2->verts[l + 1 + offset2].xyz;
					if ( fabs(v1[0] - v2[0]) > .1)
						continue;
					if ( fabs(v1[1] - v2[1]) > .1)
						continue;
					if ( fabs(v1[2] - v2[2]) > .1)
						continue;
					//
					v1 = grid2->verts[l + 
offset2].xyz; v2 = grid2->verts[l + 1 + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert column into grid2 right after column l if (m) row = grid2->height-1; else row = 0; R_GridInsertColumn( grid2, l+1, row, grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } for (m = 0; m < 2; m++) { if (grid2->height >= MAX_GRID_SIZE) break; if (m) offset2 = grid2->width-1; else offset2 = 0; for ( l = 0; l < grid2->height-1; l++) { // v1 = grid1->verts[k + offset1].xyz; v2 = grid2->verts[grid2->width * l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[k + 2 + offset1].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[grid2->width * l + offset2].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert row into grid2 right after row l if (m) column = grid2->width-1; else column = 0; R_GridInsertRow( grid2, l+1, column, grid1->verts[k + 1 + offset1].xyz, grid1->widthLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } } } for (n = 0; n < 2; n++) { // if (n) offset1 = grid1->width-1; else offset1 = 0; if (R_MergedHeightPoints(grid1, offset1)) continue; for (k = 0; k < grid1->height-2; k += 2) { for (m = 0; m < 2; m++) { if ( grid2->width >= MAX_GRID_SIZE ) break; if (m) offset2 = (grid2->height-1) * 
grid2->width; else offset2 = 0; for ( l = 0; l < grid2->width-1; l++) { // v1 = grid1->verts[grid1->width * k + offset1].xyz; v2 = grid2->verts[l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[grid1->width * (k + 2) + offset1].xyz; v2 = grid2->verts[l + 1 + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[l + offset2].xyz; v2 = grid2->verts[(l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert column into grid2 right after column l if (m) row = grid2->height-1; else row = 0; R_GridInsertColumn( grid2, l+1, row, grid1->verts[grid1->width * (k + 1) + offset1].xyz, grid1->heightLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } for (m = 0; m < 2; m++) { if (grid2->height >= MAX_GRID_SIZE) break; if (m) offset2 = grid2->width-1; else offset2 = 0; for ( l = 0; l < grid2->height-1; l++) { // v1 = grid1->verts[grid1->width * k + offset1].xyz; v2 = grid2->verts[grid2->width * l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[grid1->width * (k + 2) + offset1].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[grid2->width * l + offset2].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // 
insert row into grid2 right after row l if (m) column = grid2->width-1; else column = 0; R_GridInsertRow( grid2, l+1, column, grid1->verts[grid1->width * (k + 1) + offset1].xyz, grid1->heightLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } } } for (n = 0; n < 2; n++) { // if (n) offset1 = (grid1->height-1) * grid1->width; else offset1 = 0; if (R_MergedWidthPoints(grid1, offset1)) continue; for (k = grid1->width-1; k > 1; k -= 2) { for (m = 0; m < 2; m++) { if ( !grid2 || grid2->width >= MAX_GRID_SIZE ) break; if (m) offset2 = (grid2->height-1) * grid2->width; else offset2 = 0; for ( l = 0; l < grid2->width-1; l++) { // v1 = grid1->verts[k + offset1].xyz; v2 = grid2->verts[l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[k - 2 + offset1].xyz; v2 = grid2->verts[l + 1 + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[l + offset2].xyz; v2 = grid2->verts[(l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert column into grid2 right after column l if (m) row = grid2->height-1; else row = 0; R_GridInsertColumn( grid2, l+1, row, grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } for (m = 0; m < 2; m++) { if (!grid2 || grid2->height >= MAX_GRID_SIZE) break; if (m) offset2 = grid2->width-1; else offset2 = 0; for ( l = 0; l < grid2->height-1; l++) { // v1 = grid1->verts[k + offset1].xyz; v2 = grid2->verts[grid2->width * l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( 
fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[k - 2 + offset1].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[grid2->width * l + offset2].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert row into grid2 right after row l if (m) column = grid2->width-1; else column = 0; R_GridInsertRow( grid2, l+1, column, grid1->verts[k - 1 + offset1].xyz, grid1->widthLodError[k+1]); if (!grid2) break; grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } } } for (n = 0; n < 2; n++) { // if (n) offset1 = grid1->width-1; else offset1 = 0; if (R_MergedHeightPoints(grid1, offset1)) continue; for (k = grid1->height-1; k > 1; k -= 2) { for (m = 0; m < 2; m++) { if (!grid2 || grid2->width >= MAX_GRID_SIZE ) break; if (m) offset2 = (grid2->height-1) * grid2->width; else offset2 = 0; for ( l = 0; l < grid2->width-1; l++) { // v1 = grid1->verts[grid1->width * k + offset1].xyz; v2 = grid2->verts[l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[grid1->width * (k - 2) + offset1].xyz; v2 = grid2->verts[l + 1 + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[l + offset2].xyz; v2 = grid2->verts[(l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert column into grid2 right after column l if (m) row = grid2->height-1; else 
row = 0; R_GridInsertColumn( grid2, l+1, row, grid1->verts[grid1->width * (k - 1) + offset1].xyz, grid1->heightLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } for (m = 0; m < 2; m++) { if (!grid2 || grid2->height >= MAX_GRID_SIZE) break; if (m) offset2 = grid2->width-1; else offset2 = 0; for ( l = 0; l < grid2->height-1; l++) { // v1 = grid1->verts[grid1->width * k + offset1].xyz; v2 = grid2->verts[grid2->width * l + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; v1 = grid1->verts[grid1->width * (k - 2) + offset1].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) > .1) continue; if ( fabs(v1[1] - v2[1]) > .1) continue; if ( fabs(v1[2] - v2[2]) > .1) continue; // v1 = grid2->verts[grid2->width * l + offset2].xyz; v2 = grid2->verts[grid2->width * (l + 1) + offset2].xyz; if ( fabs(v1[0] - v2[0]) < .01 && fabs(v1[1] - v2[1]) < .01 && fabs(v1[2] - v2[2]) < .01) continue; // //ri.Printf( PRINT_ALL, "found highest LoD crack between two patches\n" ); // insert row into grid2 right after row l if (m) column = grid2->width-1; else column = 0; R_GridInsertRow( grid2, l+1, column, grid1->verts[grid1->width * (k - 1) + offset1].xyz, grid1->heightLodError[k+1]); grid2->lodStitched = qfalse; s_worldData.surfaces[grid2num].data = (void *) grid2; return qtrue; } } } } return qfalse; } /* =============== R_TryStitchPatch This function will try to stitch patches in the same LoD group together for the highest LoD. Only single missing vertex cracks will be fixed. Vertices will be joined at the patch side a crack is first found, at the other side of the patch (on the same row or column) the vertices will not be joined and cracks might still appear at that side. 
===============
*/
// Repeatedly stitches grid1 (surface index grid1num) against every other grid
// in its LoD group until no more single-vertex cracks can be fixed.  Returns
// the number of insertions performed.
static int R_TryStitchingPatch( int grid1num ) {
	int j, numstitches;
	srfBspSurface_t *grid1, *grid2;

	numstitches = 0;
	grid1 = (srfBspSurface_t *) s_worldData.surfaces[grid1num].data;
	for ( j = 0; j < s_worldData.numsurfaces; j++ ) {
		//
		grid2 = (srfBspSurface_t *) s_worldData.surfaces[j].data;
		// if this surface is not a grid
		if ( grid2->surfaceType != SF_GRID ) continue;
		// grids in the same LOD group should have the exact same lod radius
		if ( grid1->lodRadius != grid2->lodRadius ) continue;
		// grids in the same LOD group should have the exact same lod origin
		if ( grid1->lodOrigin[0] != grid2->lodOrigin[0] ) continue;
		if ( grid1->lodOrigin[1] != grid2->lodOrigin[1] ) continue;
		if ( grid1->lodOrigin[2] != grid2->lodOrigin[2] ) continue;
		// each successful stitch may expose another crack, so loop until done
		while (R_StitchPatches(grid1num, j)) {
			numstitches++;
		}
	}
	return numstitches;
}

/*
===============
R_StitchAllPatches
===============
*/
// Runs the crack-stitching pass over every grid surface in the world until a
// full sweep makes no further changes (inserting vertices can un-stitch
// previously handled neighbours, hence the outer do/while).
static void R_StitchAllPatches( void ) {
	int i, stitched, numstitches;
	srfBspSurface_t *grid1;

	numstitches = 0;
	do {
		stitched = qfalse;
		for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
			//
			grid1 = (srfBspSurface_t *) s_worldData.surfaces[i].data;
			// if this surface is not a grid
			if ( grid1->surfaceType != SF_GRID )
				continue;
			// already handled this sweep (R_StitchPatches clears the flag on
			// any grid it modifies, forcing another sweep)
			if ( grid1->lodStitched )
				continue;
			//
			grid1->lodStitched = qtrue;
			stitched = qtrue;
			//
			numstitches += R_TryStitchingPatch( i );
		}
	} while (stitched);
	ri.Printf( PRINT_ALL, "stitched %d LoD cracks\n", numstitches );
}

/*
===============
R_MovePatchSurfacesToHunk
===============
*/
// Grid surfaces are built in temporary (ri.Malloc'd) memory because stitching
// resizes them; once stitching is final, copy each grid's arrays onto the hunk
// and free the temporaries.
static void R_MovePatchSurfacesToHunk(void) {
	int i;
	srfBspSurface_t *grid;

	for ( i = 0; i < s_worldData.numsurfaces; i++ ) {
		void *copyFrom;
		//
		grid = (srfBspSurface_t *) s_worldData.surfaces[i].data;
		// if this surface is not a grid
		if ( grid->surfaceType != SF_GRID )
			continue;
		// per-entry size is 4 bytes — presumably sizeof(float) for the lod
		// error arrays; TODO confirm against the srfBspSurface_t declaration
		copyFrom = grid->widthLodError;
		grid->widthLodError = ri.Hunk_Alloc( grid->width * 4, h_low );
		Com_Memcpy(grid->widthLodError, copyFrom, grid->width * 4);
		ri.Free(copyFrom);

		copyFrom = grid->heightLodError;
		grid->heightLodError = ri.Hunk_Alloc(grid->height * 4, h_low);
		Com_Memcpy(grid->heightLodError, copyFrom, grid->height * 4);
		ri.Free(copyFrom);

		copyFrom = grid->indexes;
		grid->indexes = ri.Hunk_Alloc(grid->numIndexes * sizeof(glIndex_t), h_low);
		Com_Memcpy(grid->indexes, copyFrom, grid->numIndexes * sizeof(glIndex_t));
		ri.Free(copyFrom);

		copyFrom = grid->verts;
		grid->verts = ri.Hunk_Alloc(grid->numVerts * sizeof(srfVert_t), h_low);
		Com_Memcpy(grid->verts, copyFrom, grid->numVerts * sizeof(srfVert_t));
		ri.Free(copyFrom);
	}
}

/*
===============
R_LoadSurfaces

Parses the drawSurfaces lump (plus the drawVerts and drawIndexes lumps it
references) into msurface_t's, optionally loading external HDR vertex colors,
then runs patch stitching / LoD fixup.
===============
*/
static void R_LoadSurfaces( const lump_t *surfs, const lump_t *verts, const lump_t *indexLump ) {
	const dsurface_t *in;
	msurface_t *out;
	const drawVert_t *dv;
	int *indexes;
	int count;
	int numFaces, numMeshes, numTriSurfs, numFlares;
	int i;
	float *hdrVertColors = NULL;

	numFaces = 0;
	numMeshes = 0;
	numTriSurfs = 0;
	numFlares = 0;

	if (surfs->filelen % sizeof(*in))
		ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);
	count = surfs->filelen / sizeof(*in);

	dv = (void *)(fileBase + verts->fileofs);
	if (verts->filelen % sizeof(*dv))
		ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);

	indexes = (void *)(fileBase + indexLump->fileofs);
	if ( indexLump->filelen % sizeof(*indexes))
		ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name);

	out = ri.Hunk_Alloc ( count * sizeof(*out), h_low );

	s_worldData.surfaces = out;
	s_worldData.numsurfaces = count;
	s_worldData.surfacesViewCount = ri.Hunk_Alloc ( count * sizeof(*s_worldData.surfacesViewCount), h_low );
	s_worldData.surfacesDlightBits = ri.Hunk_Alloc ( count * sizeof(*s_worldData.surfacesDlightBits), h_low );
	s_worldData.surfacesPshadowBits = ri.Hunk_Alloc ( count * sizeof(*s_worldData.surfacesPshadowBits), h_low );

	// load hdr vertex colors
	if (r_hdr->integer)
	{
		char filename[MAX_QPATH];
		int size;

		Com_sprintf( filename, sizeof( filename ), "maps/%s/vertlight.raw", s_worldData.baseName);
		//ri.Printf(PRINT_ALL, "looking for %s\n", filename);

		size = ri.FS_ReadFile(filename, (void **)&hdrVertColors);

		if (hdrVertColors)
		{
			//ri.Printf(PRINT_ALL, "Found!\n");
			// expect one float RGB triple per drawVert
			if (size != sizeof(float) * 3 * (verts->filelen / sizeof(*dv)))
				ri.Error(ERR_DROP, "Bad size for %s (%i, expected %i)!", filename, size, (int)((sizeof(float)) * 3 * (verts->filelen / sizeof(*dv))));
		}
	}

	// Two passes, allocate surfaces first, then load them full of data
	// This ensures surfaces are close together to reduce L2 cache misses when using VAOs,
	// which don't actually use the verts and indexes
	in = (void *)(fileBase + surfs->fileofs);
	out = s_worldData.surfaces;
	for ( i = 0 ; i < count ; i++, in++, out++ ) {
		switch ( LittleLong( in->surfaceType ) ) {
			case MST_PATCH:
				out->data = ri.Hunk_Alloc( sizeof(srfBspSurface_t), h_low);
				break;
			case MST_TRIANGLE_SOUP:
				out->data = ri.Hunk_Alloc( sizeof(srfBspSurface_t), h_low);
				break;
			case MST_PLANAR:
				out->data = ri.Hunk_Alloc( sizeof(srfBspSurface_t), h_low);
				break;
			case MST_FLARE:
				out->data = ri.Hunk_Alloc( sizeof(srfFlare_t), h_low);
				break;
			default:
				break;
		}
	}

	// second pass: parse each surface into the preallocated storage
	in = (void *)(fileBase + surfs->fileofs);
	out = s_worldData.surfaces;
	for ( i = 0 ; i < count ; i++, in++, out++ ) {
		switch ( LittleLong( in->surfaceType ) ) {
		case MST_PATCH:
			ParseMesh ( in, dv, hdrVertColors, out );
			numMeshes++;
			break;
		case MST_TRIANGLE_SOUP:
			ParseTriSurf( in, dv, hdrVertColors, out, indexes );
			numTriSurfs++;
			break;
		case MST_PLANAR:
			ParseFace( in, dv, hdrVertColors, out, indexes );
			numFaces++;
			break;
		case MST_FLARE:
			ParseFlare( in, dv, out, indexes );
			numFlares++;
			break;
		default:
			ri.Error( ERR_DROP, "Bad surfaceType" );
		}
	}

	if (hdrVertColors)
	{
		ri.FS_FreeFile(hdrVertColors);
	}

#ifdef PATCH_STITCHING
	R_StitchAllPatches();
#endif

	R_FixSharedVertexLodError();

#ifdef PATCH_STITCHING
	R_MovePatchSurfacesToHunk();
#endif

	ri.Printf( PRINT_ALL, "...loaded %d faces, %i meshes, %i trisurfs, %i flares\n",
		numFaces, numMeshes, numTriSurfs, numFlares );
}

/*
================= R_LoadSubmodels ================= */ static void R_LoadSubmodels( const lump_t *l ) { const dmodel_t *in; bmodel_t *out; int i, j, count; in = (void *)(fileBase + l->fileofs); if (l->filelen % sizeof(*in)) ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); count = l->filelen / sizeof(*in); s_worldData.numBModels = count; s_worldData.bmodels = out = ri.Hunk_Alloc( count * sizeof(*out), h_low ); for ( i=0 ; itype = MOD_BRUSH; model->bmodel = out; Com_sprintf( model->name, sizeof( model->name ), "*%d", i ); for (j=0 ; j<3 ; j++) { out->bounds[0][j] = LittleFloat (in->mins[j]); out->bounds[1][j] = LittleFloat (in->maxs[j]); } out->firstSurface = LittleLong( in->firstSurface ); out->numSurfaces = LittleLong( in->numSurfaces ); if(i == 0) { // Add this for limiting VAO surface creation s_worldData.numWorldSurfaces = out->numSurfaces; } } } //================================================================== /* ================= R_SetParent ================= */ static void R_SetParent (mnode_t *node, mnode_t *parent) { node->parent = parent; if (node->contents != -1) return; R_SetParent (node->children[0], node); R_SetParent (node->children[1], node); } /* ================= R_LoadNodesAndLeafs ================= */ static void R_LoadNodesAndLeafs (const lump_t *nodeLump, const lump_t *leafLump) { int i, j, p; const dnode_t *in; dleaf_t *inLeaf; mnode_t *out; int numNodes, numLeafs; in = (void *)(fileBase + nodeLump->fileofs); if (nodeLump->filelen % sizeof(dnode_t) || leafLump->filelen % sizeof(dleaf_t) ) { ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); } numNodes = nodeLump->filelen / sizeof(dnode_t); numLeafs = leafLump->filelen / sizeof(dleaf_t); out = ri.Hunk_Alloc ( (numNodes + numLeafs) * sizeof(*out), h_low); s_worldData.nodes = out; s_worldData.numnodes = numNodes + numLeafs; s_worldData.numDecisionNodes = numNodes; // load nodes for ( i=0 ; imins[j] = LittleLong (in->mins[j]); out->maxs[j] = 
LittleLong (in->maxs[j]); } p = LittleLong(in->planeNum); out->plane = s_worldData.planes + p; out->contents = CONTENTS_NODE; // differentiate from leafs for (j=0 ; j<2 ; j++) { p = LittleLong (in->children[j]); if (p >= 0) out->children[j] = s_worldData.nodes + p; else out->children[j] = s_worldData.nodes + numNodes + (-1 - p); } } // load leafs inLeaf = (void *)(fileBase + leafLump->fileofs); for ( i=0 ; imins[j] = LittleLong (inLeaf->mins[j]); out->maxs[j] = LittleLong (inLeaf->maxs[j]); } out->cluster = LittleLong(inLeaf->cluster); out->area = LittleLong(inLeaf->area); if ( out->cluster >= s_worldData.numClusters ) { s_worldData.numClusters = out->cluster + 1; } out->firstmarksurface = LittleLong(inLeaf->firstLeafSurface); out->nummarksurfaces = LittleLong(inLeaf->numLeafSurfaces); } // chain descendants R_SetParent (s_worldData.nodes, NULL); } //============================================================================= /* ================= R_LoadShaders ================= */ static void R_LoadShaders( const lump_t *l ) { int i, count; dshader_t *in, *out; in = (void *)(fileBase + l->fileofs); if (l->filelen % sizeof(*in)) ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); count = l->filelen / sizeof(*in); out = ri.Hunk_Alloc ( count*sizeof(*out), h_low ); s_worldData.shaders = out; s_worldData.numShaders = count; Com_Memcpy( out, in, count*sizeof(*out) ); for ( i=0 ; ifileofs); if (l->filelen % sizeof(*in)) ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); count = l->filelen / sizeof(*in); out = ri.Hunk_Alloc ( count*sizeof(*out), h_low); s_worldData.marksurfaces = out; s_worldData.nummarksurfaces = count; for ( i=0 ; ifileofs); if (l->filelen % sizeof(*in)) ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); count = l->filelen / sizeof(*in); out = ri.Hunk_Alloc ( count*2*sizeof(*out), h_low); s_worldData.planes = out; s_worldData.numplanes = count; for ( i=0 ; inormal[j] = LittleFloat 
(in->normal[j]); if (out->normal[j] < 0) { bits |= 1<dist = LittleFloat (in->dist); out->type = PlaneTypeForNormal( out->normal ); out->signbits = bits; } } /* ================= R_LoadFogs ================= */ static void R_LoadFogs( const lump_t *l, const lump_t *brushesLump, const lump_t *sidesLump ) { int i; fog_t *out; const dfog_t *fogs; const dbrush_t *brushes, *brush; const dbrushside_t *sides; int count, brushesCount, sidesCount; int sideNum; int planeNum; shader_t *shader; float d; int firstSide; fogs = (void *)(fileBase + l->fileofs); if (l->filelen % sizeof(*fogs)) { ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); } count = l->filelen / sizeof(*fogs); // create fog structures for them s_worldData.numfogs = count + 1; s_worldData.fogs = ri.Hunk_Alloc ( s_worldData.numfogs*sizeof(*out), h_low); out = s_worldData.fogs + 1; if ( !count ) { return; } brushes = (void *)(fileBase + brushesLump->fileofs); if (brushesLump->filelen % sizeof(*brushes)) { ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); } brushesCount = brushesLump->filelen / sizeof(*brushes); sides = (void *)(fileBase + sidesLump->fileofs); if (sidesLump->filelen % sizeof(*sides)) { ri.Error (ERR_DROP, "LoadMap: funny lump size in %s",s_worldData.name); } sidesCount = sidesLump->filelen / sizeof(*sides); for ( i=0 ; ioriginalBrushNumber = LittleLong( fogs->brushNum ); if ( (unsigned)out->originalBrushNumber >= brushesCount ) { ri.Error( ERR_DROP, "fog brushNumber out of range" ); } brush = brushes + out->originalBrushNumber; firstSide = LittleLong( brush->firstSide ); if ( (unsigned)firstSide > sidesCount - 6 ) { ri.Error( ERR_DROP, "fog brush sideNumber out of range" ); } // brushes are always sorted with the axial sides first sideNum = firstSide + 0; planeNum = LittleLong( sides[ sideNum ].planeNum ); out->bounds[0][0] = -s_worldData.planes[ planeNum ].dist; sideNum = firstSide + 1; planeNum = LittleLong( sides[ sideNum ].planeNum ); 
out->bounds[1][0] = s_worldData.planes[ planeNum ].dist; sideNum = firstSide + 2; planeNum = LittleLong( sides[ sideNum ].planeNum ); out->bounds[0][1] = -s_worldData.planes[ planeNum ].dist; sideNum = firstSide + 3; planeNum = LittleLong( sides[ sideNum ].planeNum ); out->bounds[1][1] = s_worldData.planes[ planeNum ].dist; sideNum = firstSide + 4; planeNum = LittleLong( sides[ sideNum ].planeNum ); out->bounds[0][2] = -s_worldData.planes[ planeNum ].dist; sideNum = firstSide + 5; planeNum = LittleLong( sides[ sideNum ].planeNum ); out->bounds[1][2] = s_worldData.planes[ planeNum ].dist; // get information from the shader for fog parameters shader = R_FindShader( fogs->shader, LIGHTMAP_NONE, qtrue ); out->parms = shader->fogParms; out->colorInt = ColorBytes4 ( shader->fogParms.color[0], shader->fogParms.color[1], shader->fogParms.color[2], 1.0 ); d = shader->fogParms.depthForOpaque < 1 ? 1 : shader->fogParms.depthForOpaque; out->tcScale = 1.0f / ( d * 8 ); // set the gradient vector sideNum = LittleLong( fogs->visibleSide ); if ( sideNum == -1 ) { out->hasSurface = qfalse; } else { out->hasSurface = qtrue; planeNum = LittleLong( sides[ firstSide + sideNum ].planeNum ); VectorSubtract( vec3_origin, s_worldData.planes[ planeNum ].normal, out->surface ); out->surface[3] = -s_worldData.planes[ planeNum ].dist; } out++; } } /* ================ R_LoadLightGrid ================ */ static void R_LoadLightGrid( const lump_t *l ) { int i; vec3_t maxs; int numGridPoints; world_t *w; float *wMins, *wMaxs; w = &s_worldData; w->lightGridInverseSize[0] = 1.0f / w->lightGridSize[0]; w->lightGridInverseSize[1] = 1.0f / w->lightGridSize[1]; w->lightGridInverseSize[2] = 1.0f / w->lightGridSize[2]; wMins = w->bmodels[0].bounds[0]; wMaxs = w->bmodels[0].bounds[1]; for ( i = 0 ; i < 3 ; i++ ) { w->lightGridOrigin[i] = w->lightGridSize[i] * ceil( wMins[i] / w->lightGridSize[i] ); maxs[i] = w->lightGridSize[i] * floor( wMaxs[i] / w->lightGridSize[i] ); w->lightGridBounds[i] = (maxs[i] - 
w->lightGridOrigin[i])/w->lightGridSize[i] + 1; } numGridPoints = w->lightGridBounds[0] * w->lightGridBounds[1] * w->lightGridBounds[2]; if ( l->filelen != numGridPoints * 8 ) { ri.Printf( PRINT_WARNING, "WARNING: light grid mismatch\n" ); w->lightGridData = NULL; return; } w->lightGridData = ri.Hunk_Alloc( l->filelen, h_low ); Com_Memcpy( w->lightGridData, (void *)(fileBase + l->fileofs), l->filelen ); // deal with overbright bits for ( i = 0 ; i < numGridPoints ; i++ ) { R_ColorShiftLightingBytes( &w->lightGridData[i*8], &w->lightGridData[i*8] ); R_ColorShiftLightingBytes( &w->lightGridData[i*8+3], &w->lightGridData[i*8+3] ); } // load hdr lightgrid if (r_hdr->integer) { char filename[MAX_QPATH]; float *hdrLightGrid; int size; Com_sprintf( filename, sizeof( filename ), "maps/%s/lightgrid.raw", s_worldData.baseName); //ri.Printf(PRINT_ALL, "looking for %s\n", filename); size = ri.FS_ReadFile(filename, (void **)&hdrLightGrid); if (hdrLightGrid) { //ri.Printf(PRINT_ALL, "found!\n"); if (size != sizeof(float) * 6 * numGridPoints) ri.Error(ERR_DROP, "Bad size for %s (%i, expected %i)!", filename, size, (int)(sizeof(float)) * 6 * numGridPoints); w->lightGrid16 = ri.Hunk_Alloc(sizeof(w->lightGrid16) * 6 * numGridPoints, h_low); for (i = 0; i < numGridPoints ; i++) { vec4_t c; c[0] = hdrLightGrid[i * 6]; c[1] = hdrLightGrid[i * 6 + 1]; c[2] = hdrLightGrid[i * 6 + 2]; c[3] = 1.0f; R_ColorShiftLightingFloats(c, c); ColorToRGB16(c, &w->lightGrid16[i * 6]); c[0] = hdrLightGrid[i * 6 + 3]; c[1] = hdrLightGrid[i * 6 + 4]; c[2] = hdrLightGrid[i * 6 + 5]; c[3] = 1.0f; R_ColorShiftLightingFloats(c, c); ColorToRGB16(c, &w->lightGrid16[i * 6 + 3]); } } else if (0) { // promote 8-bit lightgrid to 16-bit w->lightGrid16 = ri.Hunk_Alloc(sizeof(w->lightGrid16) * 6 * numGridPoints, h_low); for (i = 0; i < numGridPoints; i++) { w->lightGrid16[i * 6] = w->lightGridData[i * 8] * 257; w->lightGrid16[i * 6 + 1] = w->lightGridData[i * 8 + 1] * 257; w->lightGrid16[i * 6 + 2] = 
w->lightGridData[i * 8 + 2] * 257; w->lightGrid16[i * 6 + 3] = w->lightGridData[i * 8 + 3] * 257; w->lightGrid16[i * 6 + 4] = w->lightGridData[i * 8 + 4] * 257; w->lightGrid16[i * 6 + 5] = w->lightGridData[i * 8 + 5] * 257; } } if (hdrLightGrid) ri.FS_FreeFile(hdrLightGrid); } } /* ================ R_LoadEntities ================ */ static void R_LoadEntities( const lump_t *l ) { const char *p, *token, *s; char keyname[MAX_TOKEN_CHARS]; char value[MAX_TOKEN_CHARS]; world_t *w; w = &s_worldData; w->lightGridSize[0] = 64; w->lightGridSize[1] = 64; w->lightGridSize[2] = 128; p = (const char *)(fileBase + l->fileofs); // store for reference by the cgame w->entityString = ri.Hunk_Alloc( l->filelen + 1, h_low ); strcpy( w->entityString, p ); w->entityParsePoint = w->entityString; token = COM_ParseExt( &p, qtrue ); if (!*token || *token != '{') { return; } // only parse the world spawn while ( 1 ) { // parse key token = COM_ParseExt( &p, qtrue ); if ( !*token || *token == '}' ) { break; } Q_strncpyz(keyname, token, sizeof(keyname)); // parse value token = COM_ParseExt( &p, qtrue ); if ( !*token || *token == '}' ) { break; } Q_strncpyz(value, token, sizeof(value)); // check for remapping of shaders for vertex lighting s = "vertexremapshader"; if (!Q_strncmp(keyname, s, strlen(s)) ) { char *vs = strchr(value, ';'); if (!vs) { ri.Printf( PRINT_WARNING, "WARNING: no semi colon in vertexshaderremap '%s'\n", value ); break; } *vs++ = 0; if ( r_vertexLight->integer && tr.vertexLightingAllowed ) { R_RemapShader(value, s, "0"); } continue; } // check for remapping of shaders s = "remapshader"; if (!Q_strncmp(keyname, s, strlen(s)) ) { char *vs = strchr(value, ';'); if (!vs) { ri.Printf( PRINT_WARNING, "WARNING: no semi colon in shaderremap '%s'\n", value ); break; } *vs++ = 0; R_RemapShader(value, s, "0"); continue; } // check for a different grid size if (!Q_stricmp(keyname, "gridsize")) { sscanf(value, "%f %f %f", &w->lightGridSize[0], &w->lightGridSize[1], &w->lightGridSize[2] 
); continue; } // check for auto exposure if (!Q_stricmp(keyname, "autoExposureMinMax")) { sscanf(value, "%f %f", &tr.autoExposureMinMax[0], &tr.autoExposureMinMax[1]); continue; } } } /* ================= R_GetEntityToken ================= */ qboolean R_GetEntityToken( char *buffer, int size ) { const char *s; s = COM_Parse( &s_worldData.entityParsePoint ); Q_strncpyz( buffer, s, size ); if ( !s_worldData.entityParsePoint && !s[0] ) { s_worldData.entityParsePoint = s_worldData.entityString; return qfalse; } else { return qtrue; } } #ifndef MAX_SPAWN_VARS #define MAX_SPAWN_VARS 64 #endif // derived from G_ParseSpawnVars() in g_spawn.c static qboolean R_ParseSpawnVars( char *spawnVarChars, int maxSpawnVarChars, int *numSpawnVars, const char *spawnVars[MAX_SPAWN_VARS][2] ) { char keyname[MAX_TOKEN_CHARS]; char com_token[MAX_TOKEN_CHARS]; int numSpawnVarChars = 0; *numSpawnVars = 0; // parse the opening brace if ( !R_GetEntityToken( com_token, sizeof( com_token ) ) ) { // end of spawn string return qfalse; } if ( com_token[0] != '{' ) { ri.Printf( PRINT_ALL, "R_ParseSpawnVars: found %s when expecting {\n",com_token ); return qfalse; } // go through all the key / value pairs while ( 1 ) { int keyLength, tokenLength; // parse key if ( !R_GetEntityToken( keyname, sizeof( keyname ) ) ) { ri.Printf( PRINT_ALL, "R_ParseSpawnVars: EOF without closing brace\n" ); return qfalse; } if ( keyname[0] == '}' ) { break; } // parse value if ( !R_GetEntityToken( com_token, sizeof( com_token ) ) ) { ri.Printf( PRINT_ALL, "R_ParseSpawnVars: EOF without closing brace\n" ); return qfalse; } if ( com_token[0] == '}' ) { ri.Printf( PRINT_ALL, "R_ParseSpawnVars: closing brace without data\n" ); return qfalse; } if ( *numSpawnVars == MAX_SPAWN_VARS ) { ri.Printf( PRINT_ALL, "R_ParseSpawnVars: MAX_SPAWN_VARS\n" ); return qfalse; } keyLength = strlen(keyname) + 1; tokenLength = strlen(com_token) + 1; if (numSpawnVarChars + keyLength + tokenLength > maxSpawnVarChars) { ri.Printf( PRINT_ALL, 
"R_ParseSpawnVars: MAX_SPAWN_VAR_CHARS\n" ); return qfalse; } strcpy(spawnVarChars + numSpawnVarChars, keyname); spawnVars[ *numSpawnVars ][0] = spawnVarChars + numSpawnVarChars; numSpawnVarChars += keyLength; strcpy(spawnVarChars + numSpawnVarChars, com_token); spawnVars[ *numSpawnVars ][1] = spawnVarChars + numSpawnVarChars; numSpawnVarChars += tokenLength; (*numSpawnVars)++; } return qtrue; } static void R_LoadEnvironmentJson(const char *baseName) { char filename[MAX_QPATH]; union { char *c; void *v; } buffer; char *bufferEnd; const char *cubemapArrayJson; int filelen, i; Com_sprintf(filename, MAX_QPATH, "cubemaps/%s/env.json", baseName); filelen = ri.FS_ReadFile(filename, &buffer.v); if (!buffer.c) return; bufferEnd = buffer.c + filelen; if (JSON_ValueGetType(buffer.c, bufferEnd) != JSONTYPE_OBJECT) { ri.Printf(PRINT_ALL, "Bad %s: does not start with a object\n", filename); ri.FS_FreeFile(buffer.v); return; } cubemapArrayJson = JSON_ObjectGetNamedValue(buffer.c, bufferEnd, "Cubemaps"); if (!cubemapArrayJson) { ri.Printf(PRINT_ALL, "Bad %s: no Cubemaps\n", filename); ri.FS_FreeFile(buffer.v); return; } if (JSON_ValueGetType(cubemapArrayJson, bufferEnd) != JSONTYPE_ARRAY) { ri.Printf(PRINT_ALL, "Bad %s: Cubemaps not an array\n", filename); ri.FS_FreeFile(buffer.v); return; } tr.numCubemaps = JSON_ArrayGetIndex(cubemapArrayJson, bufferEnd, NULL, 0); tr.cubemaps = ri.Hunk_Alloc(tr.numCubemaps * sizeof(*tr.cubemaps), h_low); memset(tr.cubemaps, 0, tr.numCubemaps * sizeof(*tr.cubemaps)); for (i = 0; i < tr.numCubemaps; i++) { cubemap_t *cubemap = &tr.cubemaps[i]; const char *cubemapJson, *keyValueJson, *indexes[3]; int j; cubemapJson = JSON_ArrayGetValue(cubemapArrayJson, bufferEnd, i); keyValueJson = JSON_ObjectGetNamedValue(cubemapJson, bufferEnd, "Name"); if (!JSON_ValueGetString(keyValueJson, bufferEnd, cubemap->name, MAX_QPATH)) cubemap->name[0] = '\0'; keyValueJson = JSON_ObjectGetNamedValue(cubemapJson, bufferEnd, "Position"); JSON_ArrayGetIndex(keyValueJson, 
bufferEnd, indexes, 3); for (j = 0; j < 3; j++) cubemap->origin[j] = JSON_ValueGetFloat(indexes[j], bufferEnd); cubemap->parallaxRadius = 1000.0f; keyValueJson = JSON_ObjectGetNamedValue(cubemapJson, bufferEnd, "Radius"); if (keyValueJson) cubemap->parallaxRadius = JSON_ValueGetFloat(keyValueJson, bufferEnd); } ri.FS_FreeFile(buffer.v); } static void R_LoadCubemapEntities(char *cubemapEntityName) { char spawnVarChars[2048]; int numSpawnVars; const char *spawnVars[MAX_SPAWN_VARS][2]; int numCubemaps = 0; // count cubemaps numCubemaps = 0; while(R_ParseSpawnVars(spawnVarChars, sizeof(spawnVarChars), &numSpawnVars, spawnVars)) { int i; for (i = 0; i < numSpawnVars; i++) { if (!Q_stricmp(spawnVars[i][0], "classname") && !Q_stricmp(spawnVars[i][1], cubemapEntityName)) numCubemaps++; } } if (!numCubemaps) return; tr.numCubemaps = numCubemaps; tr.cubemaps = ri.Hunk_Alloc(tr.numCubemaps * sizeof(*tr.cubemaps), h_low); memset(tr.cubemaps, 0, tr.numCubemaps * sizeof(*tr.cubemaps)); numCubemaps = 0; while(R_ParseSpawnVars(spawnVarChars, sizeof(spawnVarChars), &numSpawnVars, spawnVars)) { int i; char name[MAX_QPATH]; qboolean isCubemap = qfalse; qboolean originSet = qfalse; vec3_t origin; float parallaxRadius = 1000.0f; name[0] = '\0'; for (i = 0; i < numSpawnVars; i++) { if (!Q_stricmp(spawnVars[i][0], "classname") && !Q_stricmp(spawnVars[i][1], cubemapEntityName)) isCubemap = qtrue; if (!Q_stricmp(spawnVars[i][0], "name")) Q_strncpyz(name, spawnVars[i][1], sizeof(name)); if (!Q_stricmp(spawnVars[i][0], "origin")) { sscanf(spawnVars[i][1], "%f %f %f", &origin[0], &origin[1], &origin[2]); originSet = qtrue; } else if (!Q_stricmp(spawnVars[i][0], "radius")) { sscanf(spawnVars[i][1], "%f", ¶llaxRadius); } } if (isCubemap && originSet) { cubemap_t *cubemap = &tr.cubemaps[numCubemaps]; Q_strncpyz(cubemap->name, name, sizeof(cubemap->name)); VectorCopy(origin, cubemap->origin); cubemap->parallaxRadius = parallaxRadius; numCubemaps++; } } } static void 
R_AssignCubemapsToWorldSurfaces(void) { world_t *w; int i; w = &s_worldData; for (i = 0; i < w->numsurfaces; i++) { msurface_t *surf = &w->surfaces[i]; vec3_t surfOrigin; if (surf->cullinfo.type & CULLINFO_SPHERE) { VectorCopy(surf->cullinfo.localOrigin, surfOrigin); } else if (surf->cullinfo.type & CULLINFO_BOX) { surfOrigin[0] = (surf->cullinfo.bounds[0][0] + surf->cullinfo.bounds[1][0]) * 0.5f; surfOrigin[1] = (surf->cullinfo.bounds[0][1] + surf->cullinfo.bounds[1][1]) * 0.5f; surfOrigin[2] = (surf->cullinfo.bounds[0][2] + surf->cullinfo.bounds[1][2]) * 0.5f; } else { //ri.Printf(PRINT_ALL, "surface %d has no cubemap\n", i); continue; } surf->cubemapIndex = R_CubemapForPoint(surfOrigin); //ri.Printf(PRINT_ALL, "surface %d has cubemap %d\n", i, surf->cubemapIndex); } } static void R_LoadCubemaps(void) { int i; imgFlags_t flags = IMGFLAG_CLAMPTOEDGE | IMGFLAG_MIPMAP | IMGFLAG_NOLIGHTSCALE | IMGFLAG_CUBEMAP; for (i = 0; i < tr.numCubemaps; i++) { char filename[MAX_QPATH]; cubemap_t *cubemap = &tr.cubemaps[i]; Com_sprintf(filename, MAX_QPATH, "cubemaps/%s/%03d.dds", tr.world->baseName, i); cubemap->image = R_FindImageFile(filename, IMGTYPE_COLORALPHA, flags); } } static void R_RenderMissingCubemaps(void) { int i, j; imgFlags_t flags = IMGFLAG_NO_COMPRESSION | IMGFLAG_CLAMPTOEDGE | IMGFLAG_MIPMAP | IMGFLAG_NOLIGHTSCALE | IMGFLAG_CUBEMAP; for (i = 0; i < tr.numCubemaps; i++) { if (!tr.cubemaps[i].image) { tr.cubemaps[i].image = R_CreateImage(va("*cubeMap%d", i), NULL, r_cubemapSize->integer, r_cubemapSize->integer, IMGTYPE_COLORALPHA, flags, GL_RGBA8); for (j = 0; j < 6; j++) { RE_ClearScene(); R_RenderCubemapSide(i, j, qfalse); R_IssuePendingRenderCommands(); R_InitNextFrame(); } } } } static void R_CalcVertexLightDirs( void ) { int i, k; msurface_t *surface; for(k = 0, surface = &s_worldData.surfaces[0]; k < s_worldData.numsurfaces /* s_worldData.numWorldSurfaces */; k++, surface++) { srfBspSurface_t *bspSurf = (srfBspSurface_t *) surface->data; 
switch(bspSurf->surfaceType) { case SF_FACE: case SF_GRID: case SF_TRIANGLES: for(i = 0; i < bspSurf->numVerts; i++) { vec3_t lightDir; vec3_t normal; R_VaoUnpackNormal(normal, bspSurf->verts[i].normal); R_LightDirForPoint( bspSurf->verts[i].xyz, lightDir, normal, &s_worldData ); R_VaoPackNormal(bspSurf->verts[i].lightdir, lightDir); } break; default: break; } } } /* ================= RE_LoadWorldMap Called directly from cgame ================= */ void RE_LoadWorldMap( const char *name ) { int i; dheader_t *header; union { byte *b; void *v; } buffer; byte *startMarker; if ( tr.worldMapLoaded ) { ri.Error( ERR_DROP, "ERROR: attempted to redundantly load world map" ); } // set default map light scale tr.sunShadowScale = 0.5f; // set default sun direction to be used if it isn't // overridden by a shader tr.sunDirection[0] = 0.45f; tr.sunDirection[1] = 0.3f; tr.sunDirection[2] = 0.9f; VectorNormalize( tr.sunDirection ); // set default autoexposure settings tr.autoExposureMinMax[0] = -2.0f; tr.autoExposureMinMax[1] = 2.0f; // set default tone mapping settings tr.toneMinAvgMaxLevel[0] = -8.0f; tr.toneMinAvgMaxLevel[1] = -2.0f; tr.toneMinAvgMaxLevel[2] = 0.0f; // reset last cascade sun direction so last shadow cascade is rerendered VectorClear(tr.lastCascadeSunDirection); tr.worldMapLoaded = qtrue; // load it ri.FS_ReadFile( name, &buffer.v ); if ( !buffer.b ) { ri.Error (ERR_DROP, "RE_LoadWorldMap: %s not found", name); } // clear tr.world so if the level fails to load, the next // try will not look at the partially loaded version tr.world = NULL; Com_Memset( &s_worldData, 0, sizeof( s_worldData ) ); Q_strncpyz( s_worldData.name, name, sizeof( s_worldData.name ) ); Q_strncpyz( s_worldData.baseName, COM_SkipPath( s_worldData.name ), sizeof( s_worldData.name ) ); COM_StripExtension(s_worldData.baseName, s_worldData.baseName, sizeof(s_worldData.baseName)); startMarker = ri.Hunk_Alloc(0, h_low); c_gridVerts = 0; header = (dheader_t *)buffer.b; fileBase = (byte *)header; i = 
LittleLong (header->version); if ( i != BSP_VERSION ) { ri.Error (ERR_DROP, "RE_LoadWorldMap: %s has wrong version number (%i should be %i)", name, i, BSP_VERSION); } // swap all the lumps for (i=0 ; ilumps[LUMP_ENTITIES] ); R_LoadShaders( &header->lumps[LUMP_SHADERS] ); R_LoadLightmaps( &header->lumps[LUMP_LIGHTMAPS], &header->lumps[LUMP_SURFACES] ); R_LoadPlanes (&header->lumps[LUMP_PLANES]); R_LoadFogs( &header->lumps[LUMP_FOGS], &header->lumps[LUMP_BRUSHES], &header->lumps[LUMP_BRUSHSIDES] ); R_LoadSurfaces( &header->lumps[LUMP_SURFACES], &header->lumps[LUMP_DRAWVERTS], &header->lumps[LUMP_DRAWINDEXES] ); R_LoadMarksurfaces (&header->lumps[LUMP_LEAFSURFACES]); R_LoadNodesAndLeafs (&header->lumps[LUMP_NODES], &header->lumps[LUMP_LEAFS]); R_LoadSubmodels (&header->lumps[LUMP_MODELS]); R_LoadVisibility( &header->lumps[LUMP_VISIBILITY] ); R_LoadLightGrid( &header->lumps[LUMP_LIGHTGRID] ); // determine vertex light directions R_CalcVertexLightDirs(); // determine which parts of the map are in sunlight if (0) { world_t *w; uint8_t *primaryLightGrid, *data; int lightGridSize; int i; w = &s_worldData; lightGridSize = w->lightGridBounds[0] * w->lightGridBounds[1] * w->lightGridBounds[2]; primaryLightGrid = ri.Malloc(lightGridSize * sizeof(*primaryLightGrid)); memset(primaryLightGrid, 0, lightGridSize * sizeof(*primaryLightGrid)); data = w->lightGridData; for (i = 0; i < lightGridSize; i++, data += 8) { int lat, lng; vec3_t gridLightDir, gridLightCol; // skip samples in wall if (!(data[0]+data[1]+data[2]+data[3]+data[4]+data[5]) ) continue; gridLightCol[0] = ByteToFloat(data[3]); gridLightCol[1] = ByteToFloat(data[4]); gridLightCol[2] = ByteToFloat(data[5]); (void)gridLightCol; // Suppress unused-but-set-variable warning lat = data[7]; lng = data[6]; lat *= (FUNCTABLE_SIZE/256); lng *= (FUNCTABLE_SIZE/256); // decode X as cos( lat ) * sin( long ) // decode Y as sin( lat ) * sin( long ) // decode Z as cos( long ) gridLightDir[0] = 
tr.sinTable[(lat+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK] * tr.sinTable[lng]; gridLightDir[1] = tr.sinTable[lat] * tr.sinTable[lng]; gridLightDir[2] = tr.sinTable[(lng+(FUNCTABLE_SIZE/4))&FUNCTABLE_MASK]; // FIXME: magic number for determining if light direction is close enough to sunlight if (DotProduct(gridLightDir, tr.sunDirection) > 0.75f) { primaryLightGrid[i] = 1; } else { primaryLightGrid[i] = 255; } } if (0) { int i; byte *buffer = ri.Malloc(w->lightGridBounds[0] * w->lightGridBounds[1] * 3 + 18); byte *out; uint8_t *in; char fileName[MAX_QPATH]; Com_Memset (buffer, 0, 18); buffer[2] = 2; // uncompressed type buffer[12] = w->lightGridBounds[0] & 255; buffer[13] = w->lightGridBounds[0] >> 8; buffer[14] = w->lightGridBounds[1] & 255; buffer[15] = w->lightGridBounds[1] >> 8; buffer[16] = 24; // pixel size in = primaryLightGrid; for (i = 0; i < w->lightGridBounds[2]; i++) { int j; sprintf(fileName, "primarylg%d.tga", i); out = buffer + 18; for (j = 0; j < w->lightGridBounds[0] * w->lightGridBounds[1]; j++) { if (*in == 1) { *out++ = 255; *out++ = 255; *out++ = 255; } else if (*in == 255) { *out++ = 64; *out++ = 64; *out++ = 64; } else { *out++ = 0; *out++ = 0; *out++ = 0; } in++; } ri.FS_WriteFile(fileName, buffer, w->lightGridBounds[0] * w->lightGridBounds[1] * 3 + 18); } ri.Free(buffer); } for (i = 0; i < w->numWorldSurfaces; i++) { msurface_t *surf = w->surfaces + i; cullinfo_t *ci = &surf->cullinfo; if(ci->type & CULLINFO_PLANE) { if (DotProduct(ci->plane.normal, tr.sunDirection) <= 0.0f) { //ri.Printf(PRINT_ALL, "surface %d is not oriented towards sunlight\n", i); continue; } } if(ci->type & CULLINFO_BOX) { int ibounds[2][3], x, y, z, goodSamples, numSamples; vec3_t lightOrigin; VectorSubtract( ci->bounds[0], w->lightGridOrigin, lightOrigin ); ibounds[0][0] = floor(lightOrigin[0] * w->lightGridInverseSize[0]); ibounds[0][1] = floor(lightOrigin[1] * w->lightGridInverseSize[1]); ibounds[0][2] = floor(lightOrigin[2] * w->lightGridInverseSize[2]); VectorSubtract( 
ci->bounds[1], w->lightGridOrigin, lightOrigin ); ibounds[1][0] = ceil(lightOrigin[0] * w->lightGridInverseSize[0]); ibounds[1][1] = ceil(lightOrigin[1] * w->lightGridInverseSize[1]); ibounds[1][2] = ceil(lightOrigin[2] * w->lightGridInverseSize[2]); ibounds[0][0] = CLAMP(ibounds[0][0], 0, w->lightGridSize[0]); ibounds[0][1] = CLAMP(ibounds[0][1], 0, w->lightGridSize[1]); ibounds[0][2] = CLAMP(ibounds[0][2], 0, w->lightGridSize[2]); ibounds[1][0] = CLAMP(ibounds[1][0], 0, w->lightGridSize[0]); ibounds[1][1] = CLAMP(ibounds[1][1], 0, w->lightGridSize[1]); ibounds[1][2] = CLAMP(ibounds[1][2], 0, w->lightGridSize[2]); /* ri.Printf(PRINT_ALL, "surf %d bounds (%f %f %f)-(%f %f %f) ibounds (%d %d %d)-(%d %d %d)\n", i, ci->bounds[0][0], ci->bounds[0][1], ci->bounds[0][2], ci->bounds[1][0], ci->bounds[1][1], ci->bounds[1][2], ibounds[0][0], ibounds[0][1], ibounds[0][2], ibounds[1][0], ibounds[1][1], ibounds[1][2]); */ goodSamples = 0; numSamples = 0; for (x = ibounds[0][0]; x <= ibounds[1][0]; x++) { for (y = ibounds[0][1]; y <= ibounds[1][1]; y++) { for (z = ibounds[0][2]; z <= ibounds[1][2]; z++) { uint8_t primaryLight = primaryLightGrid[x * 8 + y * 8 * w->lightGridBounds[0] + z * 8 * w->lightGridBounds[0] * w->lightGridBounds[2]]; if (primaryLight == 0) continue; numSamples++; if (primaryLight == 1) goodSamples++; } } } // FIXME: magic number for determining whether object is mostly in sunlight if (goodSamples > numSamples * 0.75f) { //ri.Printf(PRINT_ALL, "surface %d is in sunlight\n", i); //surf->primaryLight = 1; } } } ri.Free(primaryLightGrid); } // load cubemaps if (r_cubeMapping->integer) { // Try loading an env.json file first R_LoadEnvironmentJson(s_worldData.baseName); if (!tr.numCubemaps) { R_LoadCubemapEntities("misc_cubemap"); } if (!tr.numCubemaps) { // use deathmatch spawn points as cubemaps R_LoadCubemapEntities("info_player_deathmatch"); } if (tr.numCubemaps) { R_AssignCubemapsToWorldSurfaces(); } } s_worldData.dataSize = (byte *)ri.Hunk_Alloc(0, h_low) 
- startMarker; // only set tr.world now that we know the entire level has loaded properly tr.world = &s_worldData; // make sure the VAO glState entry is safe R_BindNullVao(); // Render or load all cubemaps if (r_cubeMapping->integer && tr.numCubemaps && glRefConfig.framebufferObject) { R_LoadCubemaps(); R_RenderMissingCubemaps(); } ri.FS_FreeFile( buffer.v ); }