diff --git a/code/renderer/tr_init.c b/code/renderer/tr_init.c
index c27afecd..57c4a24b 100644
--- a/code/renderer/tr_init.c
+++ b/code/renderer/tr_init.c
@@ -143,6 +143,8 @@ cvar_t	*r_debugSort;
 cvar_t	*r_printShaders;
 cvar_t	*r_saveFontData;
 
+cvar_t	*r_GLlibCoolDownMsec;
+
 cvar_t	*r_maxpolys;
 int		max_polys;
 cvar_t	*r_maxpolyverts;
@@ -988,6 +990,8 @@ void R_Register( void )
 	r_maxpolys = ri.Cvar_Get( "r_maxpolys", va("%d", MAX_POLYS), 0);
 	r_maxpolyverts = ri.Cvar_Get( "r_maxpolyverts", va("%d", MAX_POLYVERTS), 0);
 
+	r_GLlibCoolDownMsec = ri.Cvar_Get( "r_GLlibCoolDownMsec", "0", CVAR_ARCHIVE );
+
 	// make sure all the commands added here are also
 	// removed in R_Shutdown
 	ri.Cmd_AddCommand( "imagelist", R_ImageList_f );
diff --git a/code/renderer/tr_local.h b/code/renderer/tr_local.h
index e29feba6..e9de83ad 100644
--- a/code/renderer/tr_local.h
+++ b/code/renderer/tr_local.h
@@ -1091,6 +1091,8 @@ extern	cvar_t	*r_debugSort;
 extern	cvar_t	*r_printShaders;
 extern	cvar_t	*r_saveFontData;
 
+extern	cvar_t	*r_GLlibCoolDownMsec;
+
 //====================================================================
 
 float R_NoiseGet4f( float x, float y, float z, float t );
diff --git a/code/unix/linux_qgl.c b/code/unix/linux_qgl.c
index b38edbcb..d4841c27 100644
--- a/code/unix/linux_qgl.c
+++ b/code/unix/linux_qgl.c
@@ -2651,6 +2651,24 @@ void QGL_Shutdown( void )
 {
 	if ( glw_state.OpenGLLib )
 	{
+		// 25/09/05 Tim Angus
+		// Certain combinations of hardware and software, specifically
+		// Linux/SMP/Nvidia/agpgart (OK, OK. MY combination of hardware and
+		// software), seem to cause a catastrophic (hard reboot required) crash
+		// when libGL is dynamically unloaded. I'm unsure of the precise cause;
+		// suffice it to say I don't see anything in the Q3 code that could
+		// cause it. I suspect it's an Nvidia driver bug, but without the source
+		// or means to debug I obviously can't prove (or disprove) this.
+		// Interestingly (though perhaps not surprisingly), Enemy Territory and
+		// Doom 3 both exhibit the same problem.
+		//
+		// After many, many reboots and prodding here and there, it seems that
+		// placing a short delay before libGL is unloaded works around the
+		// problem. This delay is changeable via the r_GLlibCoolDownMsec cvar
+		// (nice name, huh?), and it defaults to 0. For me, 500 seems to work.
+		if( r_GLlibCoolDownMsec->integer )
+			usleep( r_GLlibCoolDownMsec->integer * 1000 );
+
 		dlclose ( glw_state.OpenGLLib );
 		glw_state.OpenGLLib = NULL;
 	}