* AVI video output

  - Uses motion jpeg codec by default
  - Use cl_avidemo to set a framerate
  - \video [filename] to start capture
  - \stopvideo to stop capture
  - Audio capture is a bit ropey
Tim Angus 2006-01-04 03:12:12 +00:00
parent 92ad3e99dc
commit a21eb2bbcb
15 changed files with 910 additions and 11 deletions
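
A capture session using the new cvars and commands might look like the
following at the console (the demo and file names are only placeholders;
\video with no argument picks the next free videos/videoNNNN.avi instead):

  \cl_avidemo 25          (fixed capture framerate, in frames per second)
  \cl_aviMotionJpeg 1     (motion jpeg codec, the default; 0 writes raw frames)
  \s_useOpenAL 0          (audio capture is only supported with the base sound backend)
  \video mymovie          (starts writing videos/mymovie.avi)
  ... play a demo or a game ...
  \stopvideo              (closes the file and appends the index)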

code/client/cl_avi.c (new file, 619 lines)

@@ -0,0 +1,619 @@
/*
===========================================================================
Copyright (C) 2005-2006 Tim Angus
This file is part of Quake III Arena source code.
Quake III Arena source code is free software; you can redistribute it
and/or modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
Quake III Arena source code is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Quake III Arena source code; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
===========================================================================
*/
#include "client.h"
#include "snd_local.h"
#define MAX_RIFF_CHUNKS 16
typedef struct audioFormat_s
{
int rate;
int format;
int channels;
int bits;
int sampleSize;
int totalBytes;
} audioFormat_t;
typedef struct aviFileData_s
{
qboolean fileOpen;
fileHandle_t f;
char fileName[ MAX_QPATH ];
int fileSize;
int moviOffset;
int moviSize;
fileHandle_t idxF;
int numIndices;
int frameRate;
int framePeriod;
int width, height;
int numVideoFrames;
int maxRecordSize;
qboolean motionJpeg;
qboolean audio;
audioFormat_t a;
int numAudioFrames;
int chunkStack[ MAX_RIFF_CHUNKS ];
int chunkStackTop;
byte *cBuffer, *eBuffer;
} aviFileData_t;
static aviFileData_t afd;
#define MAX_AVI_BUFFER 2048
static byte buffer[ MAX_AVI_BUFFER ];
static int bufIndex;
/*
===============
SafeFS_Write
===============
*/
static ID_INLINE void SafeFS_Write( const void *buffer, int len, fileHandle_t f )
{
if( FS_Write( buffer, len, f ) < len )
Com_Error( ERR_DROP, "Failed to write avi file\n" );
}
/*
===============
WRITE_STRING
===============
*/
static ID_INLINE void WRITE_STRING( const char *s )
{
Com_Memcpy( &buffer[ bufIndex ], s, strlen( s ) );
bufIndex += strlen( s );
}
/*
===============
WRITE_4BYTES
===============
*/
static ID_INLINE void WRITE_4BYTES( int x )
{
buffer[ bufIndex + 0 ] = (byte)( ( x >> 0 ) & 0xFF );
buffer[ bufIndex + 1 ] = (byte)( ( x >> 8 ) & 0xFF );
buffer[ bufIndex + 2 ] = (byte)( ( x >> 16 ) & 0xFF );
buffer[ bufIndex + 3 ] = (byte)( ( x >> 24 ) & 0xFF );
bufIndex += 4;
}
/*
===============
WRITE_2BYTES
===============
*/
static ID_INLINE void WRITE_2BYTES( int x )
{
buffer[ bufIndex + 0 ] = (byte)( ( x >> 0 ) & 0xFF );
buffer[ bufIndex + 1 ] = (byte)( ( x >> 8 ) & 0xFF );
bufIndex += 2;
}
/*
===============
WRITE_1BYTES
===============
*/
static ID_INLINE void WRITE_1BYTES( int x )
{
buffer[ bufIndex ] = x;
bufIndex += 1;
}
/*
===============
START_CHUNK
===============
*/
static ID_INLINE void START_CHUNK( const char *s )
{
if( afd.chunkStackTop == MAX_RIFF_CHUNKS )
{
Com_Error( ERR_DROP, "ERROR: Top of chunkstack breached\n" );
}
afd.chunkStack[ afd.chunkStackTop ] = bufIndex;
afd.chunkStackTop++;
WRITE_STRING( s );
WRITE_4BYTES( 0 );
}
/*
===============
END_CHUNK
===============
*/
static ID_INLINE void END_CHUNK( void )
{
int endIndex = bufIndex;
if( afd.chunkStackTop <= 0 )
{
Com_Error( ERR_DROP, "ERROR: Bottom of chunkstack breached\n" );
}
afd.chunkStackTop--;
bufIndex = afd.chunkStack[ afd.chunkStackTop ];
bufIndex += 4;
WRITE_4BYTES( endIndex - bufIndex - 4 );
bufIndex = endIndex;
bufIndex = PAD( bufIndex, 2 );
}
/*
===============
CL_WriteAVIHeader
===============
*/
void CL_WriteAVIHeader( void )
{
bufIndex = 0;
afd.chunkStackTop = 0;
START_CHUNK( "RIFF" );
{
WRITE_STRING( "AVI " );
{
START_CHUNK( "LIST" );
{
WRITE_STRING( "hdrl" );
WRITE_STRING( "avih" );
WRITE_4BYTES( 56 ); //"avih" "chunk" size
WRITE_4BYTES( afd.framePeriod ); //dwMicroSecPerFrame
WRITE_4BYTES( afd.maxRecordSize *
afd.frameRate ); //dwMaxBytesPerSec
WRITE_4BYTES( 0 ); //dwReserved1
WRITE_4BYTES( 0x110 ); //dwFlags bits HAS_INDEX and IS_INTERLEAVED
WRITE_4BYTES( afd.numVideoFrames ); //dwTotalFrames
WRITE_4BYTES( 0 ); //dwInitialFrame
if( afd.audio ) //dwStreams
WRITE_4BYTES( 2 );
else
WRITE_4BYTES( 1 );
WRITE_4BYTES( afd.maxRecordSize ); //dwSuggestedBufferSize
WRITE_4BYTES( afd.width ); //dwWidth
WRITE_4BYTES( afd.height ); //dwHeight
WRITE_4BYTES( 0 ); //dwReserved[ 0 ]
WRITE_4BYTES( 0 ); //dwReserved[ 1 ]
WRITE_4BYTES( 0 ); //dwReserved[ 2 ]
WRITE_4BYTES( 0 ); //dwReserved[ 3 ]
START_CHUNK( "LIST" );
{
WRITE_STRING( "strl" );
WRITE_STRING( "strh" );
WRITE_4BYTES( 56 ); //"strh" "chunk" size
WRITE_STRING( "vids" );
if( afd.motionJpeg )
WRITE_STRING( "MJPG" );
else
WRITE_STRING( " BGR" );
WRITE_4BYTES( 0 ); //dwFlags
WRITE_4BYTES( 0 ); //dwPriority
WRITE_4BYTES( 0 ); //dwInitialFrame
WRITE_4BYTES( 1 ); //dwTimescale
WRITE_4BYTES( afd.frameRate ); //dwDataRate
WRITE_4BYTES( 0 ); //dwStartTime
WRITE_4BYTES( afd.numVideoFrames ); //dwDataLength
WRITE_4BYTES( afd.maxRecordSize ); //dwSuggestedBufferSize
WRITE_4BYTES( -1 ); //dwQuality
WRITE_4BYTES( 0 ); //dwSampleSize
WRITE_2BYTES( 0 ); //rcFrame
WRITE_2BYTES( 0 ); //rcFrame
WRITE_2BYTES( afd.width ); //rcFrame
WRITE_2BYTES( afd.height ); //rcFrame
WRITE_STRING( "strf" );
WRITE_4BYTES( 40 ); //"strf" "chunk" size
WRITE_4BYTES( 40 ); //biSize
WRITE_4BYTES( afd.width ); //biWidth
WRITE_4BYTES( afd.height ); //biHeight
WRITE_2BYTES( 1 ); //biPlanes
WRITE_2BYTES( 24 ); //biBitCount
if( afd.motionJpeg ) //biCompression
WRITE_STRING( "MJPG" );
else
WRITE_STRING( " BGR" );
WRITE_4BYTES( afd.width *
afd.height ); //biSizeImage
WRITE_4BYTES( 0 ); //biXPelsPerMeter
WRITE_4BYTES( 0 ); //biYPelsPerMeter
WRITE_4BYTES( 0 ); //biClrUsed
WRITE_4BYTES( 0 ); //biClrImportant
}
END_CHUNK( );
if( afd.audio )
{
START_CHUNK( "LIST" );
{
WRITE_STRING( "strl" );
WRITE_STRING( "strh" );
WRITE_4BYTES( 56 ); //"strh" "chunk" size
WRITE_STRING( "auds" );
WRITE_4BYTES( 0 ); //FCC
WRITE_4BYTES( 0 ); //dwFlags
WRITE_4BYTES( 0 ); //dwPriority
WRITE_4BYTES( 0 ); //dwInitialFrame
WRITE_4BYTES( afd.a.sampleSize ); //dwTimescale
WRITE_4BYTES( afd.a.sampleSize *
afd.a.rate ); //dwDataRate
WRITE_4BYTES( 0 ); //dwStartTime
WRITE_4BYTES( afd.a.totalBytes /
afd.a.sampleSize ); //dwDataLength
WRITE_4BYTES( 0 ); //dwSuggestedBufferSize
WRITE_4BYTES( -1 ); //dwQuality
WRITE_4BYTES( afd.a.sampleSize ); //dwSampleSize
WRITE_2BYTES( 0 ); //rcFrame
WRITE_2BYTES( 0 ); //rcFrame
WRITE_2BYTES( 0 ); //rcFrame
WRITE_2BYTES( 0 ); //rcFrame
WRITE_STRING( "strf" );
WRITE_4BYTES( 18 ); //"strf" "chunk" size
WRITE_2BYTES( afd.a.format ); //wFormatTag
WRITE_2BYTES( afd.a.channels ); //nChannels
WRITE_4BYTES( afd.a.rate ); //nSamplesPerSec
WRITE_4BYTES( afd.a.sampleSize *
afd.a.rate ); //nAvgBytesPerSec
WRITE_2BYTES( afd.a.sampleSize ); //nBlockAlign
WRITE_2BYTES( afd.a.bits ); //wBitsPerSample
WRITE_2BYTES( 0 ); //cbSize
}
END_CHUNK( );
}
}
END_CHUNK( );
afd.moviOffset = bufIndex;
START_CHUNK( "LIST" );
{
WRITE_STRING( "movi" );
}
}
}
}
/*
===============
CL_OpenAVIForWriting
Creates an AVI file and gets it into a state where
writing the actual data can begin
===============
*/
qboolean CL_OpenAVIForWriting( const char *fileName )
{
if( afd.fileOpen )
return qfalse;
Com_Memset( &afd, 0, sizeof( aviFileData_t ) );
// Don't start if a framerate has not been chosen
if( cl_avidemo->integer <= 0 )
{
Com_Printf( S_COLOR_RED "cl_avidemo must be >= 1\n" );
return qfalse;
}
if( ( afd.f = FS_FOpenFileWrite( fileName ) ) <= 0 )
return qfalse;
if( ( afd.idxF = FS_FOpenFileWrite( va( "%s.idx", fileName ) ) ) <= 0 )
{
FS_FCloseFile( afd.f );
return qfalse;
}
Q_strncpyz( afd.fileName, fileName, MAX_QPATH );
afd.frameRate = cl_avidemo->integer;
afd.framePeriod = (int)( 1000000.0f / afd.frameRate );
afd.width = cls.glconfig.vidWidth;
afd.height = cls.glconfig.vidHeight;
if( cl_aviMotionJpeg->integer )
afd.motionJpeg = qtrue;
else
afd.motionJpeg = qfalse;
afd.cBuffer = Z_Malloc( afd.width * afd.height * 4 );
afd.eBuffer = Z_Malloc( afd.width * afd.height * 4 );
afd.a.rate = dma.speed;
afd.a.format = WAV_FORMAT_PCM;
afd.a.channels = dma.channels;
afd.a.bits = dma.samplebits;
afd.a.sampleSize = ( afd.a.bits / 8 ) * afd.a.channels;
if( afd.a.rate % afd.frameRate )
{
int suggestRate = afd.frameRate;
while( ( afd.a.rate % suggestRate ) && suggestRate >= 1 )
suggestRate--;
Com_Printf( S_COLOR_YELLOW "WARNING: cl_avidemo is not a divisor "
"of the audio rate, suggest %d\n", suggestRate );
}
if( !Cvar_VariableIntegerValue( "s_initsound" ) )
{
afd.audio = qfalse;
}
else if( Q_stricmp( Cvar_VariableString( "s_backend" ), "OpenAL" ) )
{
if( afd.a.bits == 16 && afd.a.channels == 2 )
afd.audio = qtrue;
else
afd.audio = qfalse; //FIXME: audio not implemented for this case
}
else
{
afd.audio = qfalse;
Com_Printf( S_COLOR_YELLOW "WARNING: Audio capture is not supported "
"with OpenAL. Set s_useOpenAL to 0 for audio capture\n" );
}
// This doesn't write a real header, but allocates the
// correct amount of space at the beginning of the file
CL_WriteAVIHeader( );
SafeFS_Write( buffer, bufIndex, afd.f );
afd.fileSize = bufIndex;
bufIndex = 0;
START_CHUNK( "idx1" );
SafeFS_Write( buffer, bufIndex, afd.idxF );
afd.moviSize = 4; // For the "movi"
afd.fileOpen = qtrue;
return qtrue;
}
/*
===============
CL_WriteAVIVideoFrame
===============
*/
void CL_WriteAVIVideoFrame( const byte *imageBuffer, int size )
{
int chunkOffset = afd.fileSize - afd.moviOffset - 8;
int chunkSize = 8 + size;
int paddingSize = PAD( size, 2 ) - size;
byte padding[ 4 ] = { 0 };
if( !afd.fileOpen )
return;
bufIndex = 0;
WRITE_STRING( "00dc" );
WRITE_4BYTES( size );
SafeFS_Write( buffer, 8, afd.f );
SafeFS_Write( imageBuffer, size, afd.f );
SafeFS_Write( padding, paddingSize, afd.f );
afd.fileSize += ( chunkSize + paddingSize );
afd.numVideoFrames++;
afd.moviSize += ( chunkSize + paddingSize );
if( size > afd.maxRecordSize )
afd.maxRecordSize = size;
// Index
bufIndex = 0;
WRITE_STRING( "00dc" ); //dwIdentifier
WRITE_4BYTES( 0 ); //dwFlags
WRITE_4BYTES( chunkOffset ); //dwOffset
WRITE_4BYTES( size ); //dwLength
SafeFS_Write( buffer, 16, afd.idxF );
afd.numIndices++;
}
#define PCM_BUFFER_SIZE 44100
/*
===============
CL_WriteAVIAudioFrame
===============
*/
void CL_WriteAVIAudioFrame( const byte *pcmBuffer, int size )
{
static byte pcmCaptureBuffer[ PCM_BUFFER_SIZE ] = { 0 };
static int bytesInBuffer = 0;
if( !afd.audio )
return;
if( !afd.fileOpen )
return;
if( bytesInBuffer + size > PCM_BUFFER_SIZE )
{
Com_Printf( S_COLOR_YELLOW
"WARNING: Audio capture buffer overflow -- truncating\n" );
size = PCM_BUFFER_SIZE - bytesInBuffer;
}
Com_Memcpy( &pcmCaptureBuffer[ bytesInBuffer ], pcmBuffer, size );
bytesInBuffer += size;
// Only write if we have a frame's worth of audio
if( bytesInBuffer >= (int)ceil( afd.a.rate / cl_avidemo->value ) *
afd.a.sampleSize )
{
int chunkOffset = afd.fileSize - afd.moviOffset - 8;
int chunkSize = 8 + bytesInBuffer;
int paddingSize = PAD( bytesInBuffer, 2 ) - bytesInBuffer;
byte padding[ 4 ] = { 0 };
bufIndex = 0;
WRITE_STRING( "01wb" );
WRITE_4BYTES( bytesInBuffer );
SafeFS_Write( buffer, 8, afd.f );
SafeFS_Write( pcmCaptureBuffer, bytesInBuffer, afd.f );
SafeFS_Write( padding, paddingSize, afd.f );
afd.fileSize += ( chunkSize + paddingSize );
afd.numAudioFrames++;
afd.moviSize += ( chunkSize + paddingSize );
afd.a.totalBytes += bytesInBuffer;
// Index
bufIndex = 0;
WRITE_STRING( "01wb" ); //dwIdentifier
WRITE_4BYTES( 0 ); //dwFlags
WRITE_4BYTES( chunkOffset ); //dwOffset
WRITE_4BYTES( bytesInBuffer ); //dwLength
SafeFS_Write( buffer, 16, afd.idxF );
afd.numIndices++;
bytesInBuffer = 0;
}
}
/*
===============
CL_TakeVideoFrame
===============
*/
void CL_TakeVideoFrame( void )
{
// AVI file isn't open
if( !afd.fileOpen )
return;
re.TakeVideoFrame( afd.width, afd.height,
afd.cBuffer, afd.eBuffer, afd.motionJpeg );
}
/*
===============
CL_CloseAVI
Closes the AVI file and writes an index chunk
===============
*/
qboolean CL_CloseAVI( void )
{
int indexRemainder;
int indexSize = afd.numIndices * 16;
const char *idxFileName = va( "%s.idx", afd.fileName );
// AVI file isn't open
if( !afd.fileOpen )
return qfalse;
afd.fileOpen = qfalse;
FS_Seek( afd.idxF, 4, FS_SEEK_SET );
bufIndex = 0;
WRITE_4BYTES( indexSize );
SafeFS_Write( buffer, bufIndex, afd.idxF );
FS_FCloseFile( afd.idxF );
// Write index
// Open the temp index file
if( ( indexSize = FS_FOpenFileRead( idxFileName,
&afd.idxF, qtrue ) ) <= 0 )
{
FS_FCloseFile( afd.f );
return qfalse;
}
indexRemainder = indexSize;
// Append index to end of avi file
while( indexRemainder > MAX_AVI_BUFFER )
{
FS_Read( buffer, MAX_AVI_BUFFER, afd.idxF );
SafeFS_Write( buffer, MAX_AVI_BUFFER, afd.f );
afd.fileSize += MAX_AVI_BUFFER;
indexRemainder -= MAX_AVI_BUFFER;
}
FS_Read( buffer, indexRemainder, afd.idxF );
SafeFS_Write( buffer, indexRemainder, afd.f );
afd.fileSize += indexRemainder;
FS_FCloseFile( afd.idxF );
// Remove temp index file
FS_HomeRemove( idxFileName );
// Write the real header
FS_Seek( afd.f, 0, FS_SEEK_SET );
CL_WriteAVIHeader( );
bufIndex = 4;
WRITE_4BYTES( afd.fileSize - 8 ); // "RIFF" size
bufIndex = afd.moviOffset + 4; // Skip "LIST"
WRITE_4BYTES( afd.moviSize );
SafeFS_Write( buffer, bufIndex, afd.f );
Z_Free( afd.cBuffer );
Z_Free( afd.eBuffer );
FS_FCloseFile( afd.f );
Com_Printf( "Wrote %d:%d frames to %s\n", afd.numVideoFrames, afd.numAudioFrames, afd.fileName );
return qtrue;
}
/*
===============
CL_VideoRecording
===============
*/
qboolean CL_VideoRecording( void )
{
return afd.fileOpen;
}
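
START_CHUNK and END_CHUNK above are ordinary RIFF bookkeeping: write a
four-character code and a 32 bit little-endian size placeholder, remember the
offset on a small stack, and back-patch the size (the number of bytes that
follow the size field) when the chunk is closed, padding to an even length.
Below is a minimal, self-contained sketch of the same idea in plain C; the
buffer and helper names (buf, at, put32, startChunk, endChunk) are our own and
are not part of the engine:

/* riff_sketch.c -- illustrative only; mirrors the chunk bookkeeping of
   cl_avi.c against a plain byte array.  Build with: cc riff_sketch.c */
#include <stdio.h>
#include <string.h>

static unsigned char buf[ 256 ];
static int at;              /* current write position, like bufIndex */
static int stack[ 16 ];     /* offsets of open chunks, like chunkStack */
static int top;

static void put32( unsigned x )   /* little-endian, like WRITE_4BYTES */
{
  buf[ at++ ] = (unsigned char)( x & 0xFF );
  buf[ at++ ] = (unsigned char)( ( x >> 8 ) & 0xFF );
  buf[ at++ ] = (unsigned char)( ( x >> 16 ) & 0xFF );
  buf[ at++ ] = (unsigned char)( ( x >> 24 ) & 0xFF );
}

static void startChunk( const char *fourcc )  /* like START_CHUNK */
{
  stack[ top++ ] = at;            /* remember where this chunk starts */
  memcpy( &buf[ at ], fourcc, 4 );
  at += 4;
  put32( 0 );                     /* size placeholder, patched later */
}

static void endChunk( void )      /* like END_CHUNK */
{
  int end = at;
  int sizePos = stack[ --top ] + 4;   /* the placeholder written above */

  at = sizePos;
  put32( end - sizePos - 4 );     /* RIFF size = bytes after the size field */
  at = end + ( end & 1 );         /* chunks are padded to even length */
}

int main( void )
{
  startChunk( "RIFF" );
  memcpy( &buf[ at ], "AVI ", 4 );
  at += 4;
  startChunk( "LIST" );
  memcpy( &buf[ at ], "movi", 4 );
  at += 4;
  endChunk( );
  endChunk( );

  /* Expect 24 bytes total and a RIFF size of 16: the "AVI " form type
     plus the 12 byte LIST chunk */
  printf( "total %d bytes, RIFF size field = %u\n", at,
      buf[ 4 ] | ( buf[ 5 ] << 8 ) | ( buf[ 6 ] << 16 ) |
      ( (unsigned)buf[ 7 ] << 24 ) );
  return 0;
}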

@@ -44,6 +44,7 @@ cvar_t *cl_shownet;
cvar_t *cl_showSend;
cvar_t *cl_timedemo;
cvar_t *cl_avidemo;
cvar_t *cl_aviMotionJpeg;
cvar_t *cl_forceavidemo;
cvar_t *cl_freelook;
@@ -773,6 +774,11 @@ void CL_Disconnect( qboolean showMainMenu ) {
// not connected to a pure server anymore
cl_connectedToPureServer = qfalse;
// Stop recording any video
if( CL_VideoRecording( ) ) {
CL_CloseAVI( );
}
}
@@ -1189,6 +1195,11 @@ doesn't know what graphics to reload
*/
void CL_Vid_Restart_f( void ) {
// Settings may have changed so stop recording now
if( CL_VideoRecording( ) ) {
CL_CloseAVI( );
}
// don't let them loop during the restart
S_StopAllSounds();
// shutdown the UI
@@ -2014,15 +2025,16 @@ void CL_Frame ( int msec ) {
}
// if recording an avi, lock to a fixed fps
if ( cl_avidemo->integer && msec) {
if ( CL_VideoRecording( ) && cl_avidemo->integer && msec) {
// save the current screen
if ( cls.state == CA_ACTIVE || cl_forceavidemo->integer) {
Cbuf_ExecuteText( EXEC_NOW, "screenshot silent\n" );
}
// fixed time for next frame'
msec = (1000 / cl_avidemo->integer) * com_timescale->value;
if (msec == 0) {
msec = 1;
CL_TakeVideoFrame( );
// fixed time for next frame'
msec = (int)ceil( (1000.0f / cl_avidemo->value) * com_timescale->value );
if (msec == 0) {
msec = 1;
}
}
}
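
To make the pacing concrete: with cl_avidemo set to 25 and com_timescale at 1,
each pass through this block advances the client clock by
ceil( (1000.0 / 25) * 1 ) = 40 msec, so every CL_TakeVideoFrame call
corresponds to exactly 1/25th of a second of game time, no matter how long the
frame actually took to render and encode.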
@@ -2223,6 +2235,8 @@ void CL_InitRef( void ) {
ri.CIN_PlayCinematic = CIN_PlayCinematic;
ri.CIN_RunCinematic = CIN_RunCinematic;
ri.CL_WriteAVIVideoFrame = CL_WriteAVIVideoFrame;
ret = GetRefAPI( REF_API_VERSION, &ri );
#if defined __USEA3D && defined __A3D_GEOM
@@ -2259,6 +2273,72 @@ void CL_SetModel_f( void ) {
}
}
//===========================================================================================
/*
===============
CL_Video_f
video
video [filename]
===============
*/
void CL_Video_f( void )
{
char filename[ MAX_OSPATH ];
int i, last;
if( Cmd_Argc( ) == 2 )
{
// explicit filename
Com_sprintf( filename, MAX_OSPATH, "videos/%s.avi", Cmd_Argv( 1 ) );
}
else
{
// scan for a free filename
for( i = 0; i <= 9999; i++ )
{
int a, b, c, d;
last = i;
a = last / 1000;
last -= a * 1000;
b = last / 100;
last -= b * 100;
c = last / 10;
last -= c * 10;
d = last;
Com_sprintf( filename, MAX_OSPATH, "videos/video%d%d%d%d.avi",
a, b, c, d );
if( !FS_FileExists( filename ) )
break; // file doesn't exist
}
if( i > 9999 )
{
Com_Printf( S_COLOR_RED "ERROR: no free file names to create video\n" );
return;
}
}
CL_OpenAVIForWriting( filename );
}
/*
===============
CL_StopVideo_f
===============
*/
void CL_StopVideo_f( void )
{
CL_CloseAVI( );
}
/*
====================
CL_Init
@@ -2294,7 +2374,8 @@ void CL_Init( void ) {
cl_activeAction = Cvar_Get( "activeAction", "", CVAR_TEMP );
cl_timedemo = Cvar_Get ("timedemo", "0", 0);
cl_avidemo = Cvar_Get ("cl_avidemo", "0", 0);
cl_avidemo = Cvar_Get ("cl_avidemo", "25", CVAR_ARCHIVE);
cl_aviMotionJpeg = Cvar_Get ("cl_aviMotionJpeg", "1", CVAR_ARCHIVE);
cl_forceavidemo = Cvar_Get ("cl_forceavidemo", "0", 0);
rconAddress = Cvar_Get ("rconAddress", "", 0);
@@ -2395,6 +2476,8 @@ void CL_Init( void ) {
Cmd_AddCommand ("fs_openedList", CL_OpenedPK3List_f );
Cmd_AddCommand ("fs_referencedList", CL_ReferencedPK3List_f );
Cmd_AddCommand ("model", CL_SetModel_f );
Cmd_AddCommand ("video", CL_Video_f );
Cmd_AddCommand ("stopvideo", CL_StopVideo_f );
CL_InitRef();
SCR_Init ();
@@ -2450,6 +2533,8 @@ void CL_Shutdown( void ) {
Cmd_RemoveCommand ("serverstatus");
Cmd_RemoveCommand ("showip");
Cmd_RemoveCommand ("model");
Cmd_RemoveCommand ("video");
Cmd_RemoveCommand ("stopvideo");
Cvar_Set( "cl_running", "0" );

@@ -343,6 +343,8 @@ extern cvar_t *m_side;
extern cvar_t *m_filter;
extern cvar_t *cl_timedemo;
extern cvar_t *cl_avidemo;
extern cvar_t *cl_aviMotionJpeg;
extern cvar_t *cl_activeAction;
@@ -518,3 +520,13 @@ void LAN_SaveServersToCache( void );
void CL_Netchan_Transmit( netchan_t *chan, msg_t* msg); //int length, const byte *data );
void CL_Netchan_TransmitNextFragment( netchan_t *chan );
qboolean CL_Netchan_Process( netchan_t *chan, msg_t *msg );
//
// cl_avi.c
//
qboolean CL_OpenAVIForWriting( const char *filename );
void CL_TakeVideoFrame( void );
void CL_WriteAVIVideoFrame( const byte *imageBuffer, int size );
void CL_WriteAVIAudioFrame( const byte *pcmBuffer, int size );
qboolean CL_CloseAVI( void );
qboolean CL_VideoRecording( void );

@@ -1139,6 +1139,12 @@ void S_GetSoundtime(void)
fullsamples = dma.samples / dma.channels;
if( CL_VideoRecording( ) )
{
s_soundtime += (int)ceil( dma.speed / cl_avidemo->value );
return;
}
// it is possible to miscount buffers if it has wrapped twice between
// calls to S_Update. Oh well.
samplepos = SNDDMA_GetDMAPos();
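
This keeps the mixer in lockstep with the captured video rather than with real
time. For example, at a 22050Hz mixing rate with cl_avidemo 25, s_soundtime
advances by ceil( 22050 / 25 ) = 882 samples per captured frame; at 16 bit
stereo (a sampleSize of 4 bytes) that is 882 * 4 = 3528 bytes, which is exactly
the amount CL_WriteAVIAudioFrame accumulates before emitting a "01wb" chunk.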

@@ -29,6 +29,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
cvar_t *s_volume;
cvar_t *s_musicVolume;
cvar_t *s_doppler;
cvar_t *s_backend;
static soundInterface_t si;
@@ -370,6 +371,7 @@ void S_Init( void )
s_volume = Cvar_Get( "s_volume", "0.8", CVAR_ARCHIVE );
s_musicVolume = Cvar_Get( "s_musicvolume", "0.25", CVAR_ARCHIVE );
s_doppler = Cvar_Get( "s_doppler", "1", CVAR_ARCHIVE );
s_backend = Cvar_Get( "s_backend", "", CVAR_ROM );
cv = Cvar_Get( "s_initsound", "1", 0 );
if( !cv->integer ) {
@@ -388,10 +390,12 @@ void S_Init( void )
if( cv->integer ) {
//OpenAL
started = S_AL_Init( &si );
Cvar_Set( "s_backend", "OpenAL" );
}
if( !started ) {
started = S_Base_Init( &si );
Cvar_Set( "s_backend", "base" );
}
if( started ) {

@@ -21,6 +21,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
// snd_mix.c -- portable code to mix sounds for snd_dma.c
#include "client.h"
#include "snd_local.h"
#if idppc_altivec && !defined(MACOS_X)
#include <altivec.h>
@@ -137,6 +138,9 @@ void S_TransferStereo16 (unsigned long *pbuf, int endtime)
snd_p += snd_linear_count;
ls_paintedtime += (snd_linear_count>>1);
if( CL_VideoRecording( ) )
CL_WriteAVIAudioFrame( (byte *)snd_out, snd_linear_count << 1 );
}
}

@@ -565,10 +565,21 @@ FS_Remove
===========
*/
static void FS_Remove( const char *osPath ) {
void FS_Remove( const char *osPath ) {
remove( osPath );
}
/*
===========
FS_HomeRemove
===========
*/
void FS_HomeRemove( const char *homePath ) {
remove( FS_BuildOSPath( fs_homepath->string,
fs_gamedir, homePath ) );
}
/*
================
FS_FileExists

@@ -654,6 +654,9 @@ qboolean FS_ComparePaks( char *neededpaks, int len, qboolean dlstring );
void FS_Rename( const char *from, const char *to );
void FS_Remove( const char *osPath );
void FS_HomeRemove( const char *homePath );
/*
==============================================================
@@ -899,7 +902,6 @@ void S_ClearSoundBuffer( void );
void SCR_DebugGraph (float value, int color); // FIXME: move logging to common?
//
// server interface
//

@@ -1081,6 +1081,9 @@ void RB_ExecuteRenderCommands( const void *data ) {
case RC_SCREENSHOT:
data = RB_TakeScreenshotCmd( data );
break;
case RC_VIDEOFRAME:
data = RB_TakeVideoFrameCmd( data );
break;
case RC_END_OF_LIST:
default:

@@ -445,3 +445,30 @@ void RE_EndFrame( int *frontEndMsec, int *backEndMsec ) {
backEnd.pc.msec = 0;
}
/*
=============
RE_TakeVideoFrame
=============
*/
void RE_TakeVideoFrame( int width, int height,
byte *captureBuffer, byte *encodeBuffer, qboolean motionJpeg )
{
videoFrameCommand_t *cmd;
if( !tr.registered ) {
return;
}
cmd = R_GetCommandBuffer( sizeof( *cmd ) );
if( !cmd ) {
return;
}
cmd->commandId = RC_VIDEOFRAME;
cmd->width = width;
cmd->height = height;
cmd->captureBuffer = captureBuffer;
cmd->encodeBuffer = encodeBuffer;
cmd->motionJpeg = motionJpeg;
}

@@ -1852,6 +1852,64 @@ void SaveJPG(char * filename, int quality, int image_width, int image_height, un
/* And we're done! */
}
/*
=================
SaveJPGToBuffer
=================
*/
int SaveJPGToBuffer( byte *buffer, int quality,
int image_width, int image_height,
byte *image_buffer )
{
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
JSAMPROW row_pointer[1]; /* pointer to JSAMPLE row[s] */
int row_stride; /* physical row width in image buffer */
/* Step 1: allocate and initialize JPEG compression object */
cinfo.err = jpeg_std_error(&jerr);
/* Now we can initialize the JPEG compression object. */
jpeg_create_compress(&cinfo);
/* Step 2: specify data destination (eg, a file) */
/* Note: steps 2 and 3 can be done in either order. */
jpegDest(&cinfo, buffer, image_width*image_height*4);
/* Step 3: set parameters for compression */
cinfo.image_width = image_width; /* image width and height, in pixels */
cinfo.image_height = image_height;
cinfo.input_components = 4; /* # of color components per pixel */
cinfo.in_color_space = JCS_RGB; /* colorspace of input image */
jpeg_set_defaults(&cinfo);
jpeg_set_quality(&cinfo, quality, TRUE /* limit to baseline-JPEG values */);
/* Step 4: Start compressor */
jpeg_start_compress(&cinfo, TRUE);
/* Step 5: while (scan lines remain to be written) */
/* jpeg_write_scanlines(...); */
row_stride = image_width * 4; /* JSAMPLEs per row in image_buffer */
while (cinfo.next_scanline < cinfo.image_height) {
/* jpeg_write_scanlines expects an array of pointers to scanlines.
* Here the array is only one element long, but you could pass
* more than one scanline at a time if that's more convenient.
*/
row_pointer[0] = & image_buffer[((cinfo.image_height-1)*row_stride)-cinfo.next_scanline * row_stride];
(void) jpeg_write_scanlines(&cinfo, row_pointer, 1);
}
/* Step 6: Finish compression */
jpeg_finish_compress(&cinfo);
/* Step 7: release JPEG compression object */
jpeg_destroy_compress(&cinfo);
/* And we're done! */
return hackSize;
}
//===================================================================
/*

@@ -699,6 +699,51 @@ void R_ScreenShotJPEG_f (void) {
//============================================================================
/*
==================
RB_TakeVideoFrameCmd
==================
*/
const void *RB_TakeVideoFrameCmd( const void *data )
{
const videoFrameCommand_t *cmd;
int frameSize;
int i;
cmd = (const videoFrameCommand_t *)data;
qglReadPixels( 0, 0, cmd->width, cmd->height, GL_RGBA,
GL_UNSIGNED_BYTE, cmd->captureBuffer );
// gamma correct
if( ( tr.overbrightBits > 0 ) && glConfig.deviceSupportsGamma )
R_GammaCorrect( cmd->captureBuffer, cmd->width * cmd->height * 4 );
if( cmd->motionJpeg )
{
frameSize = SaveJPGToBuffer( cmd->encodeBuffer, 95,
cmd->width, cmd->height, cmd->captureBuffer );
}
else
{
frameSize = cmd->width * cmd->height * 4;
// Vertically flip the image
for( i = 0; i < cmd->height; i++ )
{
Com_Memcpy( &cmd->encodeBuffer[ i * ( cmd->width * 4 ) ],
&cmd->captureBuffer[ ( cmd->height - i - 1 ) * ( cmd->width * 4 ) ],
cmd->width * 4 );
}
}
ri.CL_WriteAVIVideoFrame( cmd->encodeBuffer, frameSize );
return (const void *)(cmd + 1);
}
//============================================================================
/*
** GL_SetDefaultState
*/
@@ -1201,5 +1246,7 @@ refexport_t *GetRefAPI ( int apiVersion, refimport_t *rimp ) {
re.GetEntityToken = R_GetEntityToken;
re.inPVS = R_inPVS;
re.TakeVideoFrame = RE_TakeVideoFrame;
return &re;
}

@@ -1215,6 +1215,7 @@ skin_t *R_GetSkinByHandle( qhandle_t hSkin );
int R_ComputeLOD( trRefEntity_t *ent );
const void *RB_TakeVideoFrameCmd( const void *data );
//
// tr_shader.c
@@ -1579,6 +1580,15 @@ typedef struct {
qboolean jpeg;
} screenshotCommand_t;
typedef struct {
int commandId;
int width;
int height;
byte *captureBuffer;
byte *encodeBuffer;
qboolean motionJpeg;
} videoFrameCommand_t;
typedef enum {
RC_END_OF_LIST,
RC_SET_COLOR,
@@ -1586,7 +1596,8 @@ typedef enum {
RC_DRAW_SURFS,
RC_DRAW_BUFFER,
RC_SWAP_BUFFERS,
RC_SCREENSHOT
RC_SCREENSHOT,
RC_VIDEOFRAME
} renderCommand_t;
@@ -1635,6 +1646,11 @@ void RE_StretchPic ( float x, float y, float w, float h,
void RE_BeginFrame( stereoFrame_t stereoFrame );
void RE_EndFrame( int *frontEndMsec, int *backEndMsec );
void SaveJPG(char * filename, int quality, int image_width, int image_height, unsigned char *image_buffer);
int SaveJPGToBuffer( byte *buffer, int quality,
int image_width, int image_height,
byte *image_buffer );
void RE_TakeVideoFrame( int width, int height,
byte *captureBuffer, byte *encodeBuffer, qboolean motionJpeg );
// font stuff
void R_InitFreeType( void );

@@ -97,6 +97,8 @@ typedef struct {
void (*RemapShader)(const char *oldShader, const char *newShader, const char *offsetTime);
qboolean (*GetEntityToken)( char *buffer, int size );
qboolean (*inPVS)( const vec3_t p1, const vec3_t p2 );
void (*TakeVideoFrame)( int h, int w, byte* captureBuffer, byte *encodeBuffer, qboolean motionJpeg );
} refexport_t;
//
@@ -156,6 +158,7 @@ typedef struct {
int (*CIN_PlayCinematic)( const char *arg0, int xpos, int ypos, int width, int height, int bits);
e_status (*CIN_RunCinematic) (int handle);
void (*CL_WriteAVIVideoFrame)( const byte *buffer, int size );
} refimport_t;

@@ -684,6 +684,7 @@ Q3OBJ = \
$(B)/client/cl_parse.o \
$(B)/client/cl_scrn.o \
$(B)/client/cl_ui.o \
$(B)/client/cl_avi.o \
\
$(B)/client/cm_load.o \
$(B)/client/cm_patch.o \
@@ -908,6 +909,7 @@ $(B)/client/cl_net_chan.o : $(CDIR)/cl_net_chan.c; $(DO_CC)
$(B)/client/cl_parse.o : $(CDIR)/cl_parse.c; $(DO_CC)
$(B)/client/cl_scrn.o : $(CDIR)/cl_scrn.c; $(DO_CC)
$(B)/client/cl_ui.o : $(CDIR)/cl_ui.c; $(DO_CC)
$(B)/client/cl_avi.o : $(CDIR)/cl_avi.c; $(DO_CC)
$(B)/client/snd_adpcm.o : $(CDIR)/snd_adpcm.c; $(DO_CC)
$(B)/client/snd_dma.o : $(CDIR)/snd_dma.c; $(DO_CC)
$(B)/client/snd_mem.o : $(CDIR)/snd_mem.c; $(DO_CC)