forked from fte/fteqw

My attempt at Linux-friendly plugin code. Cross-compiling for win32 should be okay now; I just need to add the setting to the buildbot.

git-svn-id: https://svn.code.sf.net/p/fteqw/code/trunk@4335 fc73d0e0-1445-4013-8a0c-d673dee63da5
Spoike 2013-05-04 10:40:05 +00:00
parent 11c7f5965a
commit 78185c2721
5 changed files with 839 additions and 747 deletions

View file

@ -1,9 +1,53 @@
#windows is special as always, but we don't support itanium, and microsoft don't support anything else (not even arm with the nt win32 api)
ifeq ($(FTE_TARGET),win32)
PLUG_NATIVE_EXT=x86.dll
PLUG_LDFLAGS=-Lavplug/lib32 -L../engine/libs/mingw-libs -lzlib
endif
ifeq ($(FTE_TARGET),win64)
PLUG_NATIVE_EXT=amd.dll
PLUG_LDFLAGS=-Lavplug/lib64 -L../engine/libs/mingw64-libs -lz -Wl,--support-old-code
endif
PLUG_LDFLAGS?=-L/usr/local/lib -Wl,-R/usr/local/lib -lz
ifneq ($(PLUG_NATIVE_EXT),)
#if we're on windows, we'll put our windows-specific hacks here.
PLUG_DEFFILE=plugin.def
PLUG_CFLAGS=
$(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT): avplug/libavformat/avformat.h
endif
#if they're not on windows, we'll try asking the compiler directly
#the check to see if its already set is to avoid asking msvc, which would probably break things.
ifeq ($(PLUG_NATIVE_EXT),)
ifneq ($(shell echo|$(CC) -E -dM -|grep __amd64__),)
PLUG_NATIVE_EXT=amd.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __i386__),)
PLUG_NATIVE_EXT=x86.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __arm__),)
PLUG_NATIVE_EXT=arm.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __ppc__),)
PLUG_NATIVE_EXT=ppc.so
endif
endif
#fallback
PLUG_NATIVE_EXT?=unk.so
PLUG_DEFFILE?=
PLUG_CFLAGS?=-fPIC -Wl,--no-undefined
PLUG_LDFLAGS?=
all: ezscript hud irc
clean: ezscript-clean hud-clean irc-clean
.PHONY: all ezscript hud irc native distclean clean
help:
@-echo make a subdirectory
@ -25,8 +69,38 @@ irc:
irc-clean:
$(MAKE) clean -C irc
#small script to download+install avformat for windows cross compiles.
#linux users are expected to have the library installed locally already. If your version is too old or missing, run the following command to install it (to /usr/local), then delete the gz and directory.
#wget http://ffmpeg.org/releases/ffmpeg-1.2.tar.gz && tar xvfz ffmpeg-1.2.tar.gz && cd ffmpeg-1.2/ && ./configure --disable-yasm --enable-shared && make && sudo make install
#we use ffmpeg's version for some reason, as opposed to libav. not sure what the differences are meant to be, but libav seemed to have non-deprecated functions defined, docs that say to use them, and these functions missing.
AV7Z_VER=ffmpeg-1.2
AV7Z_W32=$(AV7Z_VER)-win32-dev.7z
AV7Z_URL32=http://ffmpeg.zeranoe.com/builds/win32/dev/$(AV7Z_W32)
AV7Z_PRE32=$(AV7Z_VER)-win32-dev/
AV7Z_W64=$(AV7Z_VER)-win64-dev.7z
AV7Z_URL64=http://ffmpeg.zeranoe.com/builds/win64/dev/$(AV7Z_W64)
AV7Z_PRE64=$(AV7Z_VER)-win64-dev/
avplug/libavformat/avformat.h:
wget $(AV7Z_URL32)
mkdir -p avplug/libavformat && cd avplug/libavformat && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavformat/ && cd -
mkdir -p avplug/libavcodec && cd avplug/libavcodec && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavcodec/ && cd -
mkdir -p avplug/libavutil && cd avplug/libavutil && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavutil/ && cd -
mkdir -p avplug/libswscale && cd avplug/libswscale && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libswscale/ && cd -
mkdir -p avplug/lib32 && cd avplug/lib32 && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)lib/avformat.lib $(AV7Z_PRE32)lib/avcodec.lib $(AV7Z_PRE32)lib/avutil.lib $(AV7Z_PRE32)lib/swscale.lib && cd -
rm $(AV7Z_W32)
wget $(AV7Z_URL64)
mkdir -p avplug/lib64 && cd avplug/lib64 && 7z e -y ../../$(AV7Z_W64) $(AV7Z_PRE64)lib/avformat.lib $(AV7Z_PRE64)lib/avcodec.lib $(AV7Z_PRE64)lib/avutil.lib $(AV7Z_PRE64)lib/swscale.lib && cd -
rm $(AV7Z_W64)
distclean:
rm avplug/libavformat/avformat.h
$(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT): avplug/avencode.c avplug/avdecode.c plugin.c
$(CC) $(BASE_CFLAGS) -DFTEPLUGIN -s -o $(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT) -shared $(PLUG_CFLAGS) -Iavplug/msvc_lib $^ $(PLUG_DEFFILE) $(PLUG_LDFLAGS) -lavcodec -lavformat -lavutil -lswscale
native: $(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT)
$(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT): mpq/fs_mpq.c mpq/blast.c plugin.c qvm_api.c
$(CC) $(BASE_CFLAGS) -DFTEPLUGIN -o $(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT) -shared $(PLUG_CFLAGS) -Impq $^ $(PLUG_DEFFILE) $(PLUG_LDFLAGS)
native: $(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT)
native:
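The per-architecture suffix above comes straight from grepping the compiler's predefined macros via `echo|$(CC) -E -dM -`. If you want to sanity-check what a given (cross-)toolchain actually targets, the same macros can be probed from C; this is purely an illustrative sketch, not part of the build:

/* Illustrative only: prints the suffix the Makefile's predefined-macro
 * probe would pick for the compiler that builds this program. */
#include <stdio.h>

int main(void)
{
#if defined(__amd64__)
	puts("amd.so");
#elif defined(__i386__)
	puts("x86.so");
#elif defined(__arm__)
	puts("arm.so");
#elif defined(__ppc__)
	puts("ppc.so");
#else
	puts("unk.so");	/* the Makefile's fallback */
#endif
	return 0;
}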

View file

@ -1,83 +1,95 @@
#include "../plugin.h" #include "../plugin.h"
#include "../engine.h" #include "../engine.h"
#include <avcodec.h> #include <libavcodec/avcodec.h>
#include <avformat.h> #include <libavformat/avformat.h>
#include <swscale.h> #include <libswscale/swscale.h>
#include <windows.h>
//between av 52.31 and 54.35, lots of constants etc got renamed to gain an extra AV_ prefix.
#define ARGNAMES ,sourceid, data, speed, samples, channels, width /*
BUILTIN(void, S_RawAudio, (int sourceid, void *data, int speed, int samples, int channels, int width)); #define AV_PIX_FMT_BGRA PIX_FMT_BGRA
#undef ARGNAMES #define AVMEDIA_TYPE_AUDIO CODEC_TYPE_AUDIO
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
/*should probably try threading this*/ #define AV_PIX_FMT_BGRA PIX_FMT_BGRA
/*timing is based upon the start time. this means overflow issues with rtsp etc*/ #define AV_SAMPLE_FMT_U8 SAMPLE_FMT_U8
#define AV_SAMPLE_FMT_S16 SAMPLE_FMT_S16
struct decctx #define AV_SAMPLE_FMT_FLT SAMPLE_FMT_FLT
{ #define AVIOContext ByteIOContext
unsigned int width, height; #define avio_alloc_context av_alloc_put_byte
*/
qhandle_t file;
int64_t fileofs; #define ARGNAMES ,sourceid, data, speed, samples, channels, width
int64_t filelen; BUILTIN(void, S_RawAudio, (int sourceid, void *data, int speed, int samples, int channels, int width));
AVFormatContext *pFormatCtx; #undef ARGNAMES
int audioStream; /*should probably try threading this*/
AVCodecContext *pACodecCtx; /*timing is based upon the start time. this means overflow issues with rtsp etc*/
AVFrame *pAFrame;
struct decctx
int videoStream; {
AVCodecContext *pVCodecCtx; unsigned int width, height;
AVFrame *pVFrame;
int64_t num, denum; qhandle_t file;
int64_t fileofs;
AVPicture pFrameRGB; int64_t filelen;
struct SwsContext *pScaleCtx; AVFormatContext *pFormatCtx;
unsigned int starttime; int audioStream;
unsigned int lastframe; AVCodecContext *pACodecCtx;
}; AVFrame *pAFrame;
static qboolean AVDec_SetSize (void *vctx, int width, int height) int videoStream;
{ AVCodecContext *pVCodecCtx;
struct decctx *ctx = (struct decctx*)vctx; AVFrame *pVFrame;
AVPicture newscaled; int64_t num, denum;
//colourspace conversions will be fastest if we AVPicture pFrameRGB;
// if (width > ctx->pCodecCtx->width) struct SwsContext *pScaleCtx;
width = ctx->pVCodecCtx->width;
// if (height > ctx->pCodecCtx->height) unsigned int starttime;
height = ctx->pVCodecCtx->height; unsigned int lastframe;
};
//is this a no-op?
if (width == ctx->width && height == ctx->height && ctx->pScaleCtx) static qboolean AVDec_SetSize (void *vctx, int width, int height)
return true; {
struct decctx *ctx = (struct decctx*)vctx;
if (avpicture_alloc(&newscaled, AV_PIX_FMT_BGRA, width, height) >= 0) AVPicture newscaled;
{
//update the scale context as required //colourspace conversions will be fastest if we
//clear the old stuff out // if (width > ctx->pCodecCtx->width)
avpicture_free(&ctx->pFrameRGB); width = ctx->pVCodecCtx->width;
// if (height > ctx->pCodecCtx->height)
ctx->width = width; height = ctx->pVCodecCtx->height;
ctx->height = height;
ctx->pFrameRGB = newscaled; //is this a no-op?
return qtrue; if (width == ctx->width && height == ctx->height && ctx->pScaleCtx)
} return true;
return qfalse; //unsupported
} if (avpicture_alloc(&newscaled, AV_PIX_FMT_BGRA, width, height) >= 0)
{
//update the scale context as required
//clear the old stuff out
avpicture_free(&ctx->pFrameRGB);
ctx->width = width;
ctx->height = height;
ctx->pFrameRGB = newscaled;
return qtrue;
}
return qfalse; //unsupported
}
static int AVIO_Read(void *opaque, uint8_t *buf, int buf_size) static int AVIO_Read(void *opaque, uint8_t *buf, int buf_size)
{ {
struct decctx *ctx = opaque; struct decctx *ctx = opaque;
int ammount; int ammount;
ammount = FS_Read(ctx->file, buf, buf_size); ammount = pFS_Read(ctx->file, buf, buf_size);
if (ammount > 0) if (ammount > 0)
ctx->fileofs += ammount; ctx->fileofs += ammount;
return ammount; return ammount;
} }
static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence) static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence)
{ {
struct decctx *ctx = opaque; struct decctx *ctx = opaque;
int64_t ret = ctx->fileofs; int64_t ret = ctx->fileofs;
switch(whence) switch(whence)
@ -95,214 +107,214 @@ static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence)
case AVSEEK_SIZE:
return ctx->filelen;
}
pFS_Seek(ctx->file, ctx->fileofs & 0xffffffff, ctx->fileofs>>32);
return ret;
}
static void AVDec_Destroy(void *vctx)
{
struct decctx *ctx = (struct decctx*)vctx;
// Free the video stuff
avpicture_free(&ctx->pFrameRGB);
av_free(ctx->pVFrame);
avcodec_close(ctx->pVCodecCtx);
// Free the audio decoder
av_free(ctx->pAFrame);
avcodec_close(ctx->pACodecCtx);
// Close the video file
avformat_close_input(&ctx->pFormatCtx);
if (ctx->file >= 0)
pFS_Close(ctx->file);
free(ctx);
}
static void *AVDec_Create(char *medianame)
{
struct decctx *ctx;
unsigned int i;
AVCodec *pCodec;
qboolean useioctx = false;
/*only respond to av: media prefixes*/
if (!strncmp(medianame, "av:", 3))
{
medianame = medianame + 3;
useioctx = true;
}
else if (!strncmp(medianame, "avs:", 4))
{
medianame = medianame + 4;
//let avformat do its own avio context stuff
}
else
return NULL;
ctx = malloc(sizeof(*ctx));
memset(ctx, 0, sizeof(*ctx));
//so we always decode the first frame instantly.
ctx->starttime = pSys_Milliseconds();
ctx->file = -1;
if (useioctx)
{
// Create internal Buffer for FFmpeg:
const int iBufSize = 32 * 1024;
char *pBuffer = malloc(iBufSize);
AVIOContext *ioctx;
ctx->filelen = pFS_Open(medianame, &ctx->file, 1);
if (ctx->filelen < 0)
{
Con_Printf("Unable to open %s\n", medianame);
free(ctx);
free(pBuffer);
return NULL;
}
ioctx = avio_alloc_context(pBuffer, iBufSize, 0, ctx, AVIO_Read, 0, AVIO_Seek);
ctx->pFormatCtx = avformat_alloc_context();
ctx->pFormatCtx->pb = ioctx;
}
// Open video file
if(avformat_open_input(&ctx->pFormatCtx, medianame, NULL, NULL)==0)
{
// Retrieve stream information
if(avformat_find_stream_info(ctx->pFormatCtx, NULL)>=0)
{
ctx->audioStream=-1;
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
{
ctx->audioStream=i;
break;
}
if(ctx->audioStream!=-1)
{
ctx->pACodecCtx=ctx->pFormatCtx->streams[ctx->audioStream]->codec;
pCodec=avcodec_find_decoder(ctx->pACodecCtx->codec_id);
ctx->pAFrame=avcodec_alloc_frame();
if(pCodec!=NULL && ctx->pAFrame && avcodec_open2(ctx->pACodecCtx, pCodec, NULL) >= 0)
{
}
else
ctx->audioStream = -1;
}
ctx->videoStream=-1;
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
{
ctx->videoStream=i;
break;
}
if(ctx->videoStream!=-1)
{
// Get a pointer to the codec context for the video stream
ctx->pVCodecCtx=ctx->pFormatCtx->streams[ctx->videoStream]->codec;
ctx->num = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.num;
ctx->denum = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.den;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(ctx->pVCodecCtx->codec_id);
// Open codec
if(pCodec!=NULL && avcodec_open2(ctx->pVCodecCtx, pCodec, NULL) >= 0)
{
// Allocate video frame
ctx->pVFrame=avcodec_alloc_frame();
if(ctx->pVFrame!=NULL)
{
if (AVDec_SetSize(ctx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height))
{
return ctx;
}
}
}
}
}
}
AVDec_Destroy(ctx);
return NULL;
}
static void *AVDec_DisplayFrame(void *vctx, qboolean nosound, uploadfmt_t *fmt, int *width, int *height)
{
struct decctx *ctx = (struct decctx*)vctx;
AVPacket packet;
int frameFinished;
qboolean repainted = false;
int64_t curtime, lasttime;
curtime = ((pSys_Milliseconds() - ctx->starttime) * ctx->denum);
curtime /= (ctx->num * 1000);
*fmt = TF_BGRA32;
while (1)
{
lasttime = av_frame_get_best_effort_timestamp(ctx->pVFrame);
if (lasttime > curtime)
break;
// We're ahead of the previous frame. try and read the next.
if (av_read_frame(ctx->pFormatCtx, &packet) < 0)
{
*fmt = TF_INVALID;
break;
}
// Is this a packet from the video stream?
if(packet.stream_index==ctx->videoStream)
{
// Decode video frame
avcodec_decode_video2(ctx->pVCodecCtx, ctx->pVFrame, &frameFinished, &packet);
// Did we get a video frame?
if(frameFinished)
{
ctx->pScaleCtx = sws_getCachedContext(ctx->pScaleCtx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height, ctx->pVCodecCtx->pix_fmt, ctx->width, ctx->height, AV_PIX_FMT_BGRA, SWS_POINT, 0, 0, 0);
// Convert the image from its native format to RGB
sws_scale(ctx->pScaleCtx, (void*)ctx->pVFrame->data, ctx->pVFrame->linesize, 0, ctx->pVCodecCtx->height, ctx->pFrameRGB.data, ctx->pFrameRGB.linesize);
repainted = true;
}
}
else if(packet.stream_index==ctx->audioStream && !nosound)
{
int okay;
int len;
void *odata = packet.data;
while (packet.size > 0)
{
okay = false;
len = avcodec_decode_audio4(ctx->pACodecCtx, ctx->pAFrame, &okay, &packet);
if (len < 0)
break;
packet.size -= len;
packet.data += len;
if (okay)
{
int width = 2;
unsigned int auddatasize = av_samples_get_buffer_size(NULL, ctx->pACodecCtx->channels, ctx->pAFrame->nb_samples, ctx->pACodecCtx->sample_fmt, 1);
void *auddata = ctx->pAFrame->data[0];
switch(ctx->pACodecCtx->sample_fmt)
{
default:
auddatasize = 0;
break;
case AV_SAMPLE_FMT_U8:
width = 1;
break;
@ -321,100 +333,100 @@ static void *AVDec_DisplayFrame(void *vctx, qboolean nosound, enum uploadfmt_e *
auddatasize/=2;
width = 2;
}
break;
}
pS_RawAudio(-1, auddata, ctx->pACodecCtx->sample_rate, auddatasize/(ctx->pACodecCtx->channels*width), ctx->pACodecCtx->channels, width);
}
}
packet.data = odata;
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
*width = ctx->width;
*height = ctx->height;
if (!repainted)
return NULL;
return ctx->pFrameRGB.data[0];
}
static void AVDec_GetSize (void *vctx, int *width, int *height)
{
struct decctx *ctx = (struct decctx*)vctx;
*width = ctx->width;
*height = ctx->height;
}
static void AVDec_CursorMove (void *vctx, float posx, float posy)
{
//its a video, dumbass
}
static void AVDec_Key (void *vctx, int code, int unicode, int isup)
{
//its a video, dumbass
}
static void AVDec_ChangeStream(void *vctx, char *newstream)
{
}
static void AVDec_Rewind(void *vctx)
{
struct decctx *ctx = (struct decctx*)vctx;
if (ctx->videoStream >= 0)
av_seek_frame(ctx->pFormatCtx, ctx->videoStream, 0, AVSEEK_FLAG_BACKWARD);
if (ctx->audioStream >= 0)
av_seek_frame(ctx->pFormatCtx, ctx->audioStream, 0, AVSEEK_FLAG_BACKWARD);
ctx->starttime = pSys_Milliseconds();
}
/*
//avcodec has no way to shut down properly.
static qintptr_t AVDec_Shutdown(qintptr_t *args)
{
return 0;
}
*/
static media_decoder_funcs_t decoderfuncs =
{
AVDec_Create,
AVDec_DisplayFrame,
NULL,//doneframe
AVDec_Destroy,
AVDec_Rewind,
NULL,//AVDec_CursorMove,
NULL,//AVDec_Key,
NULL,//AVDec_SetSize,
AVDec_GetSize,
NULL,//AVDec_ChangeStream
};
static qboolean AVDec_Init(void)
{
if (!pPlug_ExportNative("Media_VideoDecoder", &decoderfuncs))
{
Con_Printf("avplug: Engine doesn't support media decoder plugins\n");
return false;
}
CHECKBUILTIN(S_RawAudio);
CHECKBUILTIN(FS_Seek);
return true;
}
//get the encoder/decoders to register themselves with the engine, then make sure avformat/avcodec have registered all they have to give.
qboolean AVEnc_Init(void);
qintptr_t Plug_Init(qintptr_t *args)
{
qboolean okay = false;
okay |= AVDec_Init();
okay |= AVEnc_Init();
if (okay)
av_register_all();
return okay;
}
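Throughout this file the engine is reached through p-prefixed builtins (pFS_Open, pFS_Read, pFS_Seek, pFS_Close, pSys_Milliseconds, pS_RawAudio, pPlug_ExportNative) instead of linked engine symbols or win32 calls such as timeGetTime(), which is what lets the plugin build as a plain .so. As a rough sketch of the kind of pattern the BUILTIN/CHECKBUILTIN macros set up — hypothetical names only, not FTE's actual plugin SDK definitions:

/* Hypothetical illustration of importing one engine builtin as a
 * function pointer; the real macros in plugin.h differ in detail. */
typedef void (*S_RawAudio_t)(int sourceid, void *data, int speed,
	int samples, int channels, int width);
static S_RawAudio_t pS_RawAudio;	/* filled in at plugin init */

/* assumed: the engine hands the plugin a name->function lookup */
static int Import_Builtins(void *(*getengineapi)(const char *name))
{
	pS_RawAudio = (S_RawAudio_t)getengineapi("S_RawAudio");
	return pS_RawAudio != 0;	/* CHECKBUILTIN-style: feature stays off if missing */
}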

View file

@ -1,10 +1,11 @@
#include "../plugin.h" #include "../plugin.h"
#include "../engine.h" #include "../engine.h"
#include "avformat.h" #include <libavformat/avformat.h>
#include "avio.h" #include <libavformat/avio.h>
#include "avcodec.h" #include <libavcodec/avcodec.h>
#include "swscale.h" #include <libswscale/swscale.h>
//#include <libavutil/channel_layout.h>
/* /*
Most of the logic in here came from here: Most of the logic in here came from here:
@ -18,8 +19,8 @@ struct encctx
AVStream *video_st;
struct SwsContext *scale_ctx;
AVFrame *picture;
uint8_t *video_outbuf;
int video_outbuf_size;
AVStream *audio_st;
@ -28,113 +29,113 @@ struct encctx
static void AVEnc_End (void *ctx);
static AVFrame *alloc_frame(enum PixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if(!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = (uint8_t*)(av_malloc(size));
if (!picture_buf)
{
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, width, height);
picture->width = width;
picture->height = height;
return picture;
}
AVStream *add_video_stream(struct encctx *ctx, AVCodec *codec, int fps, int width, int height)
{
AVCodecContext *c;
AVStream *st;
char prof[128];
int bitrate = (int)pCvar_GetFloat("avplug_videobitrate");
int forcewidth = (int)pCvar_GetFloat("avplug_videoforcewidth");
int forceheight = (int)pCvar_GetFloat("avplug_videoforceheight");
st = avformat_new_stream(ctx->fc, codec);
if (!st)
{
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
c = st->codec;
c->codec_id = codec->id;
c->codec_type = codec->type;
/* put sample parameters */
c->bit_rate = bitrate;
/* resolution must be a multiple of two */
c->width = forcewidth?forcewidth:width;
c->height = forceheight?forceheight:height;
/* frames per second */
c->time_base.num = 1;
c->time_base.den = fps;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = PIX_FMT_YUV420P;
if (c->codec_id == CODEC_ID_MPEG2VIDEO)
{
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == CODEC_ID_MPEG1VIDEO)
{
/* needed to avoid using macroblocks in which some coeffs overflow
this doesnt happen with normal video, it just happens here as the
motion of the chroma plane doesnt match the luma plane */
// c->mb_decision=2;
}
// some formats want stream headers to be seperate
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
*prof = 0;
pCvar_GetString("avplug_format", prof, sizeof(prof));
// av_opt_set(c->priv_data, "profile", prof, AV_OPT_SEARCH_CHILDREN);
return st;
}
void close_video(struct encctx *ctx)
{
if (!ctx->video_st)
return;
avcodec_close(ctx->video_st->codec);
if (ctx->picture)
{
av_free(ctx->picture->data[0]);
av_free(ctx->picture);
}
av_free(ctx->video_outbuf);
}
static void AVEnc_Video (void *vctx, void *data, int frame, int width, int height)
{
struct encctx *ctx = vctx;
//weird maths to flip it.
const uint8_t *srcslices[2] = {(uint8_t*)data + (height-1)*width*3, NULL};
int srcstride[2] = {-width*3, 0};
int success;
AVPacket pkt;
if (!ctx->video_st)
return;
//convert RGB to whatever the codec needs (ie: yuv...).
ctx->scale_ctx = sws_getCachedContext(ctx->scale_ctx, width, height, AV_PIX_FMT_RGB24, ctx->picture->width, ctx->picture->height, ctx->video_st->codec->pix_fmt, SWS_POINT, 0, 0, 0);
sws_scale(ctx->scale_ctx, srcslices, srcstride, 0, height, ctx->picture->data, ctx->picture->linesize);
av_init_packet(&pkt);
ctx->picture->pts = av_rescale_q(frame, ctx->video_st->codec->time_base, ctx->video_st->time_base);
success = 0;
pkt.data = ctx->video_outbuf;
pkt.size = ctx->video_outbuf_size;
if (avcodec_encode_video2(ctx->video_st->codec, &pkt, ctx->picture, &success) == 0 && success)
{
pkt.pts = ctx->video_st->codec->coded_frame->pts;
@ -144,72 +145,72 @@ static void AVEnc_Video (void *vctx, void *data, int frame, int width, int heigh
pkt.data = ctx->video_outbuf;
// pkt.size = psize;
av_write_frame(ctx->fc, &pkt);
}
}
AVStream *add_audio_stream(struct encctx *ctx, AVCodec *codec, int samplerate, int bits, int channels)
{
AVCodecContext *c;
AVStream *st;
int bitrate = (int)pCvar_GetFloat("avplug_audiobitrate");
st = avformat_new_stream(ctx->fc, codec);
if (!st)
{
fprintf(stderr, "Could not alloc stream\n");
exit(1);
}
c = st->codec;
c->codec_id = codec->id;
c->codec_type = codec->type;
/* put sample parameters */
c->bit_rate = bitrate;
/* frames per second */
c->sample_fmt = ((bits==16)?AV_SAMPLE_FMT_S16:AV_SAMPLE_FMT_U8);
c->sample_rate = samplerate;
c->channels = channels;
switch(channels)
{
case 1:
c->channel_layout = AV_CH_FRONT_CENTER;
break;
case 2:
c->channel_layout = AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT;
break;
default:
break;
}
// some formats want stream headers to be seperate
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
void close_audio(struct encctx *ctx)
{
if (!ctx->audio_st)
return;
avcodec_close(ctx->audio_st->codec);
}
static void AVEnc_Audio (void *vctx, void *data, int bytes)
{
struct encctx *ctx = vctx;
int success;
AVPacket pkt;
ctx->audio->nb_samples = ctx->audio_st->codec->frame_size;
if (avcodec_fill_audio_frame(ctx->audio, ctx->audio_st->codec->channels, ctx->audio_st->codec->sample_fmt, data, bytes, 0) < 0)
return;
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
success = 0;
if (avcodec_encode_audio2(ctx->audio_st->codec, &pkt, ctx->audio, &success) == 0 && success)
{
pkt.pts = ctx->audio_st->codec->coded_frame->pts;
@ -219,172 +220,172 @@ static void AVEnc_Audio (void *vctx, void *data, int bytes)
// pkt.data = ctx->video_outbuf;
// pkt.size = psize;
av_write_frame(ctx->fc, &pkt);
}
}
static void *AVEnc_Begin (char *streamname, int videorate, int width, int height, int *sndkhz, int *sndchannels, int *sndbits)
{
struct encctx *ctx;
AVOutputFormat *fmt = NULL;
AVCodec *videocodec = NULL;
AVCodec *audiocodec = NULL;
char formatname[64];
formatname[0] = 0;
pCvar_GetString("avplug_format", formatname, sizeof(formatname));
if (*formatname)
{
fmt = av_guess_format(formatname, NULL, NULL);
if (!fmt)
{
Con_Printf("Unknown format specified.\n");
return NULL;
}
}
if (!fmt)
fmt = av_guess_format(NULL, streamname, NULL);
if (!fmt)
{
Con_DPrintf("Could not deduce output format from file extension: using MPEG.\n");
fmt = av_guess_format("mpeg", NULL, NULL);
}
if (!fmt)
{
Con_Printf("Format not known\n");
return NULL;
}
if (videorate)
{
char codecname[64];
codecname[0] = 0;
pCvar_GetString("avplug_videocodec", codecname, sizeof(codecname));
if (strcmp(codecname, "none"))
{
if (codecname[0])
{
videocodec = avcodec_find_encoder_by_name(codecname);
if (!videocodec)
{
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
return NULL;
}
}
if (!videocodec && fmt->video_codec != AV_CODEC_ID_NONE)
videocodec = avcodec_find_encoder(fmt->video_codec);
}
}
if (*sndkhz)
{
char codecname[64];
codecname[0] = 0;
pCvar_GetString("avplug_audiocodec", codecname, sizeof(codecname));
if (strcmp(codecname, "none"))
{
if (codecname[0])
{
audiocodec = avcodec_find_encoder_by_name(codecname);
if (!audiocodec)
{
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
return NULL;
}
}
if (!audiocodec && fmt->audio_codec != AV_CODEC_ID_NONE)
audiocodec = avcodec_find_encoder(fmt->audio_codec);
}
}
Con_DPrintf("Using format \"%s\"\n", fmt->name);
if (videocodec)
Con_DPrintf("Using Video Codec \"%s\"\n", videocodec->name);
else
Con_DPrintf("Not encoding video\n");
if (audiocodec)
Con_DPrintf("Using Audio Codec \"%s\"\n", audiocodec->name);
else
Con_DPrintf("Not encoding audio\n");
if (!videocodec && !audiocodec)
{
Con_DPrintf("Nothing to encode!\n");
return NULL;
}
if (!audiocodec)
*sndkhz = 0;
ctx = malloc(sizeof(*ctx));
if (!ctx)
return NULL;
memset(ctx, 0, sizeof(*ctx));
ctx->fc = avformat_alloc_context();
ctx->fc->oformat = fmt;
snprintf(ctx->fc->filename, sizeof(ctx->fc->filename), "%s", streamname);
//pick default codecs
ctx->video_st = NULL;
if (videocodec)
{
ctx->video_st = add_video_stream(ctx, videocodec, videorate, width, height);
if (ctx->video_st)
{
AVCodecContext *c = ctx->video_st->codec;
if (avcodec_open2(c, videocodec, NULL) < 0)
{
Con_Printf("Could not init codec instance \"%s\". Maybe try a different framerate/resolution/bitrate\n", videocodec->name);
AVEnc_End(ctx);
return NULL;
}
ctx->picture = alloc_frame(c->pix_fmt, c->width, c->height);
ctx->video_outbuf_size = 200000;
ctx->video_outbuf = av_malloc(ctx->video_outbuf_size);
if (!ctx->video_outbuf)
ctx->video_outbuf_size = 0;
}
}
if (audiocodec)
{
ctx->audio_st = add_audio_stream(ctx, audiocodec, *sndkhz, *sndbits, *sndchannels);
if (ctx->audio_st)
{
AVCodecContext *c = ctx->audio_st->codec;
if (avcodec_open2(c, audiocodec, NULL) < 0)
{
Con_Printf("Could not init codec instance \"%s\".\n", audiocodec->name);
AVEnc_End(ctx);
return NULL;
}
ctx->audio = avcodec_alloc_frame();
}
}
av_dump_format(ctx->fc, 0, streamname, 1);
if (!(fmt->flags & AVFMT_NOFILE))
{
if (avio_open(&ctx->fc->pb, streamname, AVIO_FLAG_WRITE) < 0)
{
Con_Printf("Could not open '%s'\n", streamname);
AVEnc_End(ctx);
return NULL;
}
}
//nearly complete, can make the file dirty now.
avformat_write_header(ctx->fc, NULL);
ctx->doneheaders = true;
return ctx;
}
static void AVEnc_End (void *vctx)
{
struct encctx *ctx = vctx;
@ -396,40 +397,39 @@ static void AVEnc_End (void *vctx)
if (ctx->doneheaders)
av_write_trailer(ctx->fc);
for(i = 0; i < ctx->fc->nb_streams; i++)
av_freep(&ctx->fc->streams[i]);
// if (!(fmt->flags & AVFMT_NOFILE))
avio_close(ctx->fc->pb);
av_free(ctx->fc);
free(ctx);
}
static media_encoder_funcs_t encoderfuncs =
{
AVEnc_Begin,
AVEnc_Video,
AVEnc_Audio,
AVEnc_End
};
qboolean AVEnc_Init(void)
{
pCvar_Register("avplug_format", "", 0, "avplug");
pCvar_Register("avplug_videocodec", "", 0, "avplug");
pCvar_Register("avplug_videocodecprofile", "", 0, "avplug");
pCvar_Register("avplug_videobitrate", "4000000", 0, "avplug");
pCvar_Register("avplug_videoforcewidth", "", 0, "avplug");
pCvar_Register("avplug_videoforceheight", "", 0, "avplug");
pCvar_Register("avplug_audiocodec", "", 0, "avplug");
pCvar_Register("avplug_audiobitrate", "64000", 0, "avplug");
if (!pPlug_ExportNative("Media_VideoEncoder", &encoderfuncs))
{
Con_Printf("avplug: Engine doesn't support media encoder plugins\n");
return false;
}
return true;
}

View file

@ -1,3 +1,4 @@
#ifndef FTEPLUGIN
typedef enum uploadfmt_e
{
TF_INVALID,
@ -10,7 +11,7 @@ typedef enum uploadfmt_e
typedef struct
{
void *(QDECL *createdecoder)(char *name); //needed
void *(QDECL *decodeframe)(void *ctx, qboolean nosound, uploadfmt_t *fmt, int *width, int *height); //needed
void (QDECL *doneframe)(void *ctx, void *img); //basically a free()
void (QDECL *shutdown)(void *ctx); //probably needed...
void (QDECL *rewind)(void *ctx);
@ -30,3 +31,5 @@ typedef struct
void (QDECL *capture_audio) (void *ctx, void *data, int bytes);
void (QDECL *capture_end) (void *ctx);
} media_encoder_funcs_t;
#endif

View file

@ -129,7 +129,10 @@ void strlcpy(char *d, const char *s, int n);
#endif
#ifdef FTEPLUGIN
#define qfalse false
#define qtrue true
#else
#ifdef __cplusplus
typedef enum {qfalse, qtrue} qboolean;
#else
@ -140,7 +143,7 @@ typedef enum {qfalse, qtrue} qboolean;
typedef float vec3_t[3];
typedef unsigned char qbyte;
#endif
typedef int qhandle_t;
typedef void* funcptr_t;
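With qhandle_t now an int, file handles are small engine-managed indices rather than raw pointers, which keeps the plugin ABI the same across 32-bit and 64-bit builds. A minimal sketch of reading a file through the engine's VFS using the pFS_* builtins with the signatures inferred from their use in avdecode.c above (an assumption, not verified against the SDK headers):

/* Sketch only: read up to bufsize bytes of a file via the engine's VFS.
 * pFS_Open returns the file length (or <0 on error); mode 1 is the
 * read mode used by avdecode.c. pFS_Read returns bytes read. */
static int ReadWholeFile(char *name, void *buf, int bufsize)
{
	qhandle_t f = -1;
	int len = pFS_Open(name, &f, 1);
	if (len < 0)
		return -1;
	if (len > bufsize)
		len = bufsize;
	len = pFS_Read(f, buf, len);
	pFS_Close(f);
	return len;
}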