My attempt at Linux-friendly plugin code. Cross-compiling for win32 should be okay now; I just need to add the setting to the buildbot.
git-svn-id: https://svn.code.sf.net/p/fteqw/code/trunk@4335 fc73d0e0-1445-4013-8a0c-d673dee63da5
Parent: 11c7f5965a
Commit: 78185c2721
5 changed files with 839 additions and 747 deletions
@@ -1,9 +1,53 @@
#windows is special as always, but we don't support itanium, and microsoft don't support anything else (not even arm with the nt win32 api)
ifeq ($(FTE_TARGET),win32)
PLUG_NATIVE_EXT=x86.dll
PLUG_LDFLAGS=-Lavplug/lib32 -L../engine/libs/mingw-libs -lzlib
endif
ifeq ($(FTE_TARGET),win64)
PLUG_NATIVE_EXT=amd.dll
PLUG_LDFLAGS=-Lavplug/lib64 -L../engine/libs/mingw64-libs -lz -Wl,--support-old-code
endif

PLUG_LDFLAGS?=-L/usr/local/lib -Wl,-R/usr/local/lib -lz

ifneq ($(PLUG_NATIVE_EXT),)
#if we're on windows, we'll put our windows-specific hacks here.
PLUG_DEFFILE=plugin.def
PLUG_CFLAGS=

$(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT): avplug/libavformat/avformat.h
endif

#if they're not on windows, we'll try asking the compiler directly
#the check to see if its already set is to avoid asking msvc, which would probably break things.
ifeq ($(PLUG_NATIVE_EXT),)
ifneq ($(shell echo|$(CC) -E -dM -|grep __amd64__),)
PLUG_NATIVE_EXT=amd.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __i386__),)
PLUG_NATIVE_EXT=x86.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __arm__),)
PLUG_NATIVE_EXT=arm.so
endif
ifneq ($(shell echo|$(CC) -E -dM -|grep __ppc__),)
PLUG_NATIVE_EXT=ppc.so
endif
endif

#fallback
PLUG_NATIVE_EXT?=unk.so

PLUG_DEFFILE?=
PLUG_CFLAGS?=-fPIC -Wl,--no-undefined
PLUG_LDFLAGS?=

all: ezscript hud irc

clean: ezscript-clean hud-clean irc-clean

.PHONY: all ezscript hud irc
.PHONY: all ezscript hud irc native distclean clean

help:
	@-echo make a subdirectory

@@ -25,8 +69,38 @@ irc:
irc-clean:
	$(MAKE) clean -C irc

native:
	@echo outdir = $(OUT_DIR)
	$(CC) -o $(OUT_DIR)/fteplug_avplugx86.dll -shared -Iavplug -Iavplug/libavformat -Iavplug/libavcodec -Iavplug/libavutil -Iavplug/libswscale -Iavplug/msvc_lib avplug/avencode.c avplug/avdecode.c plugin.def plugin.c -Lavplug/lib32 -lavcodec -lavformat -lavutil -lswscale -lwinmm
	$(CC) $(BASE_CFLAGS) -DFTEPLUGIN -o $(OUT_DIR)/fteplug_mpqx86.dll -shared -Impq mpq/fs_mpq.c mpq/blast.c plugin.def plugin.c qvm_api.c -Lavplug/lib32 -L../engine/libs/mingw-libs -lzlib

#small script to download+install avformat for windows cross compiles.
#linux users are expected to have the library installed locally already. If your version is too old or missing, run the following command to install it (to /usr/local), then delete the gz and directory.
#wget http://ffmpeg.org/releases/ffmpeg-1.2.tar.gz && cd tar xvfz ffmpeg-1.2.tar.gz && cd ffmpeg-1.2/ && ./configure --disable-yasm --enable-shared && make && sudo make install
#we use ffmpeg's version for some reason, as opposed to libav. not sure what the differences are meant to be, but libav seemed to have non-depricated functions defined, docs that say to use them, and these functions missing.
AV7Z_VER=ffmpeg-1.2
AV7Z_W32=$(AV7Z_VER)-win32-dev.7z
AV7Z_URL32=http://ffmpeg.zeranoe.com/builds/win32/dev/$(AV7Z_W32)
AV7Z_PRE32=$(AV7Z_VER)-win32-dev/
AV7Z_W64=$(AV7Z_VER)-win64-dev.7z
AV7Z_URL64=http://ffmpeg.zeranoe.com/builds/win64/dev/$(AV7Z_W64)
AV7Z_PRE64=$(AV7Z_VER)-win64-dev/
avplug/libavformat/avformat.h:
	wget $(AV7Z_URL32)
	mkdir -p avplug/libavformat && cd avplug/libavformat && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavformat/ && cd -
	mkdir -p avplug/libavcodec && cd avplug/libavcodec && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavcodec/ && cd -
	mkdir -p avplug/libavutil && cd avplug/libavutil && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libavutil/ && cd -
	mkdir -p avplug/libswscale && cd avplug/libswscale && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)include/libswscale/ && cd -
	mkdir -p avplug/lib32 && cd avplug/lib32 && 7z e -y ../../$(AV7Z_W32) $(AV7Z_PRE32)lib/avformat.lib $(AV7Z_PRE32)lib/avcodec.lib $(AV7Z_PRE32)lib/avutil.lib $(AV7Z_PRE32)lib/swscale.lib && cd -
	rm $(AV7Z_W32)
	wget $(AV7Z_URL64)
	mkdir -p avplug/lib64 && cd avplug/lib64 && 7z e -y ../../$(AV7Z_W64) $(AV7Z_PRE64)lib/avformat.lib $(AV7Z_PRE64)lib/avcodec.lib $(AV7Z_PRE64)lib/avutil.lib $(AV7Z_PRE64)lib/swscale.lib && cd -
	rm $(AV7Z_W64)
distclean:
	rm avplug/libavformat/avformat.h

$(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT): avplug/avencode.c avplug/avdecode.c plugin.c
	$(CC) $(BASE_CFLAGS) -DFTEPLUGIN -s -o $(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT) -shared $(PLUG_CFLAGS) -Iavplug/msvc_lib $^ $(PLUG_DEFFILE) $(PLUG_LDFLAGS) -lavcodec -lavformat -lavutil -lswscale
native: $(OUT_DIR)/fteplug_avplug$(PLUG_NATIVE_EXT)

$(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT): mpq/fs_mpq.c mpq/blast.c plugin.c qvm_api.c
	$(CC) $(BASE_CFLAGS) -DFTEPLUGIN -o $(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT) -shared $(PLUG_CFLAGS) -Impq $^ $(PLUG_DEFFILE) $(PLUG_LDFLAGS)
native: $(OUT_DIR)/fteplug_mpq$(PLUG_NATIVE_EXT)

native:
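Side note on the makefile above: when no windows target is set, it picks the native plugin suffix by grepping the preprocessor's predefined macro dump (`$(CC) -E -dM -`) for __amd64__, __i386__, __arm__ and __ppc__. The same decision can be made inside C with the same macros; a tiny illustrative sketch, where the suffix strings simply mirror the makefile's choices:

/* arch_probe.c - prints the native plugin suffix this compiler would target.
 * Mirrors the Makefile's grep over `$(CC) -E -dM -`; suffixes are illustrative only. */
#include <stdio.h>

static const char *native_plugin_suffix(void)
{
#if defined(__amd64__) || defined(__x86_64__)
	return "amd.so";
#elif defined(__i386__)
	return "x86.so";
#elif defined(__arm__)
	return "arm.so";
#elif defined(__ppc__)
	return "ppc.so";
#else
	return "unk.so";	/* same fallback as PLUG_NATIVE_EXT?=unk.so */
#endif
}

int main(void)
{
	printf("%s\n", native_plugin_suffix());
	return 0;
}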
@ -1,83 +1,95 @@
|
|||
#include "../plugin.h"
|
||||
#include "../engine.h"
|
||||
|
||||
#include <avcodec.h>
|
||||
#include <avformat.h>
|
||||
#include <swscale.h>
|
||||
#include <windows.h>
|
||||
|
||||
#define ARGNAMES ,sourceid, data, speed, samples, channels, width
|
||||
BUILTIN(void, S_RawAudio, (int sourceid, void *data, int speed, int samples, int channels, int width));
|
||||
#undef ARGNAMES
|
||||
|
||||
/*should probably try threading this*/
|
||||
/*timing is based upon the start time. this means overflow issues with rtsp etc*/
|
||||
|
||||
struct decctx
|
||||
{
|
||||
unsigned int width, height;
|
||||
|
||||
qhandle_t file;
|
||||
int64_t fileofs;
|
||||
int64_t filelen;
|
||||
AVFormatContext *pFormatCtx;
|
||||
|
||||
int audioStream;
|
||||
AVCodecContext *pACodecCtx;
|
||||
AVFrame *pAFrame;
|
||||
|
||||
int videoStream;
|
||||
AVCodecContext *pVCodecCtx;
|
||||
AVFrame *pVFrame;
|
||||
int64_t num, denum;
|
||||
|
||||
AVPicture pFrameRGB;
|
||||
struct SwsContext *pScaleCtx;
|
||||
|
||||
unsigned int starttime;
|
||||
unsigned int lastframe;
|
||||
};
|
||||
|
||||
static qboolean AVDec_SetSize (void *vctx, int width, int height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
AVPicture newscaled;
|
||||
|
||||
//colourspace conversions will be fastest if we
|
||||
// if (width > ctx->pCodecCtx->width)
|
||||
width = ctx->pVCodecCtx->width;
|
||||
// if (height > ctx->pCodecCtx->height)
|
||||
height = ctx->pVCodecCtx->height;
|
||||
|
||||
//is this a no-op?
|
||||
if (width == ctx->width && height == ctx->height && ctx->pScaleCtx)
|
||||
return true;
|
||||
|
||||
if (avpicture_alloc(&newscaled, AV_PIX_FMT_BGRA, width, height) >= 0)
|
||||
{
|
||||
//update the scale context as required
|
||||
//clear the old stuff out
|
||||
avpicture_free(&ctx->pFrameRGB);
|
||||
|
||||
ctx->width = width;
|
||||
ctx->height = height;
|
||||
ctx->pFrameRGB = newscaled;
|
||||
return qtrue;
|
||||
}
|
||||
return qfalse; //unsupported
|
||||
}
|
||||
|
||||
#include "../plugin.h"
|
||||
#include "../engine.h"
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
//between av 52.31 and 54.35, lots of constants etc got renamed to gain an extra AV_ prefix.
|
||||
/*
|
||||
#define AV_PIX_FMT_BGRA PIX_FMT_BGRA
|
||||
#define AVMEDIA_TYPE_AUDIO CODEC_TYPE_AUDIO
|
||||
#define AVMEDIA_TYPE_VIDEO CODEC_TYPE_VIDEO
|
||||
#define AV_PIX_FMT_BGRA PIX_FMT_BGRA
|
||||
#define AV_SAMPLE_FMT_U8 SAMPLE_FMT_U8
|
||||
#define AV_SAMPLE_FMT_S16 SAMPLE_FMT_S16
|
||||
#define AV_SAMPLE_FMT_FLT SAMPLE_FMT_FLT
|
||||
#define AVIOContext ByteIOContext
|
||||
#define avio_alloc_context av_alloc_put_byte
|
||||
*/
|
||||
|
||||
#define ARGNAMES ,sourceid, data, speed, samples, channels, width
|
||||
BUILTIN(void, S_RawAudio, (int sourceid, void *data, int speed, int samples, int channels, int width));
|
||||
#undef ARGNAMES
|
||||
|
||||
/*should probably try threading this*/
|
||||
/*timing is based upon the start time. this means overflow issues with rtsp etc*/
|
||||
|
||||
struct decctx
|
||||
{
|
||||
unsigned int width, height;
|
||||
|
||||
qhandle_t file;
|
||||
int64_t fileofs;
|
||||
int64_t filelen;
|
||||
AVFormatContext *pFormatCtx;
|
||||
|
||||
int audioStream;
|
||||
AVCodecContext *pACodecCtx;
|
||||
AVFrame *pAFrame;
|
||||
|
||||
int videoStream;
|
||||
AVCodecContext *pVCodecCtx;
|
||||
AVFrame *pVFrame;
|
||||
int64_t num, denum;
|
||||
|
||||
AVPicture pFrameRGB;
|
||||
struct SwsContext *pScaleCtx;
|
||||
|
||||
unsigned int starttime;
|
||||
unsigned int lastframe;
|
||||
};
|
||||
|
||||
static qboolean AVDec_SetSize (void *vctx, int width, int height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
AVPicture newscaled;
|
||||
|
||||
//colourspace conversions will be fastest if we
|
||||
// if (width > ctx->pCodecCtx->width)
|
||||
width = ctx->pVCodecCtx->width;
|
||||
// if (height > ctx->pCodecCtx->height)
|
||||
height = ctx->pVCodecCtx->height;
|
||||
|
||||
//is this a no-op?
|
||||
if (width == ctx->width && height == ctx->height && ctx->pScaleCtx)
|
||||
return true;
|
||||
|
||||
if (avpicture_alloc(&newscaled, AV_PIX_FMT_BGRA, width, height) >= 0)
|
||||
{
|
||||
//update the scale context as required
|
||||
//clear the old stuff out
|
||||
avpicture_free(&ctx->pFrameRGB);
|
||||
|
||||
ctx->width = width;
|
||||
ctx->height = height;
|
||||
ctx->pFrameRGB = newscaled;
|
||||
return qtrue;
|
||||
}
|
||||
return qfalse; //unsupported
|
||||
}
|
||||
|
||||
static int AVIO_Read(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct decctx *ctx = opaque;
|
||||
int ammount;
|
||||
ammount = FS_Read(ctx->file, buf, buf_size);
|
||||
ammount = pFS_Read(ctx->file, buf, buf_size);
|
||||
if (ammount > 0)
|
||||
ctx->fileofs += ammount;
|
||||
return ammount;
|
||||
}
|
||||
static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence)
|
||||
{
|
||||
static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence)
|
||||
{
|
||||
struct decctx *ctx = opaque;
|
||||
int64_t ret = ctx->fileofs;
|
||||
switch(whence)
|
||||
|
@ -95,214 +107,214 @@ static int64_t AVIO_Seek(void *opaque, int64_t offset, int whence)
|
|||
case AVSEEK_SIZE:
|
||||
return ctx->filelen;
|
||||
}
|
||||
FS_Seek(ctx->file, ctx->fileofs & 0xffffffff, ctx->fileofs>>32);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void AVDec_Destroy(void *vctx)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
|
||||
// Free the video stuff
|
||||
avpicture_free(&ctx->pFrameRGB);
|
||||
av_free(ctx->pVFrame);
|
||||
avcodec_close(ctx->pVCodecCtx);
|
||||
|
||||
// Free the audio decoder
|
||||
av_free(ctx->pAFrame);
|
||||
avcodec_close(ctx->pACodecCtx);
|
||||
|
||||
// Close the video file
|
||||
av_close_input_file(ctx->pFormatCtx);
|
||||
|
||||
if (ctx->file >= 0)
|
||||
FS_Close(ctx->file);
|
||||
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
static void *AVDec_Create(char *medianame)
|
||||
{
|
||||
struct decctx *ctx;
|
||||
|
||||
unsigned int i;
|
||||
AVCodec *pCodec;
|
||||
qboolean useioctx = false;
|
||||
|
||||
/*only respond to av: media prefixes*/
|
||||
if (!strncmp(medianame, "av:", 3))
|
||||
{
|
||||
medianame = medianame + 3;
|
||||
useioctx = true;
|
||||
}
|
||||
else if (!strncmp(medianame, "avs:", 4))
|
||||
{
|
||||
medianame = medianame + 4;
|
||||
//let avformat do its own avio context stuff
|
||||
}
|
||||
else
|
||||
return NULL;
|
||||
|
||||
ctx = malloc(sizeof(*ctx));
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
//so we always decode the first frame instantly.
|
||||
|
||||
ctx->starttime = timeGetTime();
|
||||
|
||||
ctx->file = -1;
|
||||
if (useioctx)
|
||||
{
|
||||
// Create internal Buffer for FFmpeg:
|
||||
const int iBufSize = 32 * 1024;
|
||||
BYTE *pBuffer = malloc(iBufSize);
|
||||
AVIOContext *ioctx;
|
||||
|
||||
ctx->filelen = FS_Open(medianame, &ctx->file, 1);
|
||||
if (ctx->filelen < 0)
|
||||
{
|
||||
Con_Printf("Unable to open %s\n", medianame);
|
||||
free(ctx);
|
||||
free(pBuffer);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ioctx = avio_alloc_context(pBuffer, iBufSize, 0, ctx, AVIO_Read, 0, AVIO_Seek);
|
||||
ctx->pFormatCtx = avformat_alloc_context();
|
||||
|
||||
ctx->pFormatCtx->pb = ioctx;
|
||||
}
|
||||
|
||||
// Open video file
|
||||
if(avformat_open_input(&ctx->pFormatCtx, medianame, NULL, NULL)==0)
|
||||
{
|
||||
// Retrieve stream information
|
||||
if(av_find_stream_info(ctx->pFormatCtx)>=0)
|
||||
{
|
||||
ctx->audioStream=-1;
|
||||
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
|
||||
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
|
||||
{
|
||||
ctx->audioStream=i;
|
||||
break;
|
||||
}
|
||||
if(ctx->audioStream!=-1)
|
||||
{
|
||||
ctx->pACodecCtx=ctx->pFormatCtx->streams[ctx->audioStream]->codec;
|
||||
pCodec=avcodec_find_decoder(ctx->pACodecCtx->codec_id);
|
||||
|
||||
ctx->pAFrame=avcodec_alloc_frame();
|
||||
if(pCodec!=NULL && ctx->pAFrame && avcodec_open(ctx->pACodecCtx, pCodec) >= 0)
|
||||
{
|
||||
|
||||
}
|
||||
else
|
||||
ctx->audioStream = -1;
|
||||
}
|
||||
|
||||
ctx->videoStream=-1;
|
||||
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
|
||||
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
|
||||
{
|
||||
ctx->videoStream=i;
|
||||
break;
|
||||
}
|
||||
if(ctx->videoStream!=-1)
|
||||
{
|
||||
// Get a pointer to the codec context for the video stream
|
||||
ctx->pVCodecCtx=ctx->pFormatCtx->streams[ctx->videoStream]->codec;
|
||||
ctx->num = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.num;
|
||||
ctx->denum = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.den;
|
||||
|
||||
// Find the decoder for the video stream
|
||||
pCodec=avcodec_find_decoder(ctx->pVCodecCtx->codec_id);
|
||||
|
||||
// Open codec
|
||||
if(pCodec!=NULL && avcodec_open(ctx->pVCodecCtx, pCodec) >= 0)
|
||||
{
|
||||
// Allocate video frame
|
||||
ctx->pVFrame=avcodec_alloc_frame();
|
||||
if(ctx->pVFrame!=NULL)
|
||||
{
|
||||
if (AVDec_SetSize(ctx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height))
|
||||
{
|
||||
return ctx;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AVDec_Destroy(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *AVDec_DisplayFrame(void *vctx, qboolean nosound, enum uploadfmt_e *fmt, int *width, int *height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
AVPacket packet;
|
||||
int frameFinished;
|
||||
qboolean repainted = false;
|
||||
int64_t curtime, lasttime;
|
||||
|
||||
curtime = ((timeGetTime() - ctx->starttime) * ctx->denum);
|
||||
curtime /= (ctx->num * 1000);
|
||||
|
||||
*fmt = TF_BGRA32;
|
||||
while (1)
|
||||
{
|
||||
lasttime = av_frame_get_best_effort_timestamp(ctx->pVFrame);
|
||||
|
||||
if (lasttime > curtime)
|
||||
break;
|
||||
|
||||
// We're ahead of the previous frame. try and read the next.
|
||||
if (av_read_frame(ctx->pFormatCtx, &packet) < 0)
|
||||
{
|
||||
*fmt = TF_INVALID;
|
||||
break;
|
||||
}
|
||||
|
||||
// Is this a packet from the video stream?
|
||||
if(packet.stream_index==ctx->videoStream)
|
||||
{
|
||||
// Decode video frame
|
||||
avcodec_decode_video2(ctx->pVCodecCtx, ctx->pVFrame, &frameFinished, &packet);
|
||||
|
||||
// Did we get a video frame?
|
||||
if(frameFinished)
|
||||
{
|
||||
ctx->pScaleCtx = sws_getCachedContext(ctx->pScaleCtx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height, ctx->pVCodecCtx->pix_fmt, ctx->width, ctx->height, AV_PIX_FMT_BGRA, SWS_POINT, 0, 0, 0);
|
||||
|
||||
// Convert the image from its native format to RGB
|
||||
sws_scale(ctx->pScaleCtx, ctx->pVFrame->data, ctx->pVFrame->linesize, 0, ctx->pVCodecCtx->height, ctx->pFrameRGB.data, ctx->pFrameRGB.linesize);
|
||||
|
||||
repainted = true;
|
||||
}
|
||||
}
|
||||
else if(packet.stream_index==ctx->audioStream && !nosound)
|
||||
{
|
||||
int okay;
|
||||
int len;
|
||||
void *odata = packet.data;
|
||||
while (packet.size > 0)
|
||||
{
|
||||
okay = false;
|
||||
len = avcodec_decode_audio4(ctx->pACodecCtx, ctx->pAFrame, &okay, &packet);
|
||||
if (len < 0)
|
||||
break;
|
||||
packet.size -= len;
|
||||
packet.data += len;
|
||||
if (okay)
|
||||
{
|
||||
int width = 2;
|
||||
unsigned int auddatasize = av_samples_get_buffer_size(NULL, ctx->pACodecCtx->channels, ctx->pAFrame->nb_samples, ctx->pACodecCtx->sample_fmt, 1);
|
||||
void *auddata = ctx->pAFrame->data[0];
|
||||
switch(ctx->pACodecCtx->sample_fmt)
|
||||
{
|
||||
pFS_Seek(ctx->file, ctx->fileofs & 0xffffffff, ctx->fileofs>>32);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void AVDec_Destroy(void *vctx)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
|
||||
// Free the video stuff
|
||||
avpicture_free(&ctx->pFrameRGB);
|
||||
av_free(ctx->pVFrame);
|
||||
avcodec_close(ctx->pVCodecCtx);
|
||||
|
||||
// Free the audio decoder
|
||||
av_free(ctx->pAFrame);
|
||||
avcodec_close(ctx->pACodecCtx);
|
||||
|
||||
// Close the video file
|
||||
avformat_close_input(&ctx->pFormatCtx);
|
||||
|
||||
if (ctx->file >= 0)
|
||||
pFS_Close(ctx->file);
|
||||
|
||||
free(ctx);
|
||||
}
|
||||
|
||||
static void *AVDec_Create(char *medianame)
|
||||
{
|
||||
struct decctx *ctx;
|
||||
|
||||
unsigned int i;
|
||||
AVCodec *pCodec;
|
||||
qboolean useioctx = false;
|
||||
|
||||
/*only respond to av: media prefixes*/
|
||||
if (!strncmp(medianame, "av:", 3))
|
||||
{
|
||||
medianame = medianame + 3;
|
||||
useioctx = true;
|
||||
}
|
||||
else if (!strncmp(medianame, "avs:", 4))
|
||||
{
|
||||
medianame = medianame + 4;
|
||||
//let avformat do its own avio context stuff
|
||||
}
|
||||
else
|
||||
return NULL;
|
||||
|
||||
ctx = malloc(sizeof(*ctx));
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
//so we always decode the first frame instantly.
|
||||
|
||||
ctx->starttime = pSys_Milliseconds();
|
||||
|
||||
ctx->file = -1;
|
||||
if (useioctx)
|
||||
{
|
||||
// Create internal Buffer for FFmpeg:
|
||||
const int iBufSize = 32 * 1024;
|
||||
char *pBuffer = malloc(iBufSize);
|
||||
AVIOContext *ioctx;
|
||||
|
||||
ctx->filelen = pFS_Open(medianame, &ctx->file, 1);
|
||||
if (ctx->filelen < 0)
|
||||
{
|
||||
Con_Printf("Unable to open %s\n", medianame);
|
||||
free(ctx);
|
||||
free(pBuffer);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ioctx = avio_alloc_context(pBuffer, iBufSize, 0, ctx, AVIO_Read, 0, AVIO_Seek);
|
||||
ctx->pFormatCtx = avformat_alloc_context();
|
||||
|
||||
ctx->pFormatCtx->pb = ioctx;
|
||||
}
|
||||
|
||||
// Open video file
|
||||
if(avformat_open_input(&ctx->pFormatCtx, medianame, NULL, NULL)==0)
|
||||
{
|
||||
// Retrieve stream information
|
||||
if(avformat_find_stream_info(ctx->pFormatCtx, NULL)>=0)
|
||||
{
|
||||
ctx->audioStream=-1;
|
||||
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
|
||||
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO)
|
||||
{
|
||||
ctx->audioStream=i;
|
||||
break;
|
||||
}
|
||||
if(ctx->audioStream!=-1)
|
||||
{
|
||||
ctx->pACodecCtx=ctx->pFormatCtx->streams[ctx->audioStream]->codec;
|
||||
pCodec=avcodec_find_decoder(ctx->pACodecCtx->codec_id);
|
||||
|
||||
ctx->pAFrame=avcodec_alloc_frame();
|
||||
if(pCodec!=NULL && ctx->pAFrame && avcodec_open2(ctx->pACodecCtx, pCodec, NULL) >= 0)
|
||||
{
|
||||
|
||||
}
|
||||
else
|
||||
ctx->audioStream = -1;
|
||||
}
|
||||
|
||||
ctx->videoStream=-1;
|
||||
for(i=0; i<ctx->pFormatCtx->nb_streams; i++)
|
||||
if(ctx->pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
|
||||
{
|
||||
ctx->videoStream=i;
|
||||
break;
|
||||
}
|
||||
if(ctx->videoStream!=-1)
|
||||
{
|
||||
// Get a pointer to the codec context for the video stream
|
||||
ctx->pVCodecCtx=ctx->pFormatCtx->streams[ctx->videoStream]->codec;
|
||||
ctx->num = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.num;
|
||||
ctx->denum = ctx->pFormatCtx->streams[ctx->videoStream]->time_base.den;
|
||||
|
||||
// Find the decoder for the video stream
|
||||
pCodec=avcodec_find_decoder(ctx->pVCodecCtx->codec_id);
|
||||
|
||||
// Open codec
|
||||
if(pCodec!=NULL && avcodec_open2(ctx->pVCodecCtx, pCodec, NULL) >= 0)
|
||||
{
|
||||
// Allocate video frame
|
||||
ctx->pVFrame=avcodec_alloc_frame();
|
||||
if(ctx->pVFrame!=NULL)
|
||||
{
|
||||
if (AVDec_SetSize(ctx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height))
|
||||
{
|
||||
return ctx;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
AVDec_Destroy(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *AVDec_DisplayFrame(void *vctx, qboolean nosound, uploadfmt_t *fmt, int *width, int *height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
AVPacket packet;
|
||||
int frameFinished;
|
||||
qboolean repainted = false;
|
||||
int64_t curtime, lasttime;
|
||||
|
||||
curtime = ((pSys_Milliseconds() - ctx->starttime) * ctx->denum);
|
||||
curtime /= (ctx->num * 1000);
|
||||
|
||||
*fmt = TF_BGRA32;
|
||||
while (1)
|
||||
{
|
||||
lasttime = av_frame_get_best_effort_timestamp(ctx->pVFrame);
|
||||
|
||||
if (lasttime > curtime)
|
||||
break;
|
||||
|
||||
// We're ahead of the previous frame. try and read the next.
|
||||
if (av_read_frame(ctx->pFormatCtx, &packet) < 0)
|
||||
{
|
||||
*fmt = TF_INVALID;
|
||||
break;
|
||||
}
|
||||
|
||||
// Is this a packet from the video stream?
|
||||
if(packet.stream_index==ctx->videoStream)
|
||||
{
|
||||
// Decode video frame
|
||||
avcodec_decode_video2(ctx->pVCodecCtx, ctx->pVFrame, &frameFinished, &packet);
|
||||
|
||||
// Did we get a video frame?
|
||||
if(frameFinished)
|
||||
{
|
||||
ctx->pScaleCtx = sws_getCachedContext(ctx->pScaleCtx, ctx->pVCodecCtx->width, ctx->pVCodecCtx->height, ctx->pVCodecCtx->pix_fmt, ctx->width, ctx->height, AV_PIX_FMT_BGRA, SWS_POINT, 0, 0, 0);
|
||||
|
||||
// Convert the image from its native format to RGB
|
||||
sws_scale(ctx->pScaleCtx, (void*)ctx->pVFrame->data, ctx->pVFrame->linesize, 0, ctx->pVCodecCtx->height, ctx->pFrameRGB.data, ctx->pFrameRGB.linesize);
|
||||
|
||||
repainted = true;
|
||||
}
|
||||
}
|
||||
else if(packet.stream_index==ctx->audioStream && !nosound)
|
||||
{
|
||||
int okay;
|
||||
int len;
|
||||
void *odata = packet.data;
|
||||
while (packet.size > 0)
|
||||
{
|
||||
okay = false;
|
||||
len = avcodec_decode_audio4(ctx->pACodecCtx, ctx->pAFrame, &okay, &packet);
|
||||
if (len < 0)
|
||||
break;
|
||||
packet.size -= len;
|
||||
packet.data += len;
|
||||
if (okay)
|
||||
{
|
||||
int width = 2;
|
||||
unsigned int auddatasize = av_samples_get_buffer_size(NULL, ctx->pACodecCtx->channels, ctx->pAFrame->nb_samples, ctx->pACodecCtx->sample_fmt, 1);
|
||||
void *auddata = ctx->pAFrame->data[0];
|
||||
switch(ctx->pACodecCtx->sample_fmt)
|
||||
{
|
||||
default:
|
||||
auddatasize = 0;
|
||||
break;
|
||||
break;
|
||||
case AV_SAMPLE_FMT_U8:
|
||||
width = 1;
|
||||
break;
|
||||
|
@ -321,100 +333,100 @@ static void *AVDec_DisplayFrame(void *vctx, qboolean nosound, enum uploadfmt_e *
|
|||
auddatasize/=2;
|
||||
width = 2;
|
||||
}
|
||||
break;
|
||||
}
|
||||
S_RawAudio(-1, auddata, ctx->pACodecCtx->sample_rate, auddatasize/(ctx->pACodecCtx->channels*width), ctx->pACodecCtx->channels, width);
|
||||
}
|
||||
}
|
||||
packet.data = odata;
|
||||
}
|
||||
|
||||
// Free the packet that was allocated by av_read_frame
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
|
||||
*width = ctx->width;
|
||||
*height = ctx->height;
|
||||
if (!repainted)
|
||||
return NULL;
|
||||
return ctx->pFrameRGB.data[0];
|
||||
}
|
||||
static void AVDec_GetSize (void *vctx, int *width, int *height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
*width = ctx->width;
|
||||
*height = ctx->height;
|
||||
}
|
||||
|
||||
static void AVDec_CursorMove (void *vctx, float posx, float posy)
|
||||
{
|
||||
//its a video, dumbass
|
||||
}
|
||||
static void AVDec_Key (void *vctx, int code, int unicode, int isup)
|
||||
{
|
||||
//its a video, dumbass
|
||||
}
|
||||
static void AVDec_ChangeStream(void *vctx, char *newstream)
|
||||
{
|
||||
}
|
||||
static void AVDec_Rewind(void *vctx)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
if (ctx->videoStream >= 0)
|
||||
av_seek_frame(ctx->pFormatCtx, ctx->videoStream, 0, AVSEEK_FLAG_BACKWARD);
|
||||
if (ctx->audioStream >= 0)
|
||||
av_seek_frame(ctx->pFormatCtx, ctx->audioStream, 0, AVSEEK_FLAG_BACKWARD);
|
||||
|
||||
ctx->starttime = timeGetTime();
|
||||
}
|
||||
|
||||
/*
|
||||
//avcodec has no way to shut down properly.
|
||||
static qintptr_t AVDec_Shutdown(qintptr_t *args)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
static media_decoder_funcs_t decoderfuncs =
|
||||
{
|
||||
AVDec_Create,
|
||||
AVDec_DisplayFrame,
|
||||
NULL,//doneframe
|
||||
AVDec_Destroy,
|
||||
AVDec_Rewind,
|
||||
|
||||
NULL,//AVDec_CursorMove,
|
||||
NULL,//AVDec_Key,
|
||||
NULL,//AVDec_SetSize,
|
||||
AVDec_GetSize,
|
||||
NULL,//AVDec_ChangeStream
|
||||
};
|
||||
|
||||
static qboolean AVDec_Init(void)
|
||||
{
|
||||
if (!Plug_ExportNative("Media_VideoDecoder", &decoderfuncs))
|
||||
{
|
||||
Con_Printf("avplug: Engine doesn't support media decoder plugins\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
CHECKBUILTIN(S_RawAudio);
|
||||
CHECKBUILTIN(FS_Seek);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//get the encoder/decoders to register themselves with the engine, then make sure avformat/avcodec have registered all they have to give.
|
||||
qboolean AVEnc_Init(void);
|
||||
qintptr_t Plug_Init(qintptr_t *args)
|
||||
{
|
||||
qboolean okay = false;
|
||||
|
||||
okay |= AVDec_Init();
|
||||
okay |= AVEnc_Init();
|
||||
if (okay)
|
||||
av_register_all();
|
||||
return okay;
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
pS_RawAudio(-1, auddata, ctx->pACodecCtx->sample_rate, auddatasize/(ctx->pACodecCtx->channels*width), ctx->pACodecCtx->channels, width);
|
||||
}
|
||||
}
|
||||
packet.data = odata;
|
||||
}
|
||||
|
||||
// Free the packet that was allocated by av_read_frame
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
|
||||
*width = ctx->width;
|
||||
*height = ctx->height;
|
||||
if (!repainted)
|
||||
return NULL;
|
||||
return ctx->pFrameRGB.data[0];
|
||||
}
|
||||
static void AVDec_GetSize (void *vctx, int *width, int *height)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
*width = ctx->width;
|
||||
*height = ctx->height;
|
||||
}
|
||||
|
||||
static void AVDec_CursorMove (void *vctx, float posx, float posy)
|
||||
{
|
||||
//its a video, dumbass
|
||||
}
|
||||
static void AVDec_Key (void *vctx, int code, int unicode, int isup)
|
||||
{
|
||||
//its a video, dumbass
|
||||
}
|
||||
static void AVDec_ChangeStream(void *vctx, char *newstream)
|
||||
{
|
||||
}
|
||||
static void AVDec_Rewind(void *vctx)
|
||||
{
|
||||
struct decctx *ctx = (struct decctx*)vctx;
|
||||
if (ctx->videoStream >= 0)
|
||||
av_seek_frame(ctx->pFormatCtx, ctx->videoStream, 0, AVSEEK_FLAG_BACKWARD);
|
||||
if (ctx->audioStream >= 0)
|
||||
av_seek_frame(ctx->pFormatCtx, ctx->audioStream, 0, AVSEEK_FLAG_BACKWARD);
|
||||
|
||||
ctx->starttime = pSys_Milliseconds();
|
||||
}
|
||||
|
||||
/*
|
||||
//avcodec has no way to shut down properly.
|
||||
static qintptr_t AVDec_Shutdown(qintptr_t *args)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
*/
|
||||
|
||||
static media_decoder_funcs_t decoderfuncs =
|
||||
{
|
||||
AVDec_Create,
|
||||
AVDec_DisplayFrame,
|
||||
NULL,//doneframe
|
||||
AVDec_Destroy,
|
||||
AVDec_Rewind,
|
||||
|
||||
NULL,//AVDec_CursorMove,
|
||||
NULL,//AVDec_Key,
|
||||
NULL,//AVDec_SetSize,
|
||||
AVDec_GetSize,
|
||||
NULL,//AVDec_ChangeStream
|
||||
};
|
||||
|
||||
static qboolean AVDec_Init(void)
|
||||
{
|
||||
if (!pPlug_ExportNative("Media_VideoDecoder", &decoderfuncs))
|
||||
{
|
||||
Con_Printf("avplug: Engine doesn't support media decoder plugins\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
CHECKBUILTIN(S_RawAudio);
|
||||
CHECKBUILTIN(FS_Seek);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
//get the encoder/decoders to register themselves with the engine, then make sure avformat/avcodec have registered all they have to give.
|
||||
qboolean AVEnc_Init(void);
|
||||
qintptr_t Plug_Init(qintptr_t *args)
|
||||
{
|
||||
qboolean okay = false;
|
||||
|
||||
okay |= AVDec_Init();
|
||||
okay |= AVEnc_Init();
|
||||
if (okay)
|
||||
av_register_all();
|
||||
return okay;
|
||||
}
|
||||
|
||||
|
|
|
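For context on the avio glue in the decoder above: avformat is fed through the plugin's own AVIO_Read/AVIO_Seek callbacks instead of being allowed to touch the filesystem directly, which is what lets it pull "av:" media out of the engine's VFS (pFS_Read/pFS_Seek/pFS_Open). A minimal standalone sketch of the same wiring, using plain stdio in place of the engine's file handles — the stdio usage and the demo_* names are placeholders, not part of the plugin:

/* Sketch: custom AVIO callbacks over stdio, mirroring AVIO_Read/AVIO_Seek above. */
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>

static int demo_read(void *opaque, uint8_t *buf, int buf_size)
{
	FILE *f = opaque;
	size_t got = fread(buf, 1, buf_size, f);
	return got ? (int)got : AVERROR_EOF;
}
static int64_t demo_seek(void *opaque, int64_t offset, int whence)
{
	FILE *f = opaque;
	if (whence == AVSEEK_SIZE)
	{	/* report total size, like the plugin returning ctx->filelen */
		long cur = ftell(f), end;
		fseek(f, 0, SEEK_END); end = ftell(f); fseek(f, cur, SEEK_SET);
		return end;
	}
	if (fseek(f, (long)offset, whence) != 0)
		return -1;
	return ftell(f);
}

AVFormatContext *demo_open(const char *path)
{
	FILE *f = fopen(path, "rb");
	unsigned char *buf = av_malloc(32 * 1024);	/* same 32k buffer size as the plugin */
	AVIOContext *pb;
	AVFormatContext *fc;
	if (!f || !buf)
		return NULL;
	pb = avio_alloc_context(buf, 32 * 1024, 0, f, demo_read, NULL, demo_seek);
	fc = avformat_alloc_context();
	fc->pb = pb;	/* hand avformat our I/O context before avformat_open_input */
	if (avformat_open_input(&fc, path, NULL, NULL) != 0)
		return NULL;
	return fc;
}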
@ -1,10 +1,11 @@
|
|||
#include "../plugin.h"
|
||||
#include "../plugin.h"
|
||||
#include "../engine.h"
|
||||
|
||||
#include "avformat.h"
|
||||
#include "avio.h"
|
||||
#include "avcodec.h"
|
||||
#include "swscale.h"
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libswscale/swscale.h>
|
||||
//#include <libavutil/channel_layout.h>
|
||||
|
||||
/*
|
||||
Most of the logic in here came from here:
|
||||
|
@ -18,8 +19,8 @@ struct encctx
|
|||
|
||||
AVStream *video_st;
|
||||
struct SwsContext *scale_ctx;
|
||||
AVFrame *picture;
|
||||
uint8_t *video_outbuf;
|
||||
AVFrame *picture;
|
||||
uint8_t *video_outbuf;
|
||||
int video_outbuf_size;
|
||||
|
||||
AVStream *audio_st;
|
||||
|
@ -28,113 +29,113 @@ struct encctx
|
|||
|
||||
static void AVEnc_End (void *ctx);
|
||||
|
||||
static AVFrame *alloc_frame(enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
uint8_t *picture_buf;
|
||||
int size;
|
||||
|
||||
picture = avcodec_alloc_frame();
|
||||
if(!picture)
|
||||
return NULL;
|
||||
size = avpicture_get_size(pix_fmt, width, height);
|
||||
picture_buf = (uint8_t*)(av_malloc(size));
|
||||
if (!picture_buf)
|
||||
{
|
||||
av_free(picture);
|
||||
return NULL;
|
||||
}
|
||||
avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, width, height);
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
return picture;
|
||||
static AVFrame *alloc_frame(enum PixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
uint8_t *picture_buf;
|
||||
int size;
|
||||
|
||||
picture = avcodec_alloc_frame();
|
||||
if(!picture)
|
||||
return NULL;
|
||||
size = avpicture_get_size(pix_fmt, width, height);
|
||||
picture_buf = (uint8_t*)(av_malloc(size));
|
||||
if (!picture_buf)
|
||||
{
|
||||
av_free(picture);
|
||||
return NULL;
|
||||
}
|
||||
avpicture_fill((AVPicture *) picture, picture_buf, pix_fmt, width, height);
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
return picture;
|
||||
}
|
||||
AVStream *add_video_stream(struct encctx *ctx, AVCodec *codec, int fps, int width, int height)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
char prof[128];
|
||||
int bitrate = (int)Cvar_GetFloat("avplug_videobitrate");
|
||||
int forcewidth = (int)Cvar_GetFloat("avplug_videoforcewidth");
|
||||
int forceheight = (int)Cvar_GetFloat("avplug_videoforceheight");
|
||||
|
||||
st = avformat_new_stream(ctx->fc, codec);
|
||||
if (!st)
|
||||
{
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec->id;
|
||||
c->codec_type = codec->type;
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = bitrate;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = forcewidth?forcewidth:width;
|
||||
c->height = forceheight?forceheight:height;
|
||||
/* frames per second */
|
||||
c->time_base.num = 1;
|
||||
c->time_base.den = fps;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = PIX_FMT_YUV420P;
|
||||
if (c->codec_id == CODEC_ID_MPEG2VIDEO)
|
||||
{
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == CODEC_ID_MPEG1VIDEO)
|
||||
{
|
||||
/* needed to avoid using macroblocks in which some coeffs overflow
|
||||
this doesnt happen with normal video, it just happens here as the
|
||||
motion of the chroma plane doesnt match the luma plane */
|
||||
// c->mb_decision=2;
|
||||
}
|
||||
// some formats want stream headers to be seperate
|
||||
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
*prof = 0;
|
||||
Cvar_GetString("avplug_format", prof, sizeof(prof));
|
||||
// av_opt_set(c->priv_data, "profile", prof, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
return st;
|
||||
AVStream *add_video_stream(struct encctx *ctx, AVCodec *codec, int fps, int width, int height)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
char prof[128];
|
||||
int bitrate = (int)pCvar_GetFloat("avplug_videobitrate");
|
||||
int forcewidth = (int)pCvar_GetFloat("avplug_videoforcewidth");
|
||||
int forceheight = (int)pCvar_GetFloat("avplug_videoforceheight");
|
||||
|
||||
st = avformat_new_stream(ctx->fc, codec);
|
||||
if (!st)
|
||||
{
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec->id;
|
||||
c->codec_type = codec->type;
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = bitrate;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = forcewidth?forcewidth:width;
|
||||
c->height = forceheight?forceheight:height;
|
||||
/* frames per second */
|
||||
c->time_base.num = 1;
|
||||
c->time_base.den = fps;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = PIX_FMT_YUV420P;
|
||||
if (c->codec_id == CODEC_ID_MPEG2VIDEO)
|
||||
{
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == CODEC_ID_MPEG1VIDEO)
|
||||
{
|
||||
/* needed to avoid using macroblocks in which some coeffs overflow
|
||||
this doesnt happen with normal video, it just happens here as the
|
||||
motion of the chroma plane doesnt match the luma plane */
|
||||
// c->mb_decision=2;
|
||||
}
|
||||
// some formats want stream headers to be seperate
|
||||
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
*prof = 0;
|
||||
pCvar_GetString("avplug_format", prof, sizeof(prof));
|
||||
// av_opt_set(c->priv_data, "profile", prof, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
return st;
|
||||
}
|
||||
void close_video(struct encctx *ctx)
|
||||
{
|
||||
if (!ctx->video_st)
|
||||
return;
|
||||
|
||||
avcodec_close(ctx->video_st->codec);
|
||||
if (ctx->picture)
|
||||
{
|
||||
av_free(ctx->picture->data[0]);
|
||||
av_free(ctx->picture);
|
||||
}
|
||||
av_free(ctx->video_outbuf);
|
||||
void close_video(struct encctx *ctx)
|
||||
{
|
||||
if (!ctx->video_st)
|
||||
return;
|
||||
|
||||
avcodec_close(ctx->video_st->codec);
|
||||
if (ctx->picture)
|
||||
{
|
||||
av_free(ctx->picture->data[0]);
|
||||
av_free(ctx->picture);
|
||||
}
|
||||
av_free(ctx->video_outbuf);
|
||||
}
|
||||
static void AVEnc_Video (void *vctx, void *data, int frame, int width, int height)
|
||||
{
|
||||
struct encctx *ctx = vctx;
|
||||
//weird maths to flip it.
|
||||
uint8_t *srcslices[2] = {(uint8_t*)data + (height-1)*width*3, NULL};
|
||||
int srcstride[2] = {-width*3, 0};
|
||||
int success;
|
||||
AVPacket pkt;
|
||||
|
||||
if (!ctx->video_st)
|
||||
return;
|
||||
|
||||
//convert RGB to whatever the codec needs (ie: yuv...).
|
||||
ctx->scale_ctx = sws_getCachedContext(ctx->scale_ctx, width, height, AV_PIX_FMT_RGB24, ctx->picture->width, ctx->picture->height, ctx->video_st->codec->pix_fmt, SWS_POINT, 0, 0, 0);
|
||||
sws_scale(ctx->scale_ctx, srcslices, srcstride, 0, height, ctx->picture->data, ctx->picture->linesize);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
ctx->picture->pts = av_rescale_q(frame, ctx->video_st->codec->time_base, ctx->video_st->time_base);
|
||||
success = 0;
|
||||
static void AVEnc_Video (void *vctx, void *data, int frame, int width, int height)
|
||||
{
|
||||
struct encctx *ctx = vctx;
|
||||
//weird maths to flip it.
|
||||
const uint8_t *srcslices[2] = {(uint8_t*)data + (height-1)*width*3, NULL};
|
||||
int srcstride[2] = {-width*3, 0};
|
||||
int success;
|
||||
AVPacket pkt;
|
||||
|
||||
if (!ctx->video_st)
|
||||
return;
|
||||
|
||||
//convert RGB to whatever the codec needs (ie: yuv...).
|
||||
ctx->scale_ctx = sws_getCachedContext(ctx->scale_ctx, width, height, AV_PIX_FMT_RGB24, ctx->picture->width, ctx->picture->height, ctx->video_st->codec->pix_fmt, SWS_POINT, 0, 0, 0);
|
||||
sws_scale(ctx->scale_ctx, srcslices, srcstride, 0, height, ctx->picture->data, ctx->picture->linesize);
|
||||
|
||||
av_init_packet(&pkt);
|
||||
ctx->picture->pts = av_rescale_q(frame, ctx->video_st->codec->time_base, ctx->video_st->time_base);
|
||||
success = 0;
|
||||
pkt.data = ctx->video_outbuf;
|
||||
pkt.size = ctx->video_outbuf_size;
|
||||
pkt.size = ctx->video_outbuf_size;
|
||||
if (avcodec_encode_video2(ctx->video_st->codec, &pkt, ctx->picture, &success) == 0 && success)
|
||||
{
|
||||
pkt.pts = ctx->video_st->codec->coded_frame->pts;
|
||||
|
@ -144,72 +145,72 @@ static void AVEnc_Video (void *vctx, void *data, int frame, int width, int heigh
|
|||
pkt.data = ctx->video_outbuf;
|
||||
// pkt.size = psize;
|
||||
|
||||
av_write_frame(ctx->fc, &pkt);
|
||||
}
|
||||
av_write_frame(ctx->fc, &pkt);
|
||||
}
|
||||
}
|
||||
|
||||
AVStream *add_audio_stream(struct encctx *ctx, AVCodec *codec, int samplerate, int bits, int channels)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
int bitrate = (int)Cvar_GetFloat("avplug_audiobitrate");
|
||||
|
||||
st = avformat_new_stream(ctx->fc, codec);
|
||||
if (!st)
|
||||
{
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec->id;
|
||||
c->codec_type = codec->type;
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = bitrate;
|
||||
/* frames per second */
|
||||
c->sample_fmt = ((bits==16)?AV_SAMPLE_FMT_S16:AV_SAMPLE_FMT_U8);
|
||||
c->sample_rate = samplerate;
|
||||
c->channels = channels;
|
||||
switch(channels)
|
||||
{
|
||||
case 1:
|
||||
c->channel_layout = AV_CH_FRONT_CENTER;
|
||||
break;
|
||||
case 2:
|
||||
c->channel_layout = AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
// some formats want stream headers to be seperate
|
||||
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
AVStream *add_audio_stream(struct encctx *ctx, AVCodec *codec, int samplerate, int bits, int channels)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
int bitrate = (int)pCvar_GetFloat("avplug_audiobitrate");
|
||||
|
||||
st = avformat_new_stream(ctx->fc, codec);
|
||||
if (!st)
|
||||
{
|
||||
fprintf(stderr, "Could not alloc stream\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = st->codec;
|
||||
c->codec_id = codec->id;
|
||||
c->codec_type = codec->type;
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = bitrate;
|
||||
/* frames per second */
|
||||
c->sample_fmt = ((bits==16)?AV_SAMPLE_FMT_S16:AV_SAMPLE_FMT_U8);
|
||||
c->sample_rate = samplerate;
|
||||
c->channels = channels;
|
||||
switch(channels)
|
||||
{
|
||||
case 1:
|
||||
c->channel_layout = AV_CH_FRONT_CENTER;
|
||||
break;
|
||||
case 2:
|
||||
c->channel_layout = AV_CH_FRONT_LEFT | AV_CH_FRONT_RIGHT;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
// some formats want stream headers to be seperate
|
||||
if (ctx->fc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
}
|
||||
void close_audio(struct encctx *ctx)
|
||||
{
|
||||
if (!ctx->audio_st)
|
||||
return;
|
||||
|
||||
if (!ctx->audio_st)
|
||||
return;
|
||||
|
||||
avcodec_close(ctx->audio_st->codec);
|
||||
}
|
||||
static void AVEnc_Audio (void *vctx, void *data, int bytes)
|
||||
{
|
||||
struct encctx *ctx = vctx;
|
||||
int success;
|
||||
AVPacket pkt;
|
||||
|
||||
ctx->audio->nb_samples = ctx->audio_st->codec->frame_size;
|
||||
if (avcodec_fill_audio_frame(ctx->audio, ctx->audio_st->codec->channels, ctx->audio_st->codec->sample_fmt, data, bytes, 0) < 0)
|
||||
return;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
success = 0;
|
||||
static void AVEnc_Audio (void *vctx, void *data, int bytes)
|
||||
{
|
||||
struct encctx *ctx = vctx;
|
||||
int success;
|
||||
AVPacket pkt;
|
||||
|
||||
ctx->audio->nb_samples = ctx->audio_st->codec->frame_size;
|
||||
if (avcodec_fill_audio_frame(ctx->audio, ctx->audio_st->codec->channels, ctx->audio_st->codec->sample_fmt, data, bytes, 0) < 0)
|
||||
return;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
success = 0;
|
||||
if (avcodec_encode_audio2(ctx->audio_st->codec, &pkt, ctx->audio, &success) == 0 && success)
|
||||
{
|
||||
pkt.pts = ctx->audio_st->codec->coded_frame->pts;
|
||||
|
@ -219,172 +220,172 @@ static void AVEnc_Audio (void *vctx, void *data, int bytes)
|
|||
// pkt.data = ctx->video_outbuf;
|
||||
// pkt.size = psize;
|
||||
|
||||
av_write_frame(ctx->fc, &pkt);
|
||||
}
|
||||
av_write_frame(ctx->fc, &pkt);
|
||||
}
|
||||
}
|
||||
|
||||
static void *AVEnc_Begin (char *streamname, int videorate, int width, int height, int *sndkhz, int *sndchannels, int *sndbits)
|
||||
{
|
||||
struct encctx *ctx;
|
||||
AVOutputFormat *fmt = NULL;
|
||||
AVCodec *videocodec = NULL;
|
||||
AVCodec *audiocodec = NULL;
|
||||
char formatname[64];
|
||||
formatname[0] = 0;
|
||||
Cvar_GetString("avplug_format", formatname, sizeof(formatname));
|
||||
|
||||
if (*formatname)
|
||||
{
|
||||
fmt = av_guess_format(formatname, NULL, NULL);
|
||||
if (!fmt)
|
||||
{
|
||||
Con_Printf("Unknown format specified.\n");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!fmt)
|
||||
fmt = av_guess_format(NULL, streamname, NULL);
|
||||
if (!fmt)
|
||||
{
|
||||
Con_DPrintf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
fmt = av_guess_format("mpeg", NULL, NULL);
|
||||
}
|
||||
if (!fmt)
|
||||
{
|
||||
Con_Printf("Format not known\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (videorate)
|
||||
{
|
||||
char codecname[64];
|
||||
codecname[0] = 0;
|
||||
Cvar_GetString("avplug_videocodec", codecname, sizeof(codecname));
|
||||
|
||||
if (strcmp(codecname, "none"))
|
||||
{
|
||||
if (codecname[0])
|
||||
{
|
||||
videocodec = avcodec_find_encoder_by_name(codecname);
|
||||
if (!videocodec)
|
||||
{
|
||||
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!videocodec && fmt->video_codec != AV_CODEC_ID_NONE)
|
||||
videocodec = avcodec_find_encoder(fmt->video_codec);
|
||||
}
|
||||
}
|
||||
if (*sndkhz)
|
||||
{
|
||||
char codecname[64];
|
||||
codecname[0] = 0;
|
||||
Cvar_GetString("avplug_audiocodec", codecname, sizeof(codecname));
|
||||
|
||||
if (strcmp(codecname, "none"))
|
||||
{
|
||||
if (codecname[0])
|
||||
{
|
||||
audiocodec = avcodec_find_encoder_by_name(codecname);
|
||||
if (!audiocodec)
|
||||
{
|
||||
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!audiocodec && fmt->audio_codec != AV_CODEC_ID_NONE)
|
||||
audiocodec = avcodec_find_encoder(fmt->audio_codec);
|
||||
}
|
||||
}
|
||||
|
||||
Con_DPrintf("Using format \"%s\"\n", fmt->name);
|
||||
if (videocodec)
|
||||
Con_DPrintf("Using Video Codec \"%s\"\n", videocodec->name);
|
||||
else
|
||||
Con_DPrintf("Not encoding video\n");
|
||||
if (audiocodec)
|
||||
Con_DPrintf("Using Audio Codec \"%s\"\n", audiocodec->name);
|
||||
else
|
||||
Con_DPrintf("Not encoding audio\n");
|
||||
|
||||
if (!videocodec && !audiocodec)
|
||||
{
|
||||
Con_DPrintf("Nothing to encode!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!audiocodec)
|
||||
*sndkhz = 0;
|
||||
|
||||
ctx = malloc(sizeof(*ctx));
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
ctx->fc = avformat_alloc_context();
|
||||
ctx->fc->oformat = fmt;
|
||||
snprintf(ctx->fc->filename, sizeof(ctx->fc->filename), "%s", streamname);
|
||||
|
||||
|
||||
//pick default codecs
|
||||
ctx->video_st = NULL;
|
||||
if (videocodec)
|
||||
{
|
||||
ctx->video_st = add_video_stream(ctx, videocodec, videorate, width, height);
|
||||
|
||||
if (ctx->video_st)
|
||||
{
|
||||
AVCodecContext *c = ctx->video_st->codec;
|
||||
if (avcodec_open2(c, videocodec, NULL) < 0)
|
||||
{
|
||||
Con_Printf("Could not init codec instance \"%s\". Maybe try a different framerate/resolution/bitrate\n", videocodec->name);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ctx->picture = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
|
||||
ctx->video_outbuf_size = 200000;
|
||||
ctx->video_outbuf = av_malloc(ctx->video_outbuf_size);
|
||||
if (!ctx->video_outbuf)
|
||||
ctx->video_outbuf_size = 0;
|
||||
}
|
||||
}
|
||||
if (audiocodec)
|
||||
{
|
||||
ctx->audio_st = add_audio_stream(ctx, audiocodec, *sndkhz, *sndbits, *sndchannels);
|
||||
if (ctx->audio_st)
|
||||
{
|
||||
AVCodecContext *c = ctx->audio_st->codec;
|
||||
if (avcodec_open2(c, audiocodec, NULL) < 0)
|
||||
{
|
||||
Con_Printf("Could not init codec instance \"%s\".\n", audiocodec->name);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ctx->audio = avcodec_alloc_frame();
|
||||
}
|
||||
}
|
||||
|
||||
av_dump_format(ctx->fc, 0, streamname, 1);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
{
|
||||
if (avio_open(&ctx->fc->pb, streamname, AVIO_FLAG_WRITE) < 0)
|
||||
{
|
||||
Con_Printf("Could not open '%s'\n", streamname);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
//nearly complete, can make the file dirty now.
|
||||
avformat_write_header(ctx->fc, NULL);
|
||||
ctx->doneheaders = true;
|
||||
return ctx;
|
||||
}
|
||||
static void *AVEnc_Begin (char *streamname, int videorate, int width, int height, int *sndkhz, int *sndchannels, int *sndbits)
|
||||
{
|
||||
struct encctx *ctx;
|
||||
AVOutputFormat *fmt = NULL;
|
||||
AVCodec *videocodec = NULL;
|
||||
AVCodec *audiocodec = NULL;
|
||||
char formatname[64];
|
||||
formatname[0] = 0;
|
||||
pCvar_GetString("avplug_format", formatname, sizeof(formatname));
|
||||
|
||||
if (*formatname)
|
||||
{
|
||||
fmt = av_guess_format(formatname, NULL, NULL);
|
||||
if (!fmt)
|
||||
{
|
||||
Con_Printf("Unknown format specified.\n");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!fmt)
|
||||
fmt = av_guess_format(NULL, streamname, NULL);
|
||||
if (!fmt)
|
||||
{
|
||||
Con_DPrintf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
fmt = av_guess_format("mpeg", NULL, NULL);
|
||||
}
|
||||
if (!fmt)
|
||||
{
|
||||
Con_Printf("Format not known\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (videorate)
|
||||
{
|
||||
char codecname[64];
|
||||
codecname[0] = 0;
|
||||
pCvar_GetString("avplug_videocodec", codecname, sizeof(codecname));
|
||||
|
||||
if (strcmp(codecname, "none"))
|
||||
{
|
||||
if (codecname[0])
|
||||
{
|
||||
videocodec = avcodec_find_encoder_by_name(codecname);
|
||||
if (!videocodec)
|
||||
{
|
||||
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!videocodec && fmt->video_codec != AV_CODEC_ID_NONE)
|
||||
videocodec = avcodec_find_encoder(fmt->video_codec);
|
||||
}
|
||||
}
|
||||
if (*sndkhz)
|
||||
{
|
||||
char codecname[64];
|
||||
codecname[0] = 0;
|
||||
pCvar_GetString("avplug_audiocodec", codecname, sizeof(codecname));
|
||||
|
||||
if (strcmp(codecname, "none"))
|
||||
{
|
||||
if (codecname[0])
|
||||
{
|
||||
audiocodec = avcodec_find_encoder_by_name(codecname);
|
||||
if (!audiocodec)
|
||||
{
|
||||
Con_Printf("Unsupported avplug_codec \"%s\"\n", codecname);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
if (!audiocodec && fmt->audio_codec != AV_CODEC_ID_NONE)
|
||||
audiocodec = avcodec_find_encoder(fmt->audio_codec);
|
||||
}
|
||||
}
|
||||
|
||||
Con_DPrintf("Using format \"%s\"\n", fmt->name);
|
||||
if (videocodec)
|
||||
Con_DPrintf("Using Video Codec \"%s\"\n", videocodec->name);
|
||||
else
|
||||
Con_DPrintf("Not encoding video\n");
|
||||
if (audiocodec)
|
||||
Con_DPrintf("Using Audio Codec \"%s\"\n", audiocodec->name);
|
||||
else
|
||||
Con_DPrintf("Not encoding audio\n");
|
||||
|
||||
if (!videocodec && !audiocodec)
|
||||
{
|
||||
Con_DPrintf("Nothing to encode!\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (!audiocodec)
|
||||
*sndkhz = 0;
|
||||
|
||||
ctx = malloc(sizeof(*ctx));
|
||||
if (!ctx)
|
||||
return NULL;
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
ctx->fc = avformat_alloc_context();
|
||||
ctx->fc->oformat = fmt;
|
||||
snprintf(ctx->fc->filename, sizeof(ctx->fc->filename), "%s", streamname);
|
||||
|
||||
|
||||
//pick default codecs
|
||||
ctx->video_st = NULL;
|
||||
if (videocodec)
|
||||
{
|
||||
ctx->video_st = add_video_stream(ctx, videocodec, videorate, width, height);
|
||||
|
||||
if (ctx->video_st)
|
||||
{
|
||||
AVCodecContext *c = ctx->video_st->codec;
|
||||
if (avcodec_open2(c, videocodec, NULL) < 0)
|
||||
{
|
||||
Con_Printf("Could not init codec instance \"%s\". Maybe try a different framerate/resolution/bitrate\n", videocodec->name);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ctx->picture = alloc_frame(c->pix_fmt, c->width, c->height);
|
||||
|
||||
ctx->video_outbuf_size = 200000;
|
||||
ctx->video_outbuf = av_malloc(ctx->video_outbuf_size);
|
||||
if (!ctx->video_outbuf)
|
||||
ctx->video_outbuf_size = 0;
|
||||
}
|
||||
}
|
||||
if (audiocodec)
|
||||
{
|
||||
ctx->audio_st = add_audio_stream(ctx, audiocodec, *sndkhz, *sndbits, *sndchannels);
|
||||
if (ctx->audio_st)
|
||||
{
|
||||
AVCodecContext *c = ctx->audio_st->codec;
|
||||
if (avcodec_open2(c, audiocodec, NULL) < 0)
|
||||
{
|
||||
Con_Printf("Could not init codec instance \"%s\".\n", audiocodec->name);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ctx->audio = avcodec_alloc_frame();
|
||||
}
|
||||
}
|
||||
|
||||
av_dump_format(ctx->fc, 0, streamname, 1);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
{
|
||||
if (avio_open(&ctx->fc->pb, streamname, AVIO_FLAG_WRITE) < 0)
|
||||
{
|
||||
Con_Printf("Could not open '%s'\n", streamname);
|
||||
AVEnc_End(ctx);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
//nearly complete, can make the file dirty now.
|
||||
avformat_write_header(ctx->fc, NULL);
|
||||
ctx->doneheaders = true;
|
||||
return ctx;
|
||||
}
|
||||
static void AVEnc_End (void *vctx)
|
||||
{
|
||||
struct encctx *ctx = vctx;
|
||||
|
@ -396,40 +397,39 @@ static void AVEnc_End (void *vctx)
|
|||
if (ctx->doneheaders)
|
||||
av_write_trailer(ctx->fc);
|
||||
|
||||
for(i = 0; i < ctx->fc->nb_streams; i++)
|
||||
av_freep(&ctx->fc->streams[i]);
|
||||
// if (!(fmt->flags & AVFMT_NOFILE))
|
||||
avio_close(ctx->fc->pb);
|
||||
for(i = 0; i < ctx->fc->nb_streams; i++)
|
||||
av_freep(&ctx->fc->streams[i]);
|
||||
// if (!(fmt->flags & AVFMT_NOFILE))
|
||||
avio_close(ctx->fc->pb);
|
||||
av_free(ctx->fc);
|
||||
free(ctx);
|
||||
}
|
||||
static media_encoder_funcs_t encoderfuncs =
|
||||
{
|
||||
AVEnc_Begin,
|
||||
AVEnc_Video,
|
||||
AVEnc_Audio,
|
||||
AVEnc_End
|
||||
static media_encoder_funcs_t encoderfuncs =
|
||||
{
|
||||
AVEnc_Begin,
|
||||
AVEnc_Video,
|
||||
AVEnc_Audio,
|
||||
AVEnc_End
|
||||
};
|
||||
|
||||
qboolean AVEnc_Init(void)
|
||||
{
|
||||
pCvar_Register("avplug_format", "", 0, "avplug");
|
||||
|
||||
pCvar_Register("avplug_videocodec", "", 0, "avplug");
|
||||
pCvar_Register("avplug_videocodecprofile", "", 0, "avplug");
|
||||
pCvar_Register("avplug_videobitrate", "4000000", 0, "avplug");
|
||||
pCvar_Register("avplug_videoforcewidth", "", 0, "avplug");
|
||||
pCvar_Register("avplug_videoforceheight", "", 0, "avplug");
|
||||
pCvar_Register("avplug_audiocodec", "", 0, "avplug");
|
||||
pCvar_Register("avplug_audiobitrate", "64000", 0, "avplug");
|
||||
|
||||
if (!pPlug_ExportNative("Media_VideoEncoder", &encoderfuncs))
|
||||
{
|
||||
Con_Printf("avplug: Engine doesn't support media encoder plugins\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
qboolean AVEnc_Init(void)
|
||||
{
|
||||
Cvar_Register("avplug_format", "", 0, "avplug");
|
||||
|
||||
Cvar_Register("avplug_videocodec", "", 0, "avplug");
|
||||
Cvar_Register("avplug_videocodecprofile", "", 0, "avplug");
|
||||
Cvar_Register("avplug_videobitrate", "4000000", 0, "avplug");
|
||||
Cvar_Register("avplug_videoforcewidth", "", 0, "avplug");
|
||||
Cvar_Register("avplug_videoforceheight", "", 0, "avplug");
|
||||
Cvar_Register("avplug_audiocodec", "", 0, "avplug");
|
||||
Cvar_Register("avplug_audiobitrate", "64000", 0, "avplug");
|
||||
|
||||
if (!Plug_ExportNative("Media_VideoEncoder", &encoderfuncs))
|
||||
{
|
||||
Con_Printf("avplug: Engine doesn't support media encoder plugins\n");
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
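To make the ordering inside AVEnc_Begin/AVEnc_Video/AVEnc_End above easier to follow, here is the bare muxer lifecycle it wraps, stripped of the cvar lookups and most error paths. This is a hedged sketch against the same ffmpeg-1.2-era API the plugin targets; "out.mpg" and the fixed 640x480/25fps/4Mbit parameters are placeholders:

/* Sketch: minimal container lifecycle matching AVEnc_Begin -> per-frame writes -> AVEnc_End. */
#include <libavformat/avformat.h>

int demo_record(void)
{
	AVFormatContext *fc;
	AVOutputFormat *fmt;
	AVStream *st;
	AVCodec *codec;

	av_register_all();

	fmt = av_guess_format(NULL, "out.mpg", NULL);	/* deduce container from the name */
	fc = avformat_alloc_context();
	fc->oformat = fmt;

	codec = avcodec_find_encoder(fmt->video_codec);	/* container's default video codec */
	st = avformat_new_stream(fc, codec);	/* what add_video_stream() does */
	st->codec->width = 640;
	st->codec->height = 480;
	st->codec->time_base.num = 1;
	st->codec->time_base.den = 25;
	st->codec->pix_fmt = PIX_FMT_YUV420P;
	st->codec->bit_rate = 4000000;
	if (avcodec_open2(st->codec, codec, NULL) < 0)
		return -1;

	if (avio_open(&fc->pb, "out.mpg", AVIO_FLAG_WRITE) < 0)
		return -1;
	avformat_write_header(fc, NULL);	/* "can make the file dirty now" */

	/* ...per-frame loop: sws_scale into st->codec->pix_fmt, avcodec_encode_video2, av_write_frame... */

	av_write_trailer(fc);
	avio_close(fc->pb);
	av_free(fc);
	return 0;
}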
@@ -1,3 +1,4 @@
#ifndef FTEPLUGIN
typedef enum uploadfmt_e
{
	TF_INVALID,
@@ -10,7 +11,7 @@ typedef enum uploadfmt_e
typedef struct
{
	void *(QDECL *createdecoder)(char *name); //needed
	void *(QDECL *decodeframe)(void *ctx, qboolean nosound, enum uploadfmt_e *fmt, int *width, int *height); //needed
	void *(QDECL *decodeframe)(void *ctx, qboolean nosound, uploadfmt_t *fmt, int *width, int *height); //needed
	void (QDECL *doneframe)(void *ctx, void *img); //basically a free()
	void (QDECL *shutdown)(void *ctx); //probably needed...
	void (QDECL *rewind)(void *ctx);
@@ -30,3 +31,5 @@ typedef struct
	void (QDECL *capture_audio) (void *ctx, void *data, int bytes);
	void (QDECL *capture_end) (void *ctx);
} media_encoder_funcs_t;
#endif

@@ -129,7 +129,10 @@ void strlcpy(char *d, const char *s, int n);
#endif


#ifndef FTEPLUGIN
#ifdef FTEPLUGIN
#define qfalse false
#define qtrue true
#else
#ifdef __cplusplus
typedef enum {qfalse, qtrue} qboolean;
#else
@@ -140,7 +143,7 @@ typedef enum {qfalse, qtrue} qboolean;
typedef float vec3_t[3];
typedef unsigned char qbyte;
#endif
typedef void *qhandle_t;
typedef int qhandle_t;
typedef void* funcptr_t;
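The tables declared above are plain structs of function pointers that the plugin fills in and hands to the engine via Plug_ExportNative. The engine-side consumer isn't part of this diff, but a hedged sketch of how such a table would typically be driven — engine_play_frame and the funcs parameter are hypothetical names, and the fmt pointer follows the pre-change enum uploadfmt_e signature shown above:

/* Hypothetical consumer of media_decoder_funcs_t: open a stream, pull one frame. */
static void engine_play_frame(media_decoder_funcs_t *funcs, char *medianame)
{
	enum uploadfmt_e fmt;
	int width, height;
	void *dec, *pixels;

	dec = funcs->createdecoder(medianame);	/* e.g. "av:demo.ogv" for this plugin */
	if (!dec)
		return;
	pixels = funcs->decodeframe(dec, qfalse, &fmt, &width, &height);
	if (pixels && fmt != TF_INVALID)
	{
		/* upload 'pixels' (TF_BGRA32, width*height*4 bytes) to a texture here */
	}
	funcs->shutdown(dec);	/* AVDec_Destroy in this plugin */
}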