Description: Replace deprecated FFmpeg API
Author: Andreas Cadhalpun <Andreas.Cadhalpun@googlemail.com>
Last-Update: 2015-11-02
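
The substitutions follow the usual FFmpeg 2.x migration: the deprecated
avcodec_alloc_frame()/avcodec_get_frame_defaults() helpers are replaced by the
libavutil AVFrame API, av_free() on frames becomes av_frame_free(), and the
PixelFormat/PIX_FMT_* names become AVPixelFormat/AV_PIX_FMT_*. As a minimal
illustration only (not part of the patch itself; assumes FFmpeg >= 2.0 headers),
the frame lifetime handled by the new calls looks like this:

    #include <libavutil/frame.h>

    static int frame_lifetime_example(void)
      {
      AVFrame * frame = av_frame_alloc();   /* replaces avcodec_alloc_frame() */
      if(!frame)
        return -1;
      av_frame_unref(frame);                /* replaces avcodec_get_frame_defaults() */
      av_frame_free(&frame);                /* replaces av_free(frame); sets frame to NULL */
      return 0;
      }
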
--- libquicktime-1.2.4.orig/plugins/ffmpeg/audio.c
+++ libquicktime-1.2.4/plugins/ffmpeg/audio.c
@@ -1267,7 +1267,7 @@ static int lqt_ffmpeg_encode_audio(quick
pkt.data = codec->chunk_buffer;
pkt.size = codec->chunk_buffer_alloc;

- avcodec_get_frame_defaults(&f);
+ av_frame_unref(&f);
f.nb_samples = codec->avctx->frame_size;

avcodec_fill_audio_frame(&f, channels, codec->avctx->sample_fmt,
--- libquicktime-1.2.4.orig/plugins/ffmpeg/params.c
+++ libquicktime-1.2.4/plugins/ffmpeg/params.c
@@ -158,7 +158,6 @@ enum_t coder_type[] =
{ "Arithmetic", FF_CODER_TYPE_AC },
{ "Raw", FF_CODER_TYPE_RAW },
{ "RLE", FF_CODER_TYPE_RLE },
- { "Deflate", FF_CODER_TYPE_DEFLATE },
};

#define PARAM_ENUM(name, var, arr) \
@@ -253,15 +252,13 @@ void lqt_ffmpeg_set_parameter(AVCodecCon
PARAM_INT("ff_me_penalty_compensation",me_penalty_compensation);
PARAM_INT("ff_bidir_refine",bidir_refine);
PARAM_INT("ff_brd_scale",brd_scale);
- PARAM_INT("ff_scenechange_factor",scenechange_factor);
PARAM_FLAG("ff_flag_qscale",CODEC_FLAG_QSCALE);
PARAM_FLAG("ff_flag_4mv",CODEC_FLAG_4MV);
PARAM_FLAG("ff_flag_qpel",CODEC_FLAG_QPEL);
- PARAM_FLAG("ff_flag_gmc",CODEC_FLAG_GMC);
+ PARAM_DICT_FLAG("ff_flag_gmc", "gmc");
PARAM_FLAG("ff_flag_mv0",CODEC_FLAG_MV0);
// PARAM_FLAG("ff_flag_part",CODEC_FLAG_PART); // Unused
PARAM_FLAG("ff_flag_gray",CODEC_FLAG_GRAY);
- PARAM_FLAG("ff_flag_emu_edge",CODEC_FLAG_EMU_EDGE);
PARAM_FLAG("ff_flag_normalize_aqp",CODEC_FLAG_NORMALIZE_AQP);
// PARAM_FLAG("ff_flag_alt_scan",CODEC_FLAG_ALT_SCAN); // Unused
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
--- libquicktime-1.2.4.orig/plugins/ffmpeg/params.h
+++ libquicktime-1.2.4/plugins/ffmpeg/params.h
@@ -149,7 +149,7 @@ the reference. Unused for constant quant
.type = LQT_PARAMETER_INT, \
.val_default = { .val_int = 0 }, \
.val_min = { .val_int = 0 }, \
- .val_max = { .val_int = FF_MAX_B_FRAMES }, \
+ .val_max = { .val_int = INT_MAX }, \
.help_string = TRS("Maximum number of B-frames between non B-frames") \
}

--- libquicktime-1.2.4.orig/plugins/ffmpeg/video.c
+++ libquicktime-1.2.4/plugins/ffmpeg/video.c
@@ -37,10 +37,10 @@
#endif


-#ifdef PIX_FMT_YUV422P10
-#define PIX_FMT_YUV422P10_OR_DUMMY PIX_FMT_YUV422P10
+#ifdef AV_PIX_FMT_YUV422P10
+#define AV_PIX_FMT_YUV422P10_OR_DUMMY AV_PIX_FMT_YUV422P10
#else
-#define PIX_FMT_YUV422P10_OR_DUMMY -1234
+#define AV_PIX_FMT_YUV422P10_OR_DUMMY -1234
#endif

#if LIBAVCODEC_VERSION_INT >= ((54<<16)|(1<<8)|0)
@@ -90,9 +90,9 @@ typedef struct
int imx_bitrate;
int imx_strip_vbi;

- /* In some cases FFMpeg would report something like PIX_FMT_YUV422P, while
- we would like to treat it as PIX_FMT_YUVJ422P. It's only used for decoding */
- enum PixelFormat reinterpret_pix_fmt;
+ /* In some cases FFMpeg would report something like AV_PIX_FMT_YUV422P, while
+ we would like to treat it as AV_PIX_FMT_YUVJ422P. It's only used for decoding */
+ enum AVPixelFormat reinterpret_pix_fmt;

int is_imx;
int y_offset;
@@ -137,42 +137,42 @@ typedef struct

static const struct
{
- enum PixelFormat ffmpeg_id;
+ enum AVPixelFormat ffmpeg_id;
int lqt_id;
int exact;
}
colormodels[] =
{
- { PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
+ { AV_PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
#if LIBAVUTIL_VERSION_INT < (50<<16)
- { PIX_FMT_YUV422, BC_YUV422, 1 },
+ { AV_PIX_FMT_YUV422, BC_YUV422, 1 },
#else
- { PIX_FMT_YUYV422, BC_YUV422, 1 },
+ { AV_PIX_FMT_YUYV422, BC_YUV422, 1 },
#endif
- { PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
- { PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
- { PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
- { PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
- { PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
- { PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
-#ifdef PIX_FMT_YUV422P10
- { PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
-#endif
- { PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
- { PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
- { PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
- { PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
+ { AV_PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
+ { AV_PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
+ { AV_PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+ { AV_PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
+ { AV_PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
+ { AV_PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+#ifdef AV_PIX_FMT_YUV422P10
+ { AV_PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
+#endif
+ { AV_PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
+ { AV_PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
+ { AV_PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
+ { AV_PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
#if LIBAVUTIL_VERSION_INT < (50<<16)
- { PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ { AV_PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
#else
- { PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ { AV_PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
#endif
- { PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
- { PIX_FMT_GRAY8, BC_RGB888, 0 },
- { PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
- { PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
- { PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
- { PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
+ { AV_PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
+ { AV_PIX_FMT_GRAY8, BC_RGB888, 0 },
+ { AV_PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
+ { AV_PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
+ { AV_PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
+ { AV_PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
};

static const struct
@@ -248,7 +248,7 @@ static int lqt_ffmpeg_delete_video(quick
if(codec->frame_buffer) free(codec->frame_buffer);
if(codec->buffer) free(codec->buffer);

- if(codec->frame) av_free(codec->frame);
+ if(codec->frame) av_frame_free(&codec->frame);

#ifdef HAVE_LIBSWSCALE
if(codec->swsContext)
@@ -343,16 +343,16 @@ static int lqt_tenbit_dnxhd_supported(AV
if (!codec->pix_fmts)
return 0;

- for (i = 0; codec->pix_fmts[i] != PIX_FMT_NONE; ++i)
+ for (i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i)
{
- if (codec->pix_fmts[i] == PIX_FMT_YUV422P10_OR_DUMMY)
+ if (codec->pix_fmts[i] == AV_PIX_FMT_YUV422P10_OR_DUMMY)
return 1;
}

return 0;
}

-static enum PixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
+static enum AVPixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
{
int i;

@@ -361,10 +361,10 @@ static enum PixelFormat lqt_ffmpeg_get_f
if(colormodels[i].lqt_id == id)
return colormodels[i].ffmpeg_id;
}
- return PIX_FMT_NB;
+ return AV_PIX_FMT_NB;
}

-static int lqt_ffmpeg_get_lqt_colormodel(enum PixelFormat id, int * exact)
+static int lqt_ffmpeg_get_lqt_colormodel(enum AVPixelFormat id, int * exact)
{
int i;

@@ -405,31 +405,31 @@ static void lqt_ffmpeg_setup_decoding_co
if (lqt_ffmpeg_get_avid_yuv_range(vtrack->track) == AVID_FULL_YUV_RANGE)
{
vtrack->stream_cmodel = BC_YUVJ422P;
- codec->reinterpret_pix_fmt = PIX_FMT_YUVJ422P;
+ codec->reinterpret_pix_fmt = AV_PIX_FMT_YUVJ422P;
*exact = 1;
return;
}
}
else if(codec->decoder->id == AV_CODEC_ID_DNXHD)
{
- /* FFMpeg supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10 for DNxHD, which
- we sometimes interpret as PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10. */
- if (codec->avctx->pix_fmt == PIX_FMT_YUV422P || codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY)
+ /* FFMpeg supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10 for DNxHD, which
+ we sometimes interpret as AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10. */
+ if (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P || codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY)
{
- int p10 = (codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY);
+ int p10 = (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY);
*exact = 1;
if (lqt_ffmpeg_get_avid_yuv_range(vtrack->track) == AVID_FULL_YUV_RANGE)
{
vtrack->stream_cmodel = p10 ? BC_YUVJ422P10 : BC_YUVJ422P;
- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUVJ422P;
- // Note: reinterpret_pix_fmt should really be PIX_FMT_YUVJ422P10, except
+ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUVJ422P;
+ // Note: reinterpret_pix_fmt should really be AV_PIX_FMT_YUVJ422P10, except
// there is no such colormodel in FFMpeg. Fortunately, it's not a problem
// in this case, as reinterpret_pix_fmt is only used when *exact == 0.
}
else
{
vtrack->stream_cmodel = p10 ? BC_YUV422P10 : BC_YUV422P;
- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUV422P;
+ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUV422P;
}
return;
}
@@ -440,14 +440,14 @@ static void lqt_ffmpeg_setup_encoding_co

if (codec->encoder->id == AV_CODEC_ID_DNXHD)
{
- /* FFMpeg's DNxHD encoder only supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10
- and doesn't know anything about PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10
+ /* FFMpeg's DNxHD encoder only supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10
+ and doesn't know anything about AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10
(in fact, the latter doesn't even exist) */
- codec->avctx->pix_fmt = PIX_FMT_YUV422P;
+ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
if (vtrack->stream_cmodel == BC_YUV422P10 || vtrack->stream_cmodel == BC_YUVJ422P10)
{
if (lqt_tenbit_dnxhd_supported(codec->encoder))
- codec->avctx->pix_fmt = PIX_FMT_YUV422P10_OR_DUMMY;
+ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P10_OR_DUMMY;
}
}
}
@@ -458,7 +458,7 @@ static void lqt_ffmpeg_setup_encoding_co
/* From avcodec.h: */

/*
- * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
+ * AV_PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
* color is put together as:
* (A << 24) | (R << 16) | (G << 8) | B
* This is stored as BGRA on little endian CPU architectures and ARGB on
@@ -530,7 +530,7 @@ static void convert_rgba_to_argb(uint8_t
*/

static void convert_image_decode(quicktime_ffmpeg_video_codec_t *codec,
- AVFrame * in_frame, enum PixelFormat in_format,
+ AVFrame * in_frame, enum AVPixelFormat in_format,
unsigned char ** out_frame, int out_format,
int width, int height, int row_span, int row_span_uv)
{
@@ -547,9 +547,9 @@ static void convert_image_decode(quickti
* RGBA format like in ffmpeg??
*/
#if LIBAVUTIL_VERSION_INT < (50<<16)
- if((in_format == PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
+ if((in_format == AV_PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
#else
- if((in_format == PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
+ if((in_format == AV_PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
#endif
{
convert_image_decode_rgba(in_frame, out_frame, width, height, codec->y_offset);
@@ -829,7 +829,7 @@ static int lqt_ffmpeg_decode_video(quick
if(avcodec_open2(codec->avctx, codec->decoder, NULL) != 0)
return -1;
#endif
- codec->frame = avcodec_alloc_frame();
+ codec->frame = av_frame_alloc();
vtrack->stream_cmodel = LQT_COLORMODEL_NONE;
codec->initialized = 1;
}
@@ -929,10 +929,10 @@ static int lqt_ffmpeg_decode_video(quick
#ifdef HAVE_LIBSWSCALE

#if LIBAVUTIL_VERSION_INT < (50<<16)
- if(!((codec->avctx->pix_fmt == PIX_FMT_RGBA32) &&
+ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGBA32) &&
(vtrack->stream_cmodel == BC_RGBA8888)))
#else
- if(!((codec->avctx->pix_fmt == PIX_FMT_RGB32) &&
+ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGB32) &&
(vtrack->stream_cmodel == BC_RGBA8888)))
#endif
{
@@ -1318,7 +1318,7 @@ static int lqt_ffmpeg_encode_video(quick

if(!codec->initialized)
{
- codec->frame = avcodec_alloc_frame();
+ codec->frame = av_frame_alloc();

/* time_base is 1/framerate for constant framerate */

@@ -1396,9 +1396,9 @@ static int lqt_ffmpeg_encode_video(quick
if(vtrack->stream_cmodel == BC_RGBA8888)
{
/* Libquicktime doesn't natively support a color model equivalent
- to PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
+ to AV_PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
So, we use BC_RGBA8888 and do ad hoc conversion below. */
- codec->avctx->pix_fmt = PIX_FMT_ARGB;
+ codec->avctx->pix_fmt = AV_PIX_FMT_ARGB;
vtrack->track->mdia.minf.stbl.stsd.table[0].depth = 32;
}
}
@@ -1467,7 +1467,7 @@ static int lqt_ffmpeg_encode_video(quick
}
// codec->lqt_colormodel = ffmepg_2_lqt(codec->com.ffcodec_enc);

- if(codec->y_offset != 0 || codec->avctx->pix_fmt == PIX_FMT_ARGB)
+ if(codec->y_offset != 0 || codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
{
if(!codec->tmp_rows)
{
@@ -1492,7 +1492,7 @@ static int lqt_ffmpeg_encode_video(quick
vtrack->stream_cmodel,
0, 0, 0, codec->y_offset);
}
- else if(codec->avctx->pix_fmt == PIX_FMT_ARGB)
+ else if(codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
{
convert_rgba_to_argb(row_pointers[0], vtrack->stream_row_span,
codec->tmp_rows[0], codec->tmp_row_span,
@@ -1600,7 +1600,7 @@ static int lqt_ffmpeg_encode_video(quick
{
int advanced = 0;
if(codec->avctx->max_b_frames ||
- (codec->avctx->flags & (CODEC_FLAG_QPEL|CODEC_FLAG_GMC)))
+ (codec->avctx->flags & (AV_CODEC_FLAG_QPEL|CODEC_FLAG_GMC)))
advanced = 1;

setup_header_mpeg4(file, track, codec->avctx->extradata,