avcodec/mpegvideo: Add const where appropriate
Specifically, add const to the pointed-to-type of pointers that point to something static or that belong to last_pic or next_pic (because modifying these might lead to data races).

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent f1c4e8950e
commit dda009b97d
23 changed files with 123 additions and 117 deletions
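The motivation can be illustrated with a small, self-contained C sketch (the types below are illustrative stand-ins, not FFmpeg's own): the current picture is writable by the decoder, while a reference picture such as next_pic may be read concurrently, so pointers into it get a const pointed-to type and an accidental store through such a pointer becomes a compile-time error instead of a potential data race.

/* Minimal sketch of the const pattern applied by this commit.
 * The struct and function names are hypothetical, not FFmpeg's real types. */
#include <stdio.h>

typedef struct Pic {
    int mb_type[4];                /* stand-in for per-macroblock data        */
} Pic;

typedef struct DecCtx {
    Pic cur_pic;                   /* owned by this decoder, writable         */
    Pic next_pic;                  /* shared reference frame, read-only here  */
} DecCtx;

/* Reader: the pointer into next_pic has a const pointed-to type, so
 * "p->mb_type[i] = x" would no longer compile. */
static int colocated_mb_type(const DecCtx *s, int mb_index)
{
    const Pic *p = &s->next_pic;
    return p->mb_type[mb_index];
}

/* Writer: cur_pic belongs to this decoder instance and stays non-const. */
static void set_cur_mb_type(DecCtx *s, int mb_index, int type)
{
    s->cur_pic.mb_type[mb_index] = type;
}

int main(void)
{
    DecCtx s = { .next_pic.mb_type = { 7, 7, 7, 7 } };
    set_cur_mb_type(&s, 0, colocated_mb_type(&s, 0));
    printf("copied mb_type: %d\n", s.cur_pic.mb_type[0]);
    return 0;
}

The same reasoning covers the pointers to static tables in the hunks below (RLTable, scan tables, length tables): they are only ever read, so their pointed-to type can be const as well.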
@@ -281,7 +281,7 @@ static int h261_decode_block(H261DecContext *h, int16_t *block, int n, int coded
 {
     MpegEncContext *const s = &h->s;
     int level, i, j, run;
-    RLTable *rl = &ff_h261_rl_tcoeff;
+    const RLTable *rl = &ff_h261_rl_tcoeff;
     const uint8_t *scan_table;
 
     /* For the variable length encoding there are two code tables, one being
@@ -167,7 +167,7 @@ static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
 {
     MpegEncContext *const s = &h->s;
     int level, run, i, j, last_index, last_non_zero, sign, slevel, code;
-    RLTable *rl;
+    const RLTable *rl;
 
     rl = &ff_h261_rl_tcoeff;
     if (s->mb_intra) {
@@ -534,7 +534,7 @@ static int h263_decode_block(MpegEncContext * s, int16_t * block,
                              int n, int coded)
 {
     int level, i, j, run;
-    RLTable *rl = &ff_h263_rl_inter;
+    const RLTable *rl = &ff_h263_rl_inter;
     const uint8_t *scan_table;
     GetBitContext gb= s->gb;
 
@@ -719,7 +719,7 @@ static int h263_get_modb(GetBitContext *gb, int pb_frame, int *cbpb)
 
 #define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
 #define tab_bias (tab_size / 2)
-static inline void set_one_direct_mv(MpegEncContext *s, Picture *p, int i)
+static inline void set_one_direct_mv(MpegEncContext *s, const Picture *p, int i)
 {
     int xy = s->block_index[i];
     uint16_t time_pp = s->pp_time;
@@ -750,7 +750,7 @@ static inline void set_one_direct_mv(MpegEncContext *s, Picture *p, int i)
 static int set_direct_mv(MpegEncContext *s)
 {
     const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
-    Picture *p = &s->next_pic;
+    const Picture *p = &s->next_pic;
     int colocated_mb_type = p->mb_type[mb_index];
     int i;
 
@@ -305,7 +305,7 @@ static const int dquant_code[5]= {1,0,9,2,3};
 static void h263_encode_block(MpegEncContext * s, int16_t * block, int n)
 {
     int level, run, last, i, j, last_index, last_non_zero, sign, slevel, code;
-    RLTable *rl;
+    const RLTable *rl;
 
     rl = &ff_h263_rl_inter;
     if (s->mb_intra && !s->h263_aic) {
@@ -160,7 +160,7 @@ static inline int mpeg1_decode_block_inter(MpegEncContext *s,
                                            int16_t *block, int n)
 {
     int level, i, j, run;
-    uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->intra_scantable.permutated;
     const uint16_t *quant_matrix = s->inter_matrix;
     const int qscale = s->qscale;
 
@@ -244,7 +244,7 @@ static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
                                                int16_t *block, int n)
 {
     int level, i, j, run;
-    uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->intra_scantable.permutated;
     const uint16_t *quant_matrix;
     const int qscale = s->qscale;
     int mismatch;
@@ -331,7 +331,7 @@ static inline int mpeg2_decode_block_intra(MpegEncContext *s,
     int level, dc, diff, i, j, run;
     int component;
     const RL_VLC_ELEM *rl_vlc;
-    uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->intra_scantable.permutated;
     const uint16_t *quant_matrix;
     const int qscale = s->qscale;
     int mismatch;
@@ -470,7 +470,7 @@ void ff_mpeg1_encode_slice_header(MpegEncContext *s)
 void ff_mpeg1_encode_picture_header(MpegEncContext *s)
 {
     MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)s;
-    AVFrameSideData *side_data;
+    const AVFrameSideData *side_data;
     mpeg1_encode_sequence_header(s);
 
     /* MPEG-1 picture header */
@@ -557,7 +557,7 @@ void ff_mpeg1_encode_picture_header(MpegEncContext *s)
     side_data = av_frame_get_side_data(s->cur_pic_ptr->f,
                                        AV_FRAME_DATA_STEREO3D);
     if (side_data) {
-        AVStereo3D *stereo = (AVStereo3D *)side_data->data;
+        const AVStereo3D *stereo = (AVStereo3D *)side_data->data;
         uint8_t fpa_type;
 
         switch (stereo->type) {
@@ -711,7 +711,7 @@ static inline void encode_dc(MpegEncContext *s, int diff, int component)
     }
 }
 
-static void mpeg1_encode_block(MpegEncContext *s, int16_t *block, int n)
+static void mpeg1_encode_block(MpegEncContext *s, const int16_t *block, int n)
 {
     int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
     int code, component;
@@ -793,7 +793,7 @@ next_coef:
 }
 
 static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
-                                                      int16_t block[8][64],
+                                                      const int16_t block[8][64],
                                                       int motion_x, int motion_y,
                                                       int mb_block_count,
                                                       int chroma_y_shift)
@@ -1294,8 +1294,8 @@ static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
     MpegEncContext *s = &ctx->m;
     int level, i, last, run, qmul, qadd;
     int av_uninit(dc_pred_dir);
-    RLTable *rl;
-    RL_VLC_ELEM *rl_vlc;
+    const RLTable *rl;
+    const RL_VLC_ELEM *rl_vlc;
     const uint8_t *scan_table;
 
     // Note intra & rvlc should be optimized away if this is inlined
@@ -1653,7 +1653,6 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 {
     Mpeg4DecContext *ctx = s->avctx->priv_data;
     int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
-    int16_t *mot_val;
     static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
     const int xy = s->mb_x + s->mb_y * s->mb_stride;
     int next;
@@ -1784,7 +1783,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
         s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
         s->mv_type = MV_TYPE_8X8;
         for (i = 0; i < 4; i++) {
-            mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
+            int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
             mx = ff_h263_decode_motion(s, pred_x, s->f_code);
             if (mx >= 0xffff)
                 return AVERROR_INVALIDDATA;
@@ -2077,7 +2076,7 @@ static int mpeg4_decode_studio_block(MpegEncContext *s, int32_t block[64], int n
     int cc, dct_dc_size, dct_diff, code, j, idx = 1, group = 0, run = 0,
         additional_code_len, sign, mismatch;
     const VLCElem *cur_vlc = studio_intra_tab[0];
-    uint8_t *const scantable = s->intra_scantable.permutated;
+    const uint8_t *const scantable = s->intra_scantable.permutated;
     const uint16_t *quant_matrix;
     uint32_t flc;
     const int min = -1 * (1 << (s->avctx->bits_per_raw_sample + 6));
@@ -71,7 +71,7 @@ static uint8_t uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
  * @param[in] block_last_index last index in scantable order that refers to a non zero element in block.
  */
 static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
-                                 int block_last_index, uint8_t scantable[64])
+                                 int block_last_index, const uint8_t scantable[64])
 {
     int last = 0;
     int j;
@@ -106,7 +106,7 @@ static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
  * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
  */
 static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
-                                     const int dir[6], uint8_t *st[6],
+                                     const int dir[6], const uint8_t *st[6],
                                      const int zigzag_last_index[6])
 {
     int i, n;
@@ -137,12 +137,12 @@ static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
 * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
  */
 static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
-                                 const int dir[6], uint8_t *st[6],
+                                 const int dir[6], const uint8_t *st[6],
                                  int zigzag_last_index[6])
 {
     int score = 0;
     int i, n;
-    int8_t *const qscale_table = s->cur_pic.qscale_table;
+    const int8_t *const qscale_table = s->cur_pic.qscale_table;
 
     memcpy(zigzag_last_index, s->block_last_index, sizeof(int) * 6);
 
@@ -288,14 +288,14 @@ static inline int mpeg4_get_dc_length(int level, int n)
 * Encode an 8x8 block.
 * @param n block index (0-3 are luma, 4-5 are chroma)
 */
-static inline void mpeg4_encode_block(MpegEncContext *s,
-                                      int16_t *block, int n, int intra_dc,
-                                      uint8_t *scan_table, PutBitContext *dc_pb,
+static inline void mpeg4_encode_block(const MpegEncContext *s,
+                                      const int16_t *block, int n, int intra_dc,
+                                      const uint8_t *scan_table, PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
 {
     int i, last_non_zero;
-    uint32_t *bits_tab;
-    uint8_t *len_tab;
+    const uint32_t *bits_tab;
+    const uint8_t *len_tab;
     const int last_index = s->block_last_index[n];
 
     if (s->mb_intra) { // Note gcc (3.2.1 at least) will optimize this away
@@ -350,11 +350,11 @@ static inline void mpeg4_encode_block(MpegEncContext *s,
 }
 
 static int mpeg4_get_block_length(MpegEncContext *s,
-                                  int16_t *block, int n,
-                                  int intra_dc, uint8_t *scan_table)
+                                  const int16_t *block, int n,
+                                  int intra_dc, const uint8_t *scan_table)
 {
     int i, last_non_zero;
-    uint8_t *len_tab;
+    const uint8_t *len_tab;
     const int last_index = s->block_last_index[n];
     int len = 0;
 
@@ -403,8 +403,10 @@ static int mpeg4_get_block_length(MpegEncContext *s,
     return len;
 }
 
-static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
-                                       int intra_dc[6], uint8_t **scan_table,
+static inline void mpeg4_encode_blocks(MpegEncContext *s,
+                                       const int16_t block[6][64],
+                                       const int intra_dc[6],
+                                       const uint8_t * const *scan_table,
                                        PutBitContext *dc_pb,
                                        PutBitContext *ac_pb)
 {
@@ -796,7 +798,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
     int dc_diff[6]; // dc values with the dc prediction subtracted
     int dir[6]; // prediction direction
     int zigzag_last_index[6];
-    uint8_t *scan_table[6];
+    const uint8_t *scan_table[6];
     int i;
 
     for (i = 0; i < 6; i++)
@@ -22,7 +22,7 @@
 #include "mpegvideodec.h"
 #include "mpeg_er.h"
 
-static void set_erpic(ERPicture *dst, Picture *src)
+static void set_erpic(ERPicture *dst, const Picture *src)
 {
     int i;
 
@@ -595,8 +595,8 @@ void ff_mpv_motion(MpegEncContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t *const *ref_picture,
-                   op_pixels_func (*pix_op)[4],
-                   qpel_mc_func (*qpix_op)[16]);
+                   const op_pixels_func (*pix_op)[4],
+                   const qpel_mc_func (*qpix_op)[16]);
 
 static inline void ff_update_block_index(MpegEncContext *s, int bits_per_raw_sample,
                                          int lowres, int chroma_x_shift)
@@ -4219,8 +4219,8 @@ static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
     int prev_run=0;
     int prev_level=0;
     int qmul, qadd, start_i, last_non_zero, i, dc;
-    uint8_t * length;
-    uint8_t * last_length;
+    const uint8_t *length;
+    const uint8_t *last_length;
     int lambda;
     int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
 
@@ -4533,7 +4533,7 @@ static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
 * permutation up, the block is not (inverse) permutated
 * to scantable order!
 */
-void ff_block_permute(int16_t *block, uint8_t *permutation,
+void ff_block_permute(int16_t *block, const uint8_t *permutation,
                       const uint8_t *scantable, int last)
 {
     int i;
@@ -38,7 +38,7 @@
 static inline int hpel_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                              int src_x, int src_y,
-                              op_pixels_func *pix_op,
+                              const op_pixels_func *pix_op,
                               int motion_x, int motion_y)
 {
     int dxy = 0;
@@ -79,7 +79,7 @@ void mpeg_motion_internal(MpegEncContext *s,
                           int bottom_field,
                           int field_select,
                           uint8_t *const *ref_picture,
-                          op_pixels_func (*pix_op)[4],
+                          const op_pixels_func (*pix_op)[4],
                           int motion_x,
                           int motion_y,
                          int h,
@@ -219,7 +219,7 @@ void mpeg_motion_internal(MpegEncContext *s,
 static void mpeg_motion(MpegEncContext *s,
                         uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                         int field_select, uint8_t *const *ref_picture,
-                        op_pixels_func (*pix_op)[4],
+                        const op_pixels_func (*pix_op)[4],
                         int motion_x, int motion_y, int h, int is_16x8, int mb_y)
 {
 #if !CONFIG_SMALL
@@ -238,7 +238,7 @@ static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
                               uint8_t *dest_cb, uint8_t *dest_cr,
                               int bottom_field, int field_select,
                               uint8_t *const *ref_picture,
-                              op_pixels_func (*pix_op)[4],
+                              const op_pixels_func (*pix_op)[4],
                               int motion_x, int motion_y, int mb_y)
 {
 #if !CONFIG_SMALL
@@ -254,7 +254,7 @@ static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
 }
 
 // FIXME: SIMDify, avg variant, 16x16 version
-static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
+static inline void put_obmc(uint8_t *dst, uint8_t *const src[5], int stride)
 {
     int x;
     uint8_t *const top = src[1];
@@ -310,7 +310,7 @@ static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
 static inline void obmc_motion(MpegEncContext *s,
                                uint8_t *dest, uint8_t *src,
                                int src_x, int src_y,
-                               op_pixels_func *pix_op,
+                               const op_pixels_func *pix_op,
                                int16_t mv[5][2] /* mid top left right bottom */)
 #define MID 0
 {
@@ -339,8 +339,8 @@ static inline void qpel_motion(MpegEncContext *s,
                                uint8_t *dest_cr,
                                int field_based, int bottom_field,
                                int field_select, uint8_t *const *ref_picture,
-                               op_pixels_func (*pix_op)[4],
-                               qpel_mc_func (*qpix_op)[16],
+                               const op_pixels_func (*pix_op)[4],
+                               const qpel_mc_func (*qpix_op)[16],
                                int motion_x, int motion_y, int h)
 {
     const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
@@ -443,7 +443,7 @@ static inline void qpel_motion(MpegEncContext *s,
 static void chroma_4mv_motion(MpegEncContext *s,
                               uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t *const *ref_picture,
-                              op_pixels_func *pix_op,
+                              const op_pixels_func *pix_op,
                               int mx, int my)
 {
     const uint8_t *ptr;
@@ -511,7 +511,7 @@ static inline void apply_obmc(MpegEncContext *s,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
                              uint8_t *const *ref_picture,
-                             op_pixels_func (*pix_op)[4])
+                             const op_pixels_func (*pix_op)[4])
 {
     LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
     const Picture *cur_frame = &s->cur_pic;
@@ -599,8 +599,8 @@ static inline void apply_8x8(MpegEncContext *s,
                             uint8_t *dest_cr,
                             int dir,
                             uint8_t *const *ref_picture,
-                            qpel_mc_func (*qpix_op)[16],
-                            op_pixels_func (*pix_op)[4])
+                            const qpel_mc_func (*qpix_op)[16],
+                            const op_pixels_func (*pix_op)[4])
 {
     int dxy, mx, my, src_x, src_y;
     int i;
@@ -684,8 +684,8 @@ static av_always_inline void mpv_motion_internal(MpegEncContext *s,
                                                  uint8_t *dest_cr,
                                                  int dir,
                                                  uint8_t *const *ref_picture,
-                                                 op_pixels_func (*pix_op)[4],
-                                                 qpel_mc_func (*qpix_op)[16],
+                                                 const op_pixels_func (*pix_op)[4],
+                                                 const qpel_mc_func (*qpix_op)[16],
                                                  int is_mpeg12)
 {
     int i;
@@ -820,8 +820,8 @@ void ff_mpv_motion(MpegEncContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t *const *ref_picture,
-                   op_pixels_func (*pix_op)[4],
-                   qpel_mc_func (*qpix_op)[16])
+                   const op_pixels_func (*pix_op)[4],
+                   const qpel_mc_func (*qpix_op)[16])
 {
     av_assert2(s->out_format == FMT_MPEG1 ||
                s->out_format == FMT_H263 ||
@@ -152,7 +152,7 @@ int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int
 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                        const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
 
-void ff_block_permute(int16_t *block, uint8_t *permutation,
+void ff_block_permute(int16_t *block, const uint8_t *permutation,
                       const uint8_t *scantable, int last);
 
 static inline int get_bits_diff(MpegEncContext *s)
@@ -144,8 +144,8 @@ void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
                 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_pic.data, op_pix);
             }
         } else {
-            op_pixels_func (*op_pix)[4];
-            qpel_mc_func (*op_qpix)[16];
+            const op_pixels_func (*op_pix)[4];
+            const qpel_mc_func (*op_qpix)[16];
 
            if ((is_mpeg12 == DEFINITELY_MPEG12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
                op_pix = s->hdsp.put_pixels_tab;
@@ -627,8 +627,8 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, int16_t * block,
 {
     int level, i, last, run, run_diff;
     int av_uninit(dc_pred_dir);
-    RLTable *rl;
-    RL_VLC_ELEM *rl_vlc;
+    const RLTable *rl;
+    const RL_VLC_ELEM *rl_vlc;
     int qmul, qadd;
 
     if (s->mb_intra) {
@@ -811,7 +811,7 @@ int ff_msmpeg4_decode_block(MpegEncContext * s, int16_t * block,
 
 void ff_msmpeg4_decode_motion(MpegEncContext *s, int *mx_ptr, int *my_ptr)
 {
-    MVTable *mv;
+    const MVTable *mv;
     int code, mx, my;
 
     mv = &ff_mv_tables[s->mv_table_index];
@@ -70,7 +70,7 @@ FF_DISABLE_DEPRECATION_WARNINGS
 FF_ENABLE_DEPRECATION_WARNINGS
 }
 
-static inline double qp2bits(RateControlEntry *rce, double qp)
+static inline double qp2bits(const RateControlEntry *rce, double qp)
 {
     if (qp <= 0.0) {
         av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n");
@@ -83,7 +83,7 @@ static double qp2bits_cb(void *rce, double qp)
     return qp2bits(rce, qp);
 }
 
-static inline double bits2qp(RateControlEntry *rce, double bits)
+static inline double bits2qp(const RateControlEntry *rce, double bits)
 {
     if (bits < 0.9) {
         av_log(NULL, AV_LOG_ERROR, "bits<0.9\n");
@@ -96,7 +96,7 @@ static double bits2qp_cb(void *rce, double qp)
     return bits2qp(rce, qp);
 }
 
-static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q)
+static double get_diff_limited_q(MpegEncContext *s, const RateControlEntry *rce, double q)
 {
     RateControlContext *rcc = &s->rc_context;
     AVCodecContext *a = s->avctx;
@@ -163,7 +163,7 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
     *qmax_ret = qmax;
 }
 
-static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
+static double modify_qscale(MpegEncContext *s, const RateControlEntry *rce,
                             double q, int frame_num)
 {
     RateControlContext *rcc = &s->rc_context;
@@ -385,7 +385,7 @@ static int init_pass2(MpegEncContext *s)
 
     /* find qscale */
     for (i = 0; i < rcc->num_entries; i++) {
-        RateControlEntry *rce = &rcc->entry[i];
+        const RateControlEntry *rce = &rcc->entry[i];
 
         qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
         rcc->last_qscale_for[rce->pict_type] = qscale[i];
@@ -394,20 +394,20 @@ static int init_pass2(MpegEncContext *s)
 
     /* fixed I/B QP relative to P mode */
     for (i = FFMAX(0, rcc->num_entries - 300); i < rcc->num_entries; i++) {
-        RateControlEntry *rce = &rcc->entry[i];
+        const RateControlEntry *rce = &rcc->entry[i];
 
         qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
    }
 
     for (i = rcc->num_entries - 1; i >= 0; i--) {
-        RateControlEntry *rce = &rcc->entry[i];
+        const RateControlEntry *rce = &rcc->entry[i];
 
         qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
    }
 
     /* smooth curve */
     for (i = 0; i < rcc->num_entries; i++) {
-        RateControlEntry *rce = &rcc->entry[i];
+        const RateControlEntry *rce = &rcc->entry[i];
         const int pict_type = rce->new_pict_type;
         int j;
         double q = 0.0, sum = 0.0;
@@ -877,8 +877,8 @@ static void adaptive_quantization(MpegEncContext *s, double q)
 
 void ff_get_2pass_fcode(MpegEncContext *s)
 {
-    RateControlContext *rcc = &s->rc_context;
-    RateControlEntry *rce = &rcc->entry[s->picture_number];
+    const RateControlContext *rcc = &s->rc_context;
+    const RateControlEntry *rce = &rcc->entry[s->picture_number];
 
     s->f_code = rce->f_code;
     s->b_code = rce->b_code;
@@ -929,7 +929,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
        rce = &rcc->entry[picture_number];
        wanted_bits = rce->expected_bits;
     } else {
-        Picture *dts_pic;
+        const Picture *dts_pic;
        rce = &local_rce;
 
        /* FIXME add a dts field to AVFrame and ensure it is set and use it
@@ -681,7 +681,8 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           h264_chroma_mc_func (*chroma_mc))
 {
     MpegEncContext *s = &r->s;
-    uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
+    uint8_t *Y, *U, *V;
+    const uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
     int is16x16 = 1;
@@ -413,9 +413,10 @@ static av_always_inline void vc1_p_h_loop_filter(VC1Context *v, uint8_t *dest, u
     }
 }
 
-static av_always_inline void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
-                                                 uint8_t *is_intra, int16_t (*mv)[2], uint8_t *mv_f,
-                                                 int *ttblk, uint32_t flags, int block_num)
+static av_always_inline
+void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, const uint32_t *cbp,
+                         const uint8_t *is_intra, int16_t (*mv)[2], const uint8_t *mv_f,
+                         const int *ttblk, uint32_t flags, int block_num)
 {
     MpegEncContext *s = &v->s;
     int pq = v->pq;
@@ -799,7 +800,7 @@ void ff_vc1_p_loop_filter(VC1Context *v)
     }
 }
 
-static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
+static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *dest, const int *ttblk,
                                                        uint32_t flags, uint8_t fieldtx, int block_num)
 {
     MpegEncContext *s = &v->s;
@@ -849,8 +850,9 @@ static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *d
     }
 }
 
-static av_always_inline void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
-                                                       uint32_t flags, uint8_t fieldtx, int block_num)
+static av_always_inline
+void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *dest, const int *ttblk,
+                               uint32_t flags, uint8_t fieldtx, int block_num)
 {
     MpegEncContext *s = &v->s;
     int pq = v->pq;
@@ -1109,8 +1111,9 @@ void ff_vc1_p_intfr_loop_filter(VC1Context *v)
     }
 }
 
-static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
-                                                       int *ttblk, uint32_t flags, int block_num)
+static av_always_inline
+void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *dest, const uint32_t *cbp,
+                               const int *ttblk, uint32_t flags, int block_num)
 {
     MpegEncContext *s = &v->s;
     int pq = v->pq;
@@ -1141,8 +1144,9 @@ static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *d
     }
 }
 
-static av_always_inline void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
-                                                       int *ttblk, uint32_t flags, int block_num)
+static av_always_inline
+void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *dest, const uint32_t *cbp,
+                               const int *ttblk, uint32_t flags, int block_num)
 {
     MpegEncContext *s = &v->s;
     int pq = v->pq;
@@ -1174,7 +1178,7 @@ void ff_vc1_b_intfi_loop_filter(VC1Context *v)
     MpegEncContext *s = &v->s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     uint8_t *dest;
-    uint32_t *cbp;
+    const uint32_t *cbp;
     int *ttblk;
     uint32_t flags = 0;
     int i;
@@ -58,7 +58,7 @@ static av_always_inline void vc1_scale_chroma(uint8_t *srcU, uint8_t *srcV,
 }
 
 static av_always_inline void vc1_lut_scale_luma(uint8_t *srcY,
-                                                uint8_t *lut1, uint8_t *lut2,
+                                                const uint8_t *lut1, const uint8_t *lut2,
                                                 int k, int linesize)
 {
     int i, j;
@@ -78,7 +78,7 @@ static av_always_inline void vc1_lut_scale_luma(uint8_t *srcY,
 }
 
 static av_always_inline void vc1_lut_scale_chroma(uint8_t *srcU, uint8_t *srcV,
                                                  uint8_t *lut1, uint8_t *lut2,
                                                   int k, int uvlinesize)
 {
     int i, j;
@@ -177,7 +177,7 @@ void ff_vc1_mc_1mv(VC1Context *v, int dir)
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
     int i;
-    uint8_t (*luty)[256], (*lutuv)[256];
+    const uint8_t (*luty)[256], (*lutuv)[256];
     int use_ic;
     int interlace;
     int linesize, uvlinesize;
@@ -457,7 +457,7 @@ void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
     int off;
     int fieldmv = (v->fcm == ILACE_FRAME) ? v->blk_mv_type[s->block_index[n]] : 0;
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
-    uint8_t (*luty)[256];
+    const uint8_t (*luty)[256];
     int use_ic;
     int interlace;
     int linesize;
@@ -640,7 +640,7 @@ void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
     int16_t tx, ty;
     int chroma_ref_type;
     int v_edge_pos = s->v_edge_pos >> v->field_mode;
-    uint8_t (*lutuv)[256];
+    const uint8_t (*lutuv)[256];
     int use_ic;
     int interlace;
     int uvlinesize;
@@ -851,7 +851,7 @@ void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
     int use_ic;
     int interlace;
     int uvlinesize;
-    uint8_t (*lutuv)[256];
+    const uint8_t (*lutuv)[256];
 
     if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
         return;
@@ -1191,8 +1191,8 @@ void ff_vc1_interp_mc(VC1Context *v)
     }
 
     if (use_ic) {
-        uint8_t (*luty )[256] = v->next_luty;
-        uint8_t (*lutuv)[256] = v->next_lutuv;
+        const uint8_t (*luty )[256] = v->next_luty;
+        const uint8_t (*lutuv)[256] = v->next_lutuv;
         vc1_lut_scale_luma(srcY,
                            luty[v->field_mode ? v->ref_field_type[1] : ((0+src_y - s->mspel) & 1)],
                            luty[v->field_mode ? v->ref_field_type[1] : ((1+src_y - s->mspel) & 1)],
@@ -33,7 +33,7 @@
 #include "vc1_pred.h"
 #include "vc1data.h"
 
-static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int dir)
+static av_always_inline int scaleforsame_x(const VC1Context *v, int n /* MV */, int dir)
 {
     int scaledvalue, refdist;
     int scalesame1, scalesame2;
@@ -66,7 +66,7 @@ static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int di
     return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
 }
 
-static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */, int dir)
+static av_always_inline int scaleforsame_y(const VC1Context *v, int i, int n /* MV */, int dir)
 {
     int scaledvalue, refdist;
     int scalesame1, scalesame2;
@@ -103,7 +103,7 @@ static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */,
     return av_clip(scaledvalue, -v->range_y / 2, v->range_y / 2 - 1);
 }
 
-static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
+static av_always_inline int scaleforopp_x(const VC1Context *v, int n /* MV */)
 {
     int scalezone1_x, zone1offset_x;
     int scaleopp1, scaleopp2, brfd;
@@ -130,7 +130,7 @@ static av_always_inline int scaleforopp_x(VC1Context *v, int n /* MV */)
     return av_clip(scaledvalue, -v->range_x, v->range_x - 1);
 }
 
-static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir)
+static av_always_inline int scaleforopp_y(const VC1Context *v, int n /* MV */, int dir)
 {
     int scalezone1_y, zone1offset_y;
     int scaleopp1, scaleopp2, brfd;
@@ -161,7 +161,7 @@ static av_always_inline int scaleforopp_y(VC1Context *v, int n /* MV */, int dir
     }
 }
 
-static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
+static av_always_inline int scaleforsame(const VC1Context *v, int i, int n /* MV */,
                                          int dim, int dir)
 {
     int brfd, scalesame;
@@ -182,7 +182,7 @@ static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
     return n;
 }
 
-static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
+static av_always_inline int scaleforopp(const VC1Context *v, int n /* MV */,
                                         int dim, int dir)
 {
     int refdist, scaleopp;
@@ -215,7 +215,6 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
 {
     MpegEncContext *s = &v->s;
     int xy, wrap, off = 0;
-    int16_t *A, *B, *C;
     int px, py;
     int sum;
     int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
@@ -301,7 +300,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
     }
 
     if (a_valid) {
-        A = s->cur_pic.motion_val[dir][xy - wrap + v->blocks_off];
+        const int16_t *A = s->cur_pic.motion_val[dir][xy - wrap + v->blocks_off];
         a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
         num_oppfield += a_f;
         num_samefield += 1 - a_f;
@@ -312,7 +311,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
         a_f = 0;
     }
     if (b_valid) {
-        B = s->cur_pic.motion_val[dir][xy - wrap + off + v->blocks_off];
+        const int16_t *B = s->cur_pic.motion_val[dir][xy - wrap + off + v->blocks_off];
         b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
         num_oppfield += b_f;
         num_samefield += 1 - b_f;
@@ -323,7 +322,7 @@ void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
         b_f = 0;
     }
     if (c_valid) {
-        C = s->cur_pic.motion_val[dir][xy - 1 + v->blocks_off];
+        const int16_t *C = s->cur_pic.motion_val[dir][xy - 1 + v->blocks_off];
         c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
         num_oppfield += c_f;
         num_samefield += 1 - c_f;
@@ -692,8 +691,7 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                       int direct, int mvtype)
 {
     MpegEncContext *s = &v->s;
-    int xy, wrap, off = 0;
-    int16_t *A, *B, *C;
+    int xy, wrap;
     int px, py;
     int sum;
     int r_x, r_y;
@@ -743,10 +741,10 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
     }
 
     if ((mvtype == BMV_TYPE_FORWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-        C = s->cur_pic.motion_val[0][xy - 2];
-        A = s->cur_pic.motion_val[0][xy - wrap * 2];
-        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-        B = s->cur_pic.motion_val[0][xy - wrap * 2 + off];
+        int16_t *C = s->cur_pic.motion_val[0][xy - 2];
+        const int16_t *A = s->cur_pic.motion_val[0][xy - wrap * 2];
+        int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+        const int16_t *B = s->cur_pic.motion_val[0][xy - wrap * 2 + off];
 
         if (!s->mb_x) C[0] = C[1] = 0;
         if (!s->first_slice_line) { // predictor A is not out of bounds
@@ -812,10 +810,10 @@ void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
         s->mv[0][0][1] = ((py + dmv_y[0] + r_y) & ((r_y << 1) - 1)) - r_y;
     }
     if ((mvtype == BMV_TYPE_BACKWARD) || (mvtype == BMV_TYPE_INTERPOLATED)) {
-        C = s->cur_pic.motion_val[1][xy - 2];
-        A = s->cur_pic.motion_val[1][xy - wrap * 2];
-        off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
-        B = s->cur_pic.motion_val[1][xy - wrap * 2 + off];
+        int16_t *C = s->cur_pic.motion_val[1][xy - 2];
+        const int16_t *A = s->cur_pic.motion_val[1][xy - wrap * 2];
+        int off = (s->mb_x == (s->mb_width - 1)) ? -2 : 2;
+        const int16_t *B = s->cur_pic.motion_val[1][xy - wrap * 2 + off];
 
         if (!s->mb_x)
             C[0] = C[1] = 0;
@@ -211,7 +211,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 {
     int i, plane, row, sprite;
     int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
-    uint8_t* src_h[2][2];
+    const uint8_t *src_h[2][2];
     int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
     int ysub[2];
     MpegEncContext *s = &v->s;
@@ -235,7 +235,7 @@ static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
                      v->sprite_output_frame->linesize[plane] * row;
 
         for (sprite = 0; sprite <= v->two_sprites; sprite++) {
-            uint8_t *iplane = s->cur_pic.data[plane];
+            const uint8_t *iplane = s->cur_pic.data[plane];
            int iline = s->cur_pic.linesize[plane];
            int ycoord = yoff[sprite] + yadv[sprite] * row;
            int yline = ycoord >> 16;
@@ -667,7 +667,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
        }
    } else { // VC1/WVC1/WVP2
        const uint8_t *start = avctx->extradata;
-       uint8_t *end = avctx->extradata + avctx->extradata_size;
+       const uint8_t *end = avctx->extradata + avctx->extradata_size;
        const uint8_t *next;
        int size, buf2_size;
        uint8_t *buf2 = NULL;
@@ -49,7 +49,8 @@ av_cold void ff_wmv2_common_init(MpegEncContext *s)
 
 void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
                      uint8_t *dest_cb, uint8_t *dest_cr,
-                     uint8_t *const *ref_picture, op_pixels_func (*pix_op)[4],
+                     uint8_t *const *ref_picture,
+                     const op_pixels_func (*pix_op)[4],
                      int motion_x, int motion_y, int h)
 {
     WMV2Context *const w = s->private_ctx;
@@ -39,7 +39,8 @@ void ff_wmv2_common_init(MpegEncContext *s);
 
 void ff_mspel_motion(MpegEncContext *s,
                      uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
-                     uint8_t *const *ref_picture, op_pixels_func (*pix_op)[4],
+                     uint8_t *const *ref_picture,
+                     const op_pixels_func (*pix_op)[4],
                      int motion_x, int motion_y, int h);
 
 