avcodec/mpegutils: Don't use MB_TYPE_L[01] for mpegvideo

MB_TYPE_L[01] is based upon H.264 terminology (the L stands for
list); yet the mpegvideo-based decoders don't have lists of
reference frames: they have at most one forward and one
backward reference. So use terminology based upon this.

This also has a second advantage: each MB_TYPE_L[01] flag is
actually an OR of two flags (which are set independently for
H.264, but never independently for mpegvideo). Switching to
dedicated single-bit flags makes the mb_type flags used by
mpegvideo fit into an int16_t, which will be useful in future
commits.

The only downside to this is a very small amount of additional
code in error_resilience.c and mpegutils.c (the only code shared
between the H.264 decoder and mpegvideo), which now has to
handle both conventions.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Andreas Rheinhardt 2024-06-03 00:07:04 +02:00
parent 07ae09bdf1
commit f5d5b80f3c
12 changed files with 123 additions and 111 deletions
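
For orientation before the diff: a minimal sketch (not part of the patch; the values mirror the mpegutils.h hunk further down, while MB_TYPE_P1L1 and the MV_DIR_* values are assumed from the surrounding FFmpeg headers) of the two bit layouts. The H.264 list flags are ORs of per-partition bits spanning bits 12-15, whereas the new mpegvideo flags need only one bit per direction:

/* H.264-style: one bit per (partition, list) pair; MB_TYPE_L0/L1 are ORs of two bits. */
#define MB_TYPE_P0L0        (1 << 12)  /* partition 0 predicted from list 0 */
#define MB_TYPE_P1L0        (1 << 13)  /* partition 1 predicted from list 0 */
#define MB_TYPE_P0L1        (1 << 14)
#define MB_TYPE_P1L1        (1 << 15)  /* assumed value; not shown in the hunk below */
#define MB_TYPE_L0          (MB_TYPE_P0L0 | MB_TYPE_P1L0)
#define MB_TYPE_L1          (MB_TYPE_P0L1 | MB_TYPE_P1L1)

/* mpegvideo after this commit: one bit per prediction direction, so the
 * direction information occupies only bits 12-13 instead of 12-15. */
#define MB_TYPE_FORWARD_MV  (1 << 12)
#define MB_TYPE_BACKWARD_MV (1 << 13)
#define MB_TYPE_BIDIR_MV    (MB_TYPE_FORWARD_MV | MB_TYPE_BACKWARD_MV)

/* Assuming MV_DIR_FORWARD == 1 and MV_DIR_BACKWARD == 2 (mpegvideo.h), the new
 * bits map directly onto mv_dir; this is what MB_TYPE_MV_2_MV_DIR() exploits:
 * (mb_type >> 12) & 3 yields the MV_DIR_* mask. */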


@ -902,6 +902,7 @@ void ff_er_frame_end(ERContext *s, int *decode_error_flags)
int threshold = 50;
int is_intra_likely;
int size = s->b8_stride * 2 * s->mb_height;
int guessed_mb_type;
/* We do not support ER of field pictures yet,
* though it should not crash if enabled. */
@ -1117,16 +1118,15 @@ void ff_er_frame_end(ERContext *s, int *decode_error_flags)
is_intra_likely = is_intra_more_likely(s);
/* set unknown mb-type to most likely */
guessed_mb_type = is_intra_likely ? MB_TYPE_INTRA4x4 :
(MB_TYPE_16x16 | (s->avctx->codec_id == AV_CODEC_ID_H264 ? MB_TYPE_L0 : MB_TYPE_FORWARD_MV));
for (i = 0; i < s->mb_num; i++) {
const int mb_xy = s->mb_index2xy[i];
int error = s->error_status_table[mb_xy];
if (!((error & ER_DC_ERROR) && (error & ER_MV_ERROR)))
continue;
if (is_intra_likely)
s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA4x4;
else
s->cur_pic.mb_type[mb_xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[mb_xy] = guessed_mb_type;
}
// change inter to intra blocks if no reference frames are available


@ -232,7 +232,7 @@ static int h261_decode_mb_skipped(H261DecContext *h, int mba1, int mba2)
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
@ -460,7 +460,7 @@ static int h261_decode_mb(H261DecContext *h)
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
s->mv[0][0][0] = h->current_mv_x * 2; // gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;


@ -60,18 +60,18 @@
#define CBPC_B_VLC_BITS 3
static const int h263_mb_type_b_map[15]= {
MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_L0 | MB_TYPE_16x16,
MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
MB_TYPE_L1 | MB_TYPE_16x16,
MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
MB_TYPE_L0L1 | MB_TYPE_16x16,
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
MB_TYPE_DIRECT2 | MB_TYPE_BIDIR_MV,
MB_TYPE_DIRECT2 | MB_TYPE_BIDIR_MV | MB_TYPE_CBP,
MB_TYPE_DIRECT2 | MB_TYPE_BIDIR_MV | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_FORWARD_MV | MB_TYPE_16x16,
MB_TYPE_FORWARD_MV | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_FORWARD_MV | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
MB_TYPE_BACKWARD_MV | MB_TYPE_16x16,
MB_TYPE_BACKWARD_MV | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_BACKWARD_MV | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
MB_TYPE_BIDIR_MV | MB_TYPE_16x16,
MB_TYPE_BIDIR_MV | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_BIDIR_MV | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
0, //stuffing
MB_TYPE_INTRA4x4 | MB_TYPE_CBP,
MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT,
@ -363,7 +363,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc, INTER_MCBPC_VLC_BITS, 2);
@ -382,7 +382,7 @@ static void preview_obmc(MpegEncContext *s){
}
if ((cbpc & 16) == 0) {
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
/* 16x16 motion prediction */
mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
@ -400,7 +400,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_FORWARD_MV;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
@ -763,7 +763,7 @@ static int set_direct_mv(MpegEncContext *s)
s->mv_type = MV_TYPE_8X8;
for (i = 0; i < 4; i++)
set_one_direct_mv(s, p, i);
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_BIDIR_MV;
} else {
set_one_direct_mv(s, p, 0);
s->mv[0][1][0] =
@ -780,7 +780,7 @@ static int set_direct_mv(MpegEncContext *s)
s->mv[1][3][1] = s->mv[1][0][1];
s->mv_type = MV_TYPE_8X8;
// Note see prev line
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1;
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_BIDIR_MV;
}
}
@ -803,7 +803,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
@ -841,7 +841,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@ -866,7 +866,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_FORWARD_MV;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@ -952,7 +952,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_type= MV_TYPE_16X16;
//FIXME UMV
if(USES_LIST(mb_type, 0)){
if (HAS_FORWARD_MV(mb_type)) {
int16_t *mot_val= ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
s->mv_dir = MV_DIR_FORWARD;
@ -979,7 +979,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
mot_val[1 ]= mot_val[3 ]= mot_val[1+2*stride]= mot_val[3+2*stride]= my;
}
if(USES_LIST(mb_type, 1)){
if (HAS_BACKWARD_MV(mb_type)) {
int16_t *mot_val= ff_h263_pred_motion(s, 0, 1, &pred_x, &pred_y);
s->mv_dir |= MV_DIR_BACKWARD;


@ -697,11 +697,11 @@ void ff_h263_update_mb(MpegEncContext *s)
s->cur_pic.mbskip_table[mb_xy] = s->mb_skipped;
if (s->mv_type == MV_TYPE_8X8)
s->cur_pic.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
s->cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_8x8;
else if(s->mb_intra)
s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA;
else
s->cur_pic.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
s->cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
ff_h263_update_motion_val(s);
}


@ -97,26 +97,26 @@ typedef struct Mpeg1Context {
static const uint32_t ptype2mb_type[7] = {
MB_TYPE_INTRA,
MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_L0,
MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_FORWARD_MV | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_FORWARD_MV,
MB_TYPE_FORWARD_MV | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_INTRA,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_FORWARD_MV | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_QUANT | MB_TYPE_FORWARD_MV | MB_TYPE_CBP,
};
static const uint32_t btype2mb_type[11] = {
MB_TYPE_INTRA,
MB_TYPE_L1,
MB_TYPE_L1 | MB_TYPE_CBP,
MB_TYPE_L0,
MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_L0L1,
MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_BACKWARD_MV,
MB_TYPE_BACKWARD_MV | MB_TYPE_CBP,
MB_TYPE_FORWARD_MV,
MB_TYPE_FORWARD_MV | MB_TYPE_CBP,
MB_TYPE_BIDIR_MV,
MB_TYPE_BIDIR_MV | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_INTRA,
MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_BACKWARD_MV | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_FORWARD_MV | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_BIDIR_MV | MB_TYPE_CBP,
};
/* as H.263, but only 17 codes */
@ -438,7 +438,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
s->cur_pic.mb_type[s->mb_x + s->mb_y * s->mb_stride] =
MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
} else {
int mb_type;
@ -579,7 +579,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
} else {
av_assert2(mb_type & MB_TYPE_L0L1);
av_assert2(mb_type & MB_TYPE_BIDIR_MV);
// FIXME decide if MBs in field pictures are MB_TYPE_INTERLACED
/* get additional motion vector type */
if (s->picture_structure == PICT_FRAME && s->frame_pred_frame_dct) {
@ -594,7 +594,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
s->qscale = mpeg_get_qscale(s);
/* motion vectors */
s->mv_dir = (mb_type >> 13) & 3;
s->mv_dir = MB_TYPE_MV_2_MV_DIR(mb_type);
ff_tlog(s->avctx, "motion_type=%d\n", motion_type);
switch (motion_type) {
case MT_FRAME: /* or MT_16X8 */
@ -602,7 +602,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
mb_type |= MB_TYPE_16x16;
s->mv_type = MV_TYPE_16X16;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
if (HAS_MV(mb_type, i)) {
/* MT_FRAME */
s->mv[i][0][0] =
s->last_mv[i][0][0] =
@ -625,7 +625,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
s->mv_type = MV_TYPE_16X8;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
if (HAS_MV(mb_type, i)) {
/* MT_16X8 */
for (j = 0; j < 2; j++) {
s->field_select[i][j] = get_bits1(&s->gb);
@ -645,7 +645,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
if (s->picture_structure == PICT_FRAME) {
mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
if (HAS_MV(mb_type, i)) {
for (j = 0; j < 2; j++) {
s->field_select[i][j] = get_bits1(&s->gb);
val = mpeg_decode_motion(s, s->mpeg_f_code[i][0],
@ -665,7 +665,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
av_assert0(!s->progressive_sequence);
mb_type |= MB_TYPE_16x16 | MB_TYPE_INTERLACED;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
if (HAS_MV(mb_type, i)) {
s->field_select[i][0] = get_bits1(&s->gb);
for (k = 0; k < 2; k++) {
val = mpeg_decode_motion(s, s->mpeg_f_code[i][k],
@ -685,7 +685,7 @@ static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
}
s->mv_type = MV_TYPE_DMV;
for (i = 0; i < 2; i++) {
if (USES_LIST(mb_type, i)) {
if (HAS_MV(mb_type, i)) {
int dmx, dmy, mx, my, m;
const int my_shift = s->picture_structure == PICT_FRAME;


@ -141,7 +141,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
s->mv_type = MV_TYPE_8X8;
for (i = 0; i < 4; i++)
ff_mpeg4_set_one_direct_mv(s, mx, my, i);
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_L0L1;
return MB_TYPE_DIRECT2 | MB_TYPE_8x8 | MB_TYPE_BIDIR_MV;
} else if (IS_INTERLACED(colocated_mb_type)) {
s->mv_type = MV_TYPE_FIELD;
for (i = 0; i < 2; i++) {
@ -169,7 +169,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
(time_pb - time_pp) / time_pp;
}
return MB_TYPE_DIRECT2 | MB_TYPE_16x8 |
MB_TYPE_L0L1 | MB_TYPE_INTERLACED;
MB_TYPE_BIDIR_MV | MB_TYPE_INTERLACED;
} else {
ff_mpeg4_set_one_direct_mv(s, mx, my, 0);
s->mv[0][1][0] =
@ -190,6 +190,6 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
else
s->mv_type = MV_TYPE_8X8;
// Note see prev line
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_L0L1;
return MB_TYPE_DIRECT2 | MB_TYPE_16x16 | MB_TYPE_BIDIR_MV;
}
}


@ -68,10 +68,10 @@ static VLCElem studio_chroma_dc[528];
static const uint8_t mpeg4_block_count[4] = { 0, 6, 8, 12 };
static const int mb_type_b_map[4] = {
MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
MB_TYPE_L0L1 | MB_TYPE_16x16,
MB_TYPE_L1 | MB_TYPE_16x16,
MB_TYPE_L0 | MB_TYPE_16x16,
MB_TYPE_DIRECT2 | MB_TYPE_BIDIR_MV,
MB_TYPE_BIDIR_MV | MB_TYPE_16x16,
MB_TYPE_BACKWARD_MV | MB_TYPE_16x16,
MB_TYPE_FORWARD_MV | MB_TYPE_16x16,
};
static void gmc1_motion(MpegEncContext *s, const Mpeg4DecContext *ctx,
@ -1010,13 +1010,13 @@ try_again:
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
mx = my = 0;
}
mot_val[0] =
@ -1081,13 +1081,13 @@ try_again:
if (my >= 0xffff)
return AVERROR_INVALIDDATA;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
} else {
mx = get_amv(ctx, 0);
my = get_amv(ctx, 1);
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
MB_TYPE_GMC |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
}
mot_val[0] =
@ -1101,7 +1101,7 @@ try_again:
} else {
int i;
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
for (i = 0; i < 4; i++) {
int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
mx = ff_h263_decode_motion(s, pred_x, s->f_code);
@ -1675,16 +1675,15 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_GMC |
MB_TYPE_16x16 |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
s->mcsel = 1;
s->mv[0][0][0] = get_amv(ctx, 0);
s->mv[0][0][1] = get_amv(ctx, 1);
s->cur_pic.mbskip_table[xy] = 0;
s->mb_skipped = 0;
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 |
MB_TYPE_FORWARD_MV;
s->mcsel = 0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
@ -1729,9 +1728,8 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
if (s->mcsel) {
s->cur_pic.mb_type[xy] = MB_TYPE_GMC |
MB_TYPE_16x16 |
MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_GMC | MB_TYPE_16x16 |
MB_TYPE_FORWARD_MV;
/* 16x16 global motion prediction */
s->mv_type = MV_TYPE_16X16;
mx = get_amv(ctx, 0);
@ -1739,8 +1737,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
} else if ((!s->progressive_sequence) && get_bits1(&s->gb)) {
s->cur_pic.mb_type[xy] = MB_TYPE_16x8 |
MB_TYPE_L0 |
s->cur_pic.mb_type[xy] = MB_TYPE_16x8 | MB_TYPE_FORWARD_MV |
MB_TYPE_INTERLACED;
/* 16x8 field motion prediction */
s->mv_type = MV_TYPE_FIELD;
@ -1763,7 +1760,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][i][1] = my;
}
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@ -1780,7 +1777,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][1] = my;
}
} else {
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->cur_pic.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_FORWARD_MV;
s->mv_type = MV_TYPE_8X8;
for (i = 0; i < 4; i++) {
int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@ -1832,14 +1829,14 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[1][0][1] = 0;
s->cur_pic.mb_type[xy] = MB_TYPE_SKIP |
MB_TYPE_16x16 |
MB_TYPE_L0;
MB_TYPE_FORWARD_MV;
goto end;
}
modb1 = get_bits1(&s->gb);
if (modb1) {
// like MB_TYPE_B_DIRECT but no vectors coded
mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_L0L1;
mb_type = MB_TYPE_DIRECT2 | MB_TYPE_SKIP | MB_TYPE_BIDIR_MV;
cbp = 0;
} else {
modb2 = get_bits1(&s->gb);
@ -1869,11 +1866,11 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
mb_type |= MB_TYPE_16x8 | MB_TYPE_INTERLACED;
mb_type &= ~MB_TYPE_16x16;
if (USES_LIST(mb_type, 0)) {
if (HAS_FORWARD_MV(mb_type)) {
s->field_select[0][0] = get_bits1(&s->gb);
s->field_select[0][1] = get_bits1(&s->gb);
}
if (USES_LIST(mb_type, 1)) {
if (HAS_BACKWARD_MV(mb_type)) {
s->field_select[1][0] = get_bits1(&s->gb);
s->field_select[1][1] = get_bits1(&s->gb);
}
@ -1884,7 +1881,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
if ((mb_type & (MB_TYPE_DIRECT2 | MB_TYPE_INTERLACED)) == 0) {
s->mv_type = MV_TYPE_16X16;
if (USES_LIST(mb_type, 0)) {
if (HAS_FORWARD_MV(mb_type)) {
s->mv_dir = MV_DIR_FORWARD;
mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
@ -1897,7 +1894,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][1] = my;
}
if (USES_LIST(mb_type, 1)) {
if (HAS_BACKWARD_MV(mb_type)) {
s->mv_dir |= MV_DIR_BACKWARD;
mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
@ -1912,7 +1909,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
} else if (!IS_DIRECT(mb_type)) {
s->mv_type = MV_TYPE_FIELD;
if (USES_LIST(mb_type, 0)) {
if (HAS_FORWARD_MV(mb_type)) {
s->mv_dir = MV_DIR_FORWARD;
for (i = 0; i < 2; i++) {
@ -1924,7 +1921,7 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
}
}
if (USES_LIST(mb_type, 1)) {
if (HAS_BACKWARD_MV(mb_type)) {
s->mv_dir |= MV_DIR_BACKWARD;
for (i = 0; i < 2; i++) {


@ -104,7 +104,9 @@ void ff_draw_horiz_band(AVCodecContext *avctx,
y, picture_structure, h);
}
static char get_type_mv_char(int mb_type)
#define HAS_MV_EXT(mb_type, dir, flags) ((mb_type) & flags[(dir)])
static char get_type_mv_char(int mb_type, const int mb_type_mv_flags[2])
{
// Type & MV direction
if (IS_PCM(mb_type))
@ -125,12 +127,12 @@ static char get_type_mv_char(int mb_type)
return 'G';
else if (IS_SKIP(mb_type))
return 'S';
else if (!USES_LIST(mb_type, 1))
else if (!HAS_MV_EXT(mb_type, 1, mb_type_mv_flags))
return '>';
else if (!USES_LIST(mb_type, 0))
else if (!HAS_MV_EXT(mb_type, 0, mb_type_mv_flags))
return '<';
else {
av_assert2(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
av_assert2(HAS_MV_EXT(mb_type, 0, mb_type_mv_flags) && HAS_MV_EXT(mb_type, 1, mb_type_mv_flags));
return 'X';
}
}
@ -162,12 +164,15 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict,
const int8_t *qscale_table, int16_t (*const motion_val[2])[2],
int mb_width, int mb_height, int mb_stride, int quarter_sample)
{
const int is_h264 = avctx->codec_id == AV_CODEC_ID_H264;
const int mb_type_mv_flags[2] = { is_h264 ? MB_TYPE_L0 : MB_TYPE_FORWARD_MV,
is_h264 ? MB_TYPE_L1 : MB_TYPE_BACKWARD_MV };
if ((avctx->export_side_data & AV_CODEC_EXPORT_DATA_MVS) && mbtype_table && motion_val[0]) {
const int shift = 1 + quarter_sample;
const int scale = 1 << shift;
const int mv_sample_log2 = avctx->codec_id == AV_CODEC_ID_H264 ? 2 : 1;
const int mv_stride = (mb_width << mv_sample_log2) +
(avctx->codec->id == AV_CODEC_ID_H264 ? 0 : 1);
const int mv_sample_log2 = is_h264 ? 2 : 1;
const int mv_stride = (mb_width << mv_sample_log2) + !is_h264;
int mb_x, mb_y, mbcount = 0;
/* size is width * height * 2 * 4 where 2 is for directions and 4 is
@ -180,7 +185,7 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict,
for (mb_x = 0; mb_x < mb_width; mb_x++) {
int i, direction, mb_type = mbtype_table[mb_x + mb_y * mb_stride];
for (direction = 0; direction < 2; direction++) {
if (!USES_LIST(mb_type, direction))
if (!HAS_MV_EXT(mb_type, direction, mb_type_mv_flags))
continue;
if (IS_8X8(mb_type)) {
for (i = 0; i < 4; i++) {
@ -299,7 +304,7 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict,
int mb_type = mbtype_table[x + y * mb_stride];
av_bprintf(&buf, "%c%c%c",
get_type_mv_char(mb_type),
get_type_mv_char(mb_type, mb_type_mv_flags),
get_segmentation_char(mb_type),
get_interlacement_char(mb_type));
}
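
The hunk above is the small cost mentioned in the commit message: debug code shared with the H.264 decoder can no longer hard-code which bits mean "has a forward/backward MV", so the per-codec masks are passed in. A standalone sketch of that idea (hypothetical helper, not part of the patch):

/* Hypothetical illustration: one query serves both conventions because the
 * caller supplies its codec's masks instead of a fixed MB_TYPE_L[01]. */
static int mb_has_mv(int mb_type, int dir, int is_h264)
{
    const int flags[2] = { is_h264 ? MB_TYPE_L0 : MB_TYPE_FORWARD_MV,   /* dir 0: forward / list 0  */
                           is_h264 ? MB_TYPE_L1 : MB_TYPE_BACKWARD_MV   /* dir 1: backward / list 1 */ };
    return mb_type & flags[dir];
}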


@ -47,6 +47,10 @@
#define MB_TYPE_DIRECT2 (1 << 8) // FIXME
#define MB_TYPE_CBP (1 << 10)
#define MB_TYPE_QUANT (1 << 11)
#define MB_TYPE_FORWARD_MV (1 << 12)
#define MB_TYPE_BACKWARD_MV (1 << 13)
#define MB_TYPE_BIDIR_MV (MB_TYPE_FORWARD_MV | MB_TYPE_BACKWARD_MV)
// MB_TYPE_P[01]L[01], MB_TYPE_L[01] and MB_TYPE_L0L1 are H.264 only.
#define MB_TYPE_P0L0 (1 << 12)
#define MB_TYPE_P1L0 (1 << 13)
#define MB_TYPE_P0L1 (1 << 14)
@ -90,6 +94,12 @@
#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0 | MB_TYPE_P1L0) << (2 * (list))))
#define HAS_CBP(a) ((a) & MB_TYPE_CBP)
#define HAS_FORWARD_MV(a) ((a) & MB_TYPE_FORWARD_MV)
#define HAS_BACKWARD_MV(a) ((a) & MB_TYPE_BACKWARD_MV)
// dir == 0 means forward, dir == 1 is backward
#define HAS_MV(a, dir) ((a) & (MB_TYPE_FORWARD_MV << (dir)))
#define MB_TYPE_MV_2_MV_DIR(a) (((a) >> 12) & (MV_DIR_FORWARD | MV_DIR_BACKWARD))
/**
* Draw a horizontal band if supported.


@ -120,7 +120,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
*mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
*mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
return 0;
}
}
@ -170,7 +170,7 @@ static int msmpeg4v12_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_type = MV_TYPE_16X16;
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
*mb_type_ptr = MB_TYPE_L0 | MB_TYPE_16x16;
*mb_type_ptr = MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
} else {
int v;
if (s->msmpeg4_version == MSMP4_V2) {
@ -226,7 +226,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
*mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
*mb_type_ptr = MB_TYPE_SKIP | MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
return 0;
}
@ -265,7 +265,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, int16_t block[6][64])
s->mv_type = MV_TYPE_16X16;
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
*mb_type_ptr = MB_TYPE_L0 | MB_TYPE_16x16;
*mb_type_ptr = MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
} else {
ff_dlog(s, "I at %d %d %d %06X\n", s->mb_x, s->mb_y,
((cbp & 3) ? 1 : 0) +((cbp & 0x3C)? 2 : 0),


@ -59,16 +59,16 @@ static inline void ZERO8x2(void* dst, int stride)
static const int rv34_mb_type_to_lavc[12] = {
MB_TYPE_INTRA,
MB_TYPE_INTRA16x16 | MB_TYPE_SEPARATE_DC,
MB_TYPE_16x16 | MB_TYPE_L0,
MB_TYPE_8x8 | MB_TYPE_L0,
MB_TYPE_16x16 | MB_TYPE_L0,
MB_TYPE_16x16 | MB_TYPE_L1,
MB_TYPE_16x16 | MB_TYPE_FORWARD_MV,
MB_TYPE_8x8 | MB_TYPE_FORWARD_MV,
MB_TYPE_16x16 | MB_TYPE_FORWARD_MV,
MB_TYPE_16x16 | MB_TYPE_BACKWARD_MV,
MB_TYPE_SKIP,
MB_TYPE_DIRECT2 | MB_TYPE_16x16,
MB_TYPE_16x8 | MB_TYPE_L0,
MB_TYPE_8x16 | MB_TYPE_L0,
MB_TYPE_16x16 | MB_TYPE_L0L1,
MB_TYPE_16x16 | MB_TYPE_L0 | MB_TYPE_SEPARATE_DC
MB_TYPE_16x8 | MB_TYPE_FORWARD_MV,
MB_TYPE_8x16 | MB_TYPE_FORWARD_MV,
MB_TYPE_16x16 | MB_TYPE_BIDIR_MV,
MB_TYPE_16x16 | MB_TYPE_FORWARD_MV | MB_TYPE_SEPARATE_DC
};
@ -568,7 +568,7 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
int mx, my;
int i, j;
MPVWorkPicture *cur_pic = &s->cur_pic;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
const int mask = dir ? MB_TYPE_BACKWARD_MV : MB_TYPE_FORWARD_MV;
int type = cur_pic->mb_type[mb_pos];
if((r->avail_cache[6-1] & type) & mask){


@ -112,7 +112,7 @@ static int parse_mb_skip(WMV2DecContext *w)
for (mb_y = 0; mb_y < s->mb_height; mb_y++)
for (mb_x = 0; mb_x < s->mb_width; mb_x++)
mb_type[mb_y * s->mb_stride + mb_x] =
MB_TYPE_16x16 | MB_TYPE_L0;
MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
break;
case SKIP_TYPE_MPEG:
if (get_bits_left(&s->gb) < s->mb_height * s->mb_width)
@ -120,7 +120,7 @@ static int parse_mb_skip(WMV2DecContext *w)
for (mb_y = 0; mb_y < s->mb_height; mb_y++)
for (mb_x = 0; mb_x < s->mb_width; mb_x++)
mb_type[mb_y * s->mb_stride + mb_x] =
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
break;
case SKIP_TYPE_ROW:
for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
@ -129,11 +129,11 @@ static int parse_mb_skip(WMV2DecContext *w)
if (get_bits1(&s->gb)) {
for (mb_x = 0; mb_x < s->mb_width; mb_x++)
mb_type[mb_y * s->mb_stride + mb_x] =
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
} else {
for (mb_x = 0; mb_x < s->mb_width; mb_x++)
mb_type[mb_y * s->mb_stride + mb_x] =
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
}
}
break;
@ -144,11 +144,11 @@ static int parse_mb_skip(WMV2DecContext *w)
if (get_bits1(&s->gb)) {
for (mb_y = 0; mb_y < s->mb_height; mb_y++)
mb_type[mb_y * s->mb_stride + mb_x] =
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
} else {
for (mb_y = 0; mb_y < s->mb_height; mb_y++)
mb_type[mb_y * s->mb_stride + mb_x] =
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_L0;
(get_bits1(&s->gb) ? MB_TYPE_SKIP : 0) | MB_TYPE_16x16 | MB_TYPE_FORWARD_MV;
}
}
break;