avcodec/mpegpicture: Rename Picture->MPVPicture

Picture is just too generic.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Author: Andreas Rheinhardt 2023-10-08 01:44:47 +02:00
parent 2dfe7c1e40
commit 59422955cf
22 changed files with 59 additions and 57 deletions


@@ -45,7 +45,7 @@ void ff_dxva2_mpeg2_fill_picture_parameters(AVCodecContext *avctx,
DXVA_PictureParameters *pp)
{
const struct MpegEncContext *s = avctx->priv_data;
-const Picture *current_picture = s->cur_pic_ptr;
+const MPVPicture *current_picture = s->cur_pic_ptr;
int is_field = s->picture_structure != PICT_FRAME;
memset(pp, 0, sizeof(*pp));


@@ -46,7 +46,7 @@ void ff_dxva2_vc1_fill_picture_parameters(AVCodecContext *avctx,
{
const VC1Context *v = avctx->priv_data;
const MpegEncContext *s = &v->s;
-const Picture *current_picture = s->cur_pic_ptr;
+const MPVPicture *current_picture = s->cur_pic_ptr;
int intcomp = 0;
// determine if intensity compensation is needed
@@ -336,7 +336,7 @@ static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
uint32_t size)
{
const VC1Context *v = avctx->priv_data;
-const Picture *current_picture = v->s.cur_pic_ptr;
+const MPVPicture *current_picture = v->s.cur_pic_ptr;
struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
unsigned position;


@@ -730,7 +730,7 @@ av_cold void ff_intrax8_common_end(IntraX8Context *w)
av_freep(&w->prediction_table);
}
-int ff_intrax8_decode_picture(IntraX8Context *w, Picture *pict,
+int ff_intrax8_decode_picture(IntraX8Context *w, MPVPicture *pict,
GetBitContext *gb, int *mb_x, int *mb_y,
int dquant, int quant_offset,
int loopfilter, int lowdelay)


@@ -106,7 +106,7 @@ void ff_intrax8_common_end(IntraX8Context *w);
* @param quant_offset offset away from zero
* @param loopfilter enable filter after decoding a block
*/
-int ff_intrax8_decode_picture(IntraX8Context *w, Picture *pict,
+int ff_intrax8_decode_picture(IntraX8Context *w, MPVPicture *pict,
GetBitContext *gb, int *mb_x, int *mb_y,
int quant, int halfpq,
int loopfilter, int lowdelay);


@@ -719,7 +719,7 @@ static int h263_get_modb(GetBitContext *gb, int pb_frame, int *cbpb)
#define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
#define tab_bias (tab_size / 2)
-static inline void set_one_direct_mv(MpegEncContext *s, const Picture *p, int i)
+static inline void set_one_direct_mv(MpegEncContext *s, const MPVPicture *p, int i)
{
int xy = s->block_index[i];
uint16_t time_pp = s->pp_time;
@@ -750,7 +750,7 @@ static inline void set_one_direct_mv(MpegEncContext *s, const Picture *p, int i)
static int set_direct_mv(MpegEncContext *s)
{
const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
-const Picture *p = &s->next_pic;
+const MPVPicture *p = &s->next_pic;
int colocated_mb_type = p->mb_type[mb_index];
int i;


@@ -652,7 +652,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
for (i = 0; i < s->max_b_frames; i++) {
const uint8_t *b_pic;
int diff;
-Picture *pic = s->reordered_input_picture[i + 1];
+const MPVPicture *pic = s->reordered_input_picture[i + 1];
if (!pic || pic->f->pict_type != AV_PICTURE_TYPE_B)
break;


@@ -22,7 +22,7 @@
#include "mpegvideodec.h"
#include "mpeg_er.h"
-static void set_erpic(ERPicture *dst, const Picture *src)
+static void set_erpic(ERPicture *dst, const MPVPicture *src)
{
int i;


@@ -30,7 +30,7 @@
#include "refstruct.h"
#include "threadframe.h"
-static void av_noinline free_picture_tables(Picture *pic)
+static void av_noinline free_picture_tables(MPVPicture *pic)
{
ff_refstruct_unref(&pic->mbskip_table);
ff_refstruct_unref(&pic->qscale_table_base);
@@ -116,7 +116,7 @@ int ff_mpv_pic_check_linesize(void *logctx, const AVFrame *f,
return 0;
}
-static int alloc_picture_tables(BufferPoolContext *pools, Picture *pic,
+static int alloc_picture_tables(BufferPoolContext *pools, MPVPicture *pic,
int mb_height)
{
#define GET_BUFFER(name, buf_suffix, idx_suffix) do { \
@@ -143,7 +143,7 @@ static int alloc_picture_tables(BufferPoolContext *pools, Picture *pic,
return 0;
}
-int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, Picture *pic,
+int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVPicture *pic,
MotionEstContext *me, ScratchpadContext *sc,
BufferPoolContext *pools, int mb_height)
{
@@ -181,7 +181,7 @@ fail:
* Deallocate a picture; frees the picture tables in case they
* need to be reallocated anyway.
*/
-void ff_mpeg_unref_picture(Picture *pic)
+void ff_mpeg_unref_picture(MPVPicture *pic)
{
pic->tf.f = pic->f;
ff_thread_release_ext_buffer(&pic->tf);
@@ -203,7 +203,7 @@ void ff_mpeg_unref_picture(Picture *pic)
pic->coded_picture_number = 0;
}
-static void update_picture_tables(Picture *dst, const Picture *src)
+static void update_picture_tables(MPVPicture *dst, const MPVPicture *src)
{
ff_refstruct_replace(&dst->mbskip_table, src->mbskip_table);
ff_refstruct_replace(&dst->qscale_table_base, src->qscale_table_base);
@@ -223,7 +223,7 @@ static void update_picture_tables(Picture *dst, const Picture *src)
dst->mb_stride = src->mb_stride;
}
-int ff_mpeg_ref_picture(Picture *dst, Picture *src)
+int ff_mpeg_ref_picture(MPVPicture *dst, MPVPicture *src)
{
int ret;
@@ -260,7 +260,7 @@ fail:
return ret;
}
-int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
+int ff_find_unused_picture(AVCodecContext *avctx, MPVPicture *picture, int shared)
{
for (int i = 0; i < MAX_PICTURE_COUNT; i++)
if (!picture[i].f->buf[0])
@@ -283,7 +283,7 @@ int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
return -1;
}
-void av_cold ff_mpv_picture_free(Picture *pic)
+void av_cold ff_mpv_picture_free(MPVPicture *pic)
{
ff_mpeg_unref_picture(pic);
av_frame_free(&pic->f);


@@ -52,9 +52,9 @@ typedef struct BufferPoolContext {
} BufferPoolContext;
/**
-* Picture.
+* MPVPicture.
*/
-typedef struct Picture {
+typedef struct MPVPicture {
struct AVFrame *f;
ThreadFrame tf;
@@ -91,12 +91,12 @@ typedef struct Picture {
int display_picture_number;
int coded_picture_number;
-} Picture;
+} MPVPicture;
/**
-* Allocate a Picture's accessories, but not the AVFrame's buffer itself.
+* Allocate an MPVPicture's accessories, but not the AVFrame's buffer itself.
*/
-int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, Picture *pic,
+int ff_mpv_alloc_pic_accessories(AVCodecContext *avctx, MPVPicture *pic,
MotionEstContext *me, ScratchpadContext *sc,
BufferPoolContext *pools, int mb_height);
@@ -112,11 +112,11 @@ int ff_mpv_pic_check_linesize(void *logctx, const struct AVFrame *f,
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
ScratchpadContext *sc, int linesize);
-int ff_mpeg_ref_picture(Picture *dst, Picture *src);
-void ff_mpeg_unref_picture(Picture *picture);
+int ff_mpeg_ref_picture(MPVPicture *dst, MPVPicture *src);
+void ff_mpeg_unref_picture(MPVPicture *picture);
-void ff_mpv_picture_free(Picture *pic);
+void ff_mpv_picture_free(MPVPicture *pic);
-int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared);
+int ff_find_unused_picture(AVCodecContext *avctx, MPVPicture *picture, int shared);
#endif /* AVCODEC_MPEGPICTURE_H */


@@ -128,9 +128,9 @@ typedef struct MpegEncContext {
int mb_num; ///< number of MBs of a picture
ptrdiff_t linesize; ///< line size, in bytes, may be different from width
ptrdiff_t uvlinesize; ///< line size, for chroma in bytes, may be different from width
-Picture *picture; ///< main picture buffer
-Picture **input_picture; ///< next pictures on display order for encoding
-Picture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
+MPVPicture *picture; ///< main picture buffer
+MPVPicture **input_picture; ///< next pictures on display order for encoding
+MPVPicture **reordered_input_picture; ///< pointer to the next pictures in coded order for encoding
BufferPoolContext buffer_pools;
@@ -156,13 +156,13 @@ typedef struct MpegEncContext {
* copy of the previous picture structure.
* note, linesize & data, might not match the previous picture (for field pictures)
*/
-Picture last_pic;
+MPVPicture last_pic;
/**
* copy of the next picture structure.
* note, linesize & data, might not match the next picture (for field pictures)
*/
-Picture next_pic;
+MPVPicture next_pic;
/**
* Reference to the source picture for encoding.
@@ -174,11 +174,11 @@ typedef struct MpegEncContext {
* copy of the current picture structure.
* note, linesize & data, might not match the current picture (for field pictures)
*/
-Picture cur_pic; ///< buffer to store the decompressed current picture
+MPVPicture cur_pic;
-Picture *last_pic_ptr; ///< pointer to the previous picture.
-Picture *next_pic_ptr; ///< pointer to the next picture (for bidir pred)
-Picture *cur_pic_ptr; ///< pointer to the current picture
+MPVPicture *last_pic_ptr; ///< pointer to the previous picture.
+MPVPicture *next_pic_ptr; ///< pointer to the next picture (for bidir pred)
+MPVPicture *cur_pic_ptr; ///< pointer to the current picture
int skipped_last_frame;
int last_dc[3]; ///< last DC values for MPEG-1
int16_t *dc_val_base;


@@ -228,11 +228,11 @@ int ff_mpv_common_frame_size_change(MpegEncContext *s)
return err;
}
-static int alloc_picture(MpegEncContext *s, Picture **picp, int reference)
+static int alloc_picture(MpegEncContext *s, MPVPicture **picp, int reference)
{
AVCodecContext *avctx = s->avctx;
int idx = ff_find_unused_picture(s->avctx, s->picture, 0);
-Picture *pic;
+MPVPicture *pic;
int ret;
if (idx < 0)
@@ -283,9 +283,9 @@ fail:
return ret;
}
-static int av_cold alloc_dummy_frame(MpegEncContext *s, Picture **picp, Picture *wpic)
+static int av_cold alloc_dummy_frame(MpegEncContext *s, MPVPicture **picp, MPVPicture *wpic)
{
-Picture *pic;
+MPVPicture *pic;
int ret = alloc_picture(s, &pic, 1);
if (ret < 0)
return ret;
@@ -475,14 +475,15 @@ void ff_mpv_frame_end(MpegEncContext *s)
ff_thread_report_progress(&s->cur_pic_ptr->tf, INT_MAX, 0);
}
-void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict)
+void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict)
{
ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
p->qscale_table, p->motion_val,
s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
}
-int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type)
+int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f,
+const MPVPicture *p, int qp_type)
{
AVVideoEncParams *par;
int mult = (qp_type == FF_MPV_QSCALE_TYPE_MPEG1) ? 2 : 1;


@@ -1129,7 +1129,7 @@ static int prepare_picture(MpegEncContext *s, AVFrame *f, const AVFrame *props_f
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
-Picture *pic = NULL;
+MPVPicture *pic = NULL;
int64_t pts;
int i, display_picture_number = 0, ret;
int encoding_delay = s->max_b_frames ? s->max_b_frames
@@ -1266,7 +1266,7 @@ fail:
return ret;
}
-static int skip_check(MpegEncContext *s, const Picture *p, const Picture *ref)
+static int skip_check(MpegEncContext *s, const MPVPicture *p, const MPVPicture *ref)
{
int x, y, plane;
int score = 0;
@@ -1355,7 +1355,7 @@ static int estimate_best_b_count(MpegEncContext *s)
FF_LAMBDA_SHIFT;
for (i = 0; i < s->max_b_frames + 2; i++) {
-const Picture *pre_input_ptr = i ? s->input_picture[i - 1] :
+const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
s->next_pic_ptr;
if (pre_input_ptr) {


@@ -514,7 +514,7 @@ static inline void apply_obmc(MpegEncContext *s,
const op_pixels_func (*pix_op)[4])
{
LOCAL_ALIGNED_8(int16_t, mv_cache, [4], [4][2]);
-const Picture *cur_frame = &s->cur_pic;
+const MPVPicture *cur_frame = &s->cur_pic;
int mb_x = s->mb_x;
int mb_y = s->mb_y;
const int xy = mb_x + mb_y * s->mb_stride;


@@ -58,12 +58,13 @@ void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64]);
void ff_mpv_report_decode_progress(MpegEncContext *s);
void ff_mpv_frame_end(MpegEncContext *s);
-int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f, const Picture *p, int qp_type);
+int ff_mpv_export_qp_table(const MpegEncContext *s, AVFrame *f,
+const MPVPicture *p, int qp_type);
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
void ff_mpeg_flush(AVCodecContext *avctx);
-void ff_print_debug_info(const MpegEncContext *s, const Picture *p, AVFrame *pict);
+void ff_print_debug_info(const MpegEncContext *s, const MPVPicture *p, AVFrame *pict);
static inline int mpeg_get_qscale(MpegEncContext *s)
{


@@ -382,7 +382,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
MSS12Context *c = &ctx->c;
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
-Picture *f;
+MPVPicture *f;
int ret;
ff_mpeg_flush(avctx);


@@ -929,7 +929,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
rce = &rcc->entry[picture_number];
wanted_bits = rce->expected_bits;
} else {
-const Picture *dts_pic;
+const MPVPicture *dts_pic;
rce = &local_rce;
/* FIXME add a dts field to AVFrame and ensure it is set and use it


@@ -567,7 +567,7 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
int has_A = 0, has_B = 0, has_C = 0;
int mx, my;
int i, j;
-Picture *cur_pic = &s->cur_pic;
+MPVPicture *cur_pic = &s->cur_pic;
const int mask = dir ? MB_TYPE_L1 : MB_TYPE_L0;
int type = cur_pic->mb_type[mb_pos];


@@ -340,7 +340,7 @@ static void vc1_sprite_flush(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
MpegEncContext *s = &v->s;
-Picture *f = &s->cur_pic;
+MPVPicture *f = &s->cur_pic;
int plane, i;
/* Windows Media Image codecs have a convergence interval of two keyframes.


@@ -370,7 +370,7 @@ int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
{
MpegEncContext *s = avctx->priv_data;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;


@@ -35,7 +35,7 @@ static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
MpegEncContext * const s = avctx->priv_data;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoMPEG1Or2 *info = &pic_ctx->info.mpeg;
VdpVideoSurface ref;
@@ -87,7 +87,7 @@ static int vdpau_mpeg_decode_slice(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
MpegEncContext * const s = avctx->priv_data;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;


@@ -34,7 +34,7 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
{
Mpeg4DecContext *ctx = avctx->priv_data;
MpegEncContext * const s = &ctx->m;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoMPEG4Part2 *info = &pic_ctx->info.mpeg4;
VdpVideoSurface ref;


@@ -36,7 +36,7 @@ static int vdpau_vc1_start_frame(AVCodecContext *avctx,
{
VC1Context * const v = avctx->priv_data;
MpegEncContext * const s = &v->s;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoVC1 *info = &pic_ctx->info.vc1;
VdpVideoSurface ref;
@@ -104,7 +104,7 @@ static int vdpau_vc1_decode_slice(AVCodecContext *avctx,
{
VC1Context * const v = avctx->priv_data;
MpegEncContext * const s = &v->s;
-Picture *pic = s->cur_pic_ptr;
+MPVPicture *pic = s->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
int val;