forked from FFmpeg/FFmpeg
Stop hardcoding align=32 in av_frame_get_buffer() calls.
Use 0, which selects the alignment automatically.
parent 8cfab9fa8c
commit f30a41a608
21 changed files with 23 additions and 23 deletions
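For reference, a minimal sketch of the allocation pattern this patch converges on, assuming a caller that sets format and dimensions itself (the helper name alloc_video_frame is illustrative, not something added by this commit): passing align = 0 lets av_frame_get_buffer() choose a suitable buffer alignment for the current CPU instead of the previously hardcoded 32.

/*
 * Minimal sketch (not part of this patch): allocate a video frame's data
 * buffers with align = 0, letting libavutil pick the alignment for the
 * current CPU instead of a hardcoded 32. The helper name is illustrative.
 */
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int width, int height)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;

    frame->format = fmt;
    frame->width  = width;
    frame->height = height;

    /* 0 = choose the buffer alignment automatically */
    if (av_frame_get_buffer(frame, 0) < 0) {
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}

Each hunk below applies exactly this one-argument change at an existing call site.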
@@ -145,7 +145,7 @@ int main(int argc, char **argv)
     frame->width = c->width;
     frame->height = c->height;
 
-    ret = av_frame_get_buffer(frame, 32);
+    ret = av_frame_get_buffer(frame, 0);
     if (ret < 0) {
         fprintf(stderr, "Could not allocate the video frame data\n");
         exit(1);

@@ -396,7 +396,7 @@ static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
     picture->height = height;
 
     /* allocate the buffers for the frame data */
-    ret = av_frame_get_buffer(picture, 32);
+    ret = av_frame_get_buffer(picture, 0);
     if (ret < 0) {
         fprintf(stderr, "Could not allocate frame data.\n");
         exit(1);

@@ -172,7 +172,7 @@ int main(int argc, char *argv[])
         sw_frame->width = width;
         sw_frame->height = height;
         sw_frame->format = AV_PIX_FMT_NV12;
-        if ((err = av_frame_get_buffer(sw_frame, 32)) < 0)
+        if ((err = av_frame_get_buffer(sw_frame, 0)) < 0)
             goto close;
         if ((err = fread((uint8_t*)(sw_frame->data[0]), size, 1, fin)) <= 0)
             break;

@@ -182,7 +182,7 @@ static int sub2video_get_blank_frame(InputStream *ist)
     ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
     ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
     ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
-    if ((ret = av_frame_get_buffer(frame, 32)) < 0)
+    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
         return ret;
     memset(frame->data[0], 0, frame->height * frame->linesize[0]);
     return 0;

@@ -67,7 +67,7 @@ static int videotoolbox_retrieve_data(AVCodecContext *s, AVFrame *frame)
 
     vt->tmp_frame->width = frame->width;
     vt->tmp_frame->height = frame->height;
-    ret = av_frame_get_buffer(vt->tmp_frame, 32);
+    ret = av_frame_get_buffer(vt->tmp_frame, 0);
     if (ret < 0)
         return ret;
 

@@ -228,7 +228,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         clone->format = pict->format;
         clone->width = FFALIGN(pict->width, 16);
         clone->height = FFALIGN(pict->height, 16);
-        ret = av_frame_get_buffer(clone, 32);
+        ret = av_frame_get_buffer(clone, 0);
         if (ret < 0) {
             av_frame_free(&clone);
             return ret;

@@ -90,7 +90,7 @@ static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
     frame->channel_layout = src->channel_layout;
     frame->channels = src->channels;
     frame->nb_samples = s->frame_size;
-    ret = av_frame_get_buffer(frame, 32);
+    ret = av_frame_get_buffer(frame, 0);
     if (ret < 0)
         goto fail;
 

@@ -142,7 +142,7 @@ int ff_libwebp_get_frame(AVCodecContext *avctx, LibWebPContextCommon *s,
         alt_frame->format = frame->format;
         if (s->cr_threshold)
             alt_frame->format = AV_PIX_FMT_YUVA420P;
-        ret = av_frame_get_buffer(alt_frame, 32);
+        ret = av_frame_get_buffer(alt_frame, 0);
         if (ret < 0)
             goto end;
         alt_frame->format = frame->format;

@@ -1044,7 +1044,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
         s->tmp_frames[i]->width = s->width >> s->brd_scale;
         s->tmp_frames[i]->height = s->height >> s->brd_scale;
 
-        ret = av_frame_get_buffer(s->tmp_frames[i], 32);
+        ret = av_frame_get_buffer(s->tmp_frames[i], 0);
         if (ret < 0)
             return ret;
     }

@@ -741,7 +741,7 @@ static int apng_encode_frame(AVCodecContext *avctx, const AVFrame *pict,
     diffFrame->format = pict->format;
     diffFrame->width = pict->width;
     diffFrame->height = pict->height;
-    if ((ret = av_frame_get_buffer(diffFrame, 32)) < 0)
+    if ((ret = av_frame_get_buffer(diffFrame, 0)) < 0)
         goto fail;
 
     original_bytestream = s->bytestream;

@@ -956,7 +956,7 @@ static int encode_apng(AVCodecContext *avctx, AVPacket *pkt,
         s->prev_frame->format = pict->format;
         s->prev_frame->width = pict->width;
         s->prev_frame->height = pict->height;
-        if ((ret = av_frame_get_buffer(s->prev_frame, 32)) < 0)
+        if ((ret = av_frame_get_buffer(s->prev_frame, 0)) < 0)
             return ret;
     }
 

@@ -484,7 +484,7 @@ static int tdsc_parse_tdsf(AVCodecContext *avctx, int number_tiles)
 
     /* Allocate the reference frame if not already done or on size change */
     if (init_refframe) {
-        ret = av_frame_get_buffer(ctx->refframe, 32);
+        ret = av_frame_get_buffer(ctx->refframe, 0);
         if (ret < 0)
             return ret;
     }

@@ -535,7 +535,7 @@ static int config_output(AVFilterLink *outlink)
     s->delay_frame->nb_samples = s->delay_samples;
     s->delay_frame->channel_layout = outlink->channel_layout;
 
-    err = av_frame_get_buffer(s->delay_frame, 32);
+    err = av_frame_get_buffer(s->delay_frame, 0);
     if (err)
         return err;
 

@@ -115,7 +115,7 @@ static int config_input(AVFilterLink *inlink)
     s->delay_frame->nb_samples = new_size;
     s->delay_frame->channel_layout = inlink->channel_layout;
 
-    return av_frame_get_buffer(s->delay_frame, 32);
+    return av_frame_get_buffer(s->delay_frame, 0);
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)

@@ -365,7 +365,7 @@ static AVFrame *alloc_frame_empty(enum AVPixelFormat format, int w, int h)
     out->format = format;
     out->width = w;
     out->height = h;
-    if (av_frame_get_buffer(out, 32) < 0) {
+    if (av_frame_get_buffer(out, 0) < 0) {
         av_frame_free(&out);
         return NULL;
     }

@@ -80,7 +80,7 @@ static AVFrame *downscale(AVFrame *in)
     frame->width = (in->width + 1) / 2;
     frame->height = (in->height + 1) / 2;
 
-    if (av_frame_get_buffer(frame, 32) < 0) {
+    if (av_frame_get_buffer(frame, 0) < 0) {
         av_frame_free(&frame);
         return NULL;
     }

@@ -150,7 +150,7 @@ static AVFrame *alloc_frame(enum AVPixelFormat pixfmt, int w, int h)
     frame->width = w;
     frame->height = h;
 
-    if (av_frame_get_buffer(frame, 32) < 0) {
+    if (av_frame_get_buffer(frame, 0) < 0) {
         av_frame_free(&frame);
         return NULL;
     }

@@ -461,7 +461,7 @@ int av_frame_ref(AVFrame *dst, const AVFrame *src)
 
     /* duplicate the frame data if it's not refcounted */
     if (!src->buf[0]) {
-        ret = av_frame_get_buffer(dst, 32);
+        ret = av_frame_get_buffer(dst, 0);
         if (ret < 0)
             return ret;
 

@@ -631,7 +631,7 @@ int av_frame_make_writable(AVFrame *frame)
     if (frame->hw_frames_ctx)
         ret = av_hwframe_get_buffer(frame->hw_frames_ctx, &tmp, 0);
     else
-        ret = av_frame_get_buffer(&tmp, 32);
+        ret = av_frame_get_buffer(&tmp, 0);
     if (ret < 0)
         return ret;
 

@@ -422,7 +422,7 @@ static int transfer_data_alloc(AVFrame *dst, const AVFrame *src, int flags)
     frame_tmp->width = ctx->width;
     frame_tmp->height = ctx->height;
 
-    ret = av_frame_get_buffer(frame_tmp, 32);
+    ret = av_frame_get_buffer(frame_tmp, 0);
     if (ret < 0)
         goto fail;
 

@@ -922,7 +922,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
         tmp_frame.format = src->format;
         tmp_frame.width = FFALIGN(src->width, 16);
         tmp_frame.height = FFALIGN(src->height, 16);
-        ret = av_frame_get_buffer(&tmp_frame, 32);
+        ret = av_frame_get_buffer(&tmp_frame, 0);
         if (ret < 0)
             return ret;
 

@@ -126,7 +126,7 @@ static int run_test(AVCodec *enc, AVCodec *dec, AVCodecContext *enc_ctx,
     in_frame->nb_samples = enc_ctx->frame_size;
     in_frame->format = enc_ctx->sample_fmt;
     in_frame->channel_layout = enc_ctx->channel_layout;
-    if (av_frame_get_buffer(in_frame, 32) != 0) {
+    if (av_frame_get_buffer(in_frame, 0) != 0) {
         av_log(NULL, AV_LOG_ERROR, "Can't allocate a buffer for input frame\n");
         return AVERROR(ENOMEM);
     }

@@ -101,7 +101,7 @@ static void *sender_thread(void *arg)
         msg.frame->format = AV_PIX_FMT_RGBA;
         msg.frame->width = 320;
         msg.frame->height = 240;
-        ret = av_frame_get_buffer(msg.frame, 32);
+        ret = av_frame_get_buffer(msg.frame, 0);
         if (ret < 0) {
             av_frame_free(&msg.frame);
             break;