Lines Matching defs:frame
85 /* If a frame duration is longer than this, it will not be duplicated to compensate for AV sync */
105 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
155 AVFrame *frame;
158 double pts; /* presentation timestamp for the frame */
159 double duration; /* estimated duration of the frame */
160 int64_t pos; /* byte position of the frame in the input file */
286 double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
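
The fields and limits above carry frame timing as doubles in seconds plus the byte offset of the originating packet. A minimal sketch, with a hypothetical helper name and assuming the usual libavformat/libavutil accessors, of how such values can be derived from a decoded frame:

    #include <math.h>
    #include <libavformat/avformat.h>
    #include <libavutil/frame.h>
    #include <libavutil/rational.h>

    /* Illustrative only: convert a decoded frame's timing into the seconds-based
     * pts/duration and byte position stored in the fields above. */
    static void fill_frame_timing(AVFormatContext *ic, AVStream *st,
                                  const AVFrame *frame,
                                  double *pts, double *duration, int64_t *pos)
    {
        AVRational tb = st->time_base;
        AVRational frame_rate = av_guess_frame_rate(ic, st, NULL);

        /* presentation timestamp in seconds, NAN when the decoder gave none */
        *pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
        /* estimated duration in seconds, 0 when the frame rate is unknown */
        *duration = frame_rate.num ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0.0;
        /* byte position of the frame's packet in the input file */
        *pos = frame->pkt_pos;
    }
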
577 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
588 ret = avcodec_receive_frame(d->avctx, frame);
591 frame->pts = frame->best_effort_timestamp;
593 frame->pts = frame->pkt_dts;
598 ret = avcodec_receive_frame(d->avctx, frame);
600 AVRational tb = (AVRational){1, frame->sample_rate};
601 if (frame->pts != AV_NOPTS_VALUE)
602 frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
604 frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
605 if (frame->pts != AV_NOPTS_VALUE) {
606 d->next_pts = frame->pts + frame->nb_samples;
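
The decoder lines above show the audio path rescaling each frame's pts from the codec's packet timebase into a {1, sample_rate} timebase, and extrapolating a pts from the previous frame when the decoder reports none. A hedged sketch of that pattern; the function name is made up and next_pts/next_pts_tb stand in for the Decoder fields used for the same purpose:

    #include <libavcodec/avcodec.h>
    #include <libavutil/mathematics.h>

    static void rescale_audio_pts(AVCodecContext *avctx, AVFrame *frame,
                                  int64_t *next_pts, AVRational *next_pts_tb)
    {
        AVRational tb = (AVRational){1, frame->sample_rate};

        if (frame->pts != AV_NOPTS_VALUE)
            frame->pts = av_rescale_q(frame->pts, avctx->pkt_timebase, tb);
        else if (*next_pts != AV_NOPTS_VALUE)
            frame->pts = av_rescale_q(*next_pts, *next_pts_tb, tb);

        if (frame->pts != AV_NOPTS_VALUE) {
            /* where the next frame should start if it arrives without a pts */
            *next_pts = frame->pts + frame->nb_samples;
            *next_pts_tb = tb;
        }
    }
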
673 av_frame_unref(vp->frame);
693 if (!(f->queue[i].frame = av_frame_alloc()))
704 av_frame_free(&vp->frame);
734 /* wait until we have space to put a new frame */
750 /* wait until we have a readable new frame */
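
The two comments above describe the blocking behaviour of the frame queue: writers wait for a free slot, readers wait for a decoded frame. A minimal sketch of that wait, built on the same SDL mutex/condition pair; the struct is an illustrative stub, not ffplay's full FrameQueue:

    #include <SDL.h>

    typedef struct FrameQueueStub {      /* illustrative subset of the real queue */
        SDL_mutex *mutex;
        SDL_cond  *cond;
        int size, max_size, abort_request;
    } FrameQueueStub;

    /* block until there is room for a new frame, or the queue is being torn down */
    static void wait_for_space(FrameQueueStub *q)
    {
        SDL_LockMutex(q->mutex);
        while (q->size >= q->max_size && !q->abort_request)
            SDL_CondWait(q->cond, q->mutex);
        SDL_UnlockMutex(q->mutex);
    }
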
894 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
898 get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
899 if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
905 frame->width, frame->height, frame->format, frame->width, frame->height,
911 sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
912 0, frame->height, pixels, pitch);
921 if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
922 ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
923 frame->data[1], frame->linesize[1],
924 frame->data[2], frame->linesize[2]);
925 } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
926 ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
927 frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
928 frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
935 if (frame->linesize[0] < 0) {
936 ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
938 ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
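
upload_texture above takes two routes: a direct SDL_UpdateYUVTexture / SDL_UpdateTexture when the frame's pixel format maps to an SDL one (flipping data pointers and negating linesizes for bottom-up frames), and an sws_scale conversion otherwise. A sketch of that fallback path, assuming the texture was already allocated as ARGB8888 so the frame can be converted to BGRA; names and the scaler flag are illustrative:

    #include <SDL.h>
    #include <libswscale/swscale.h>
    #include <libavutil/frame.h>

    static int blit_via_swscale(SDL_Texture *tex, const AVFrame *frame,
                                struct SwsContext **sws_ctx)
    {
        uint8_t *pixels[4] = { NULL };
        int pitch[4] = { 0 };

        /* (re)use a cached scaler matching the frame's geometry and format */
        *sws_ctx = sws_getCachedContext(*sws_ctx,
                                        frame->width, frame->height, frame->format,
                                        frame->width, frame->height, AV_PIX_FMT_BGRA,
                                        SWS_BICUBIC, NULL, NULL, NULL);
        if (!*sws_ctx)
            return -1;
        if (SDL_LockTexture(tex, NULL, (void **)pixels, pitch) < 0)
            return -1;
        /* convert straight into the texture's pixel buffer */
        sws_scale(*sws_ctx, (const uint8_t * const *)frame->data, frame->linesize,
                  0, frame->height, pixels, pitch);
        SDL_UnlockTexture(tex);
        return 0;
    }
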
945 static void set_sdl_yuv_conversion_mode(AVFrame *frame)
949 if (frame && (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUYV422 || frame->format == AV_PIX_FMT_UYVY422)) {
950 if (frame->color_range == AVCOL_RANGE_JPEG)
952 else if (frame->colorspace == AVCOL_SPC_BT709)
954 else if (frame->colorspace == AVCOL_SPC_BT470BG || frame->colorspace == AVCOL_SPC_SMPTE170M)
1014 set_sdl_yuv_conversion_mode(vp->frame);
1017 if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0) {
1022 vp->flip_v = vp->frame->linesize[0] < 0;
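
set_sdl_yuv_conversion_mode above selects an SDL YUV conversion mode from the frame's color range and colorspace before the texture upload (and the caller resets it afterwards). A consolidated sketch of that mapping; it assumes SDL 2.0.8+, where SDL_SetYUVConversionMode is available:

    #include <SDL.h>
    #include <libavutil/frame.h>

    static void pick_yuv_conversion(const AVFrame *frame)
    {
        SDL_YUV_CONVERSION_MODE mode = SDL_YUV_CONVERSION_AUTOMATIC;

        if (frame && (frame->format == AV_PIX_FMT_YUV420P ||
                      frame->format == AV_PIX_FMT_YUYV422 ||
                      frame->format == AV_PIX_FMT_UYVY422)) {
            if (frame->color_range == AVCOL_RANGE_JPEG)
                mode = SDL_YUV_CONVERSION_JPEG;          /* full range */
            else if (frame->colorspace == AVCOL_SPC_BT709)
                mode = SDL_YUV_CONVERSION_BT709;
            else if (frame->colorspace == AVCOL_SPC_BT470BG ||
                     frame->colorspace == AVCOL_SPC_SMPTE170M)
                mode = SDL_YUV_CONVERSION_BT601;
        }
        SDL_SetYUVConversionMode(mode);
    }
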
1521 duplicating or deleting a frame */
1524 /* skip or repeat frame. We take into account the
1562 /* called to display each frame */
1758 av_frame_move_ref(vp->frame, src_frame);
1763 static int get_video_frame(VideoState *is, AVFrame *frame)
1767 if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1773 if (frame->pts != AV_NOPTS_VALUE)
1774 dpts = av_q2d(is->video_st->time_base) * frame->pts;
1776 frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1779 if (frame->pts != AV_NOPTS_VALUE) {
1786 av_frame_unref(frame);
1840 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1876 frame->width, frame->height, frame->format,
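
configure_video_filters above describes the graph's buffer source with the first frame's width, height and pixel format. A hedged sketch of how such a source is typically created from those parameters using the standard libavfilter "buffer" filter; the args string is reduced to the basics and the filter instance name is arbitrary:

    #include <stdio.h>
    #include <libavfilter/avfilter.h>
    #include <libavutil/frame.h>

    static int create_buffer_source(AVFilterGraph *graph, const AVFrame *frame,
                                    AVRational time_base, AVFilterContext **src)
    {
        char args[256];

        snprintf(args, sizeof(args),
                 "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                 frame->width, frame->height, frame->format,
                 time_base.num, time_base.den,
                 frame->sample_aspect_ratio.num,
                 frame->sample_aspect_ratio.den ? frame->sample_aspect_ratio.den : 1);

        return avfilter_graph_create_filter(src, avfilter_get_by_name("buffer"),
                                            "video_src", args, NULL, graph);
    }
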
2023 AVFrame *frame = av_frame_alloc();
2033 if (!frame)
2037 if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2041 tb = (AVRational){1, frame->sample_rate};
2046 frame->format, frame->ch_layout.nb_channels) ||
2047 av_channel_layout_compare(&is->audio_filter_src.ch_layout, &frame->ch_layout) ||
2048 is->audio_filter_src.freq != frame->sample_rate ||
2054 av_channel_layout_describe(&frame->ch_layout, buf2, sizeof(buf2));
2056 "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2058 frame->sample_rate, frame->ch_layout.nb_channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2060 is->audio_filter_src.fmt = frame->format;
2061 ret = av_channel_layout_copy(&is->audio_filter_src.ch_layout, &frame->ch_layout);
2064 is->audio_filter_src.freq = frame->sample_rate;
2071 if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2074 while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2080 af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2081 af->pos = frame->pkt_pos;
2083 af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2085 av_frame_move_ref(af->frame, frame);
2101 av_frame_free(&frame);
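
The audio thread lines above push each decoded frame into the audio filter graph, drain every filtered frame back out, and stamp it with a pts in seconds, the packet's byte position and a duration of nb_samples/sample_rate before moving it into the frame queue. A sketch of that push/drain loop; the consume callback is a stand-in for the queue bookkeeping:

    #include <libavfilter/buffersrc.h>
    #include <libavfilter/buffersink.h>
    #include <libavutil/frame.h>
    #include <libavutil/error.h>

    static int run_through_filters(AVFilterContext *src, AVFilterContext *sink,
                                   AVFrame *frame,
                                   int (*consume)(AVFrame *, void *), void *opaque)
    {
        int ret = av_buffersrc_add_frame(src, frame);
        if (ret < 0)
            return ret;

        while ((ret = av_buffersink_get_frame_flags(sink, frame, 0)) >= 0) {
            ret = consume(frame, opaque);   /* e.g. copy timing and queue the frame */
            av_frame_unref(frame);
            if (ret < 0)
                return ret;
        }
        /* EAGAIN / EOF from the sink simply mean "no more frames right now" */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }
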
2119 AVFrame *frame = av_frame_alloc();
2136 if (!frame)
2140 ret = get_video_frame(is, frame);
2147 if ( last_w != frame->width
2148 || last_h != frame->height
2149 || last_format != frame->format
2153 "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2156 frame->width, frame->height,
2157 (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2165 if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2174 last_w = frame->width;
2175 last_h = frame->height;
2176 last_format = frame->format;
2182 ret = av_buffersrc_add_frame(filt_in, frame);
2189 ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2203 pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2204 ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2205 av_frame_unref(frame);
2219 av_frame_free(&frame);
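
The video thread follows the same pattern: when the frame's size or pixel format changes it rebuilds the graph, and for each filtered frame it derives a pts and a nominal duration from the buffersink before handing the frame to queue_picture. A sketch of that timing computation, with assumed variable names:

    #include <math.h>
    #include <libavfilter/buffersink.h>
    #include <libavutil/frame.h>

    static void filtered_frame_timing(AVFilterContext *filt_out, const AVFrame *frame,
                                      double *pts, double *duration)
    {
        AVRational tb = av_buffersink_get_time_base(filt_out);
        AVRational fr = av_buffersink_get_frame_rate(filt_out);

        /* pts in seconds, NAN when the frame carries no timestamp */
        *pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
        /* nominal frame duration: the inverse of the sink's frame rate, if known */
        *duration = (fr.num && fr.den) ? av_q2d((AVRational){fr.den, fr.num}) : 0.0;
    }
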
2320 * Decode one audio frame and return its uncompressed size.
2322 * The processed audio frame is decoded, converted if required, and
2349 data_size = av_samples_get_buffer_size(NULL, af->frame->ch_layout.nb_channels,
2350 af->frame->nb_samples,
2351 af->frame->format, 1);
2353 wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2355 if (af->frame->format != is->audio_src.fmt ||
2356 av_channel_layout_compare(&af->frame->ch_layout, &is->audio_src.ch_layout) ||
2357 af->frame->sample_rate != is->audio_src.freq ||
2358 (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2362 &af->frame->ch_layout, af->frame->format, af->frame->sample_rate,
2367 af->frame->sample_rate, av_get_sample_fmt_name(af->frame->format), af->frame->ch_layout.nb_channels,
2372 if (av_channel_layout_copy(&is->audio_src.ch_layout, &af->frame->ch_layout) < 0)
2374 is->audio_src.freq = af->frame->sample_rate;
2375 is->audio_src.fmt = af->frame->format;
2379 const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2381 int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2388 if (wanted_nb_samples != af->frame->nb_samples) {
2389 if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2390 wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2398 len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2411 is->audio_buf = af->frame->data[0];
2418 is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
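
audio_decode_frame above converts each decoded frame to the audio device's format with libswresample whenever the sample format, channel layout or rate differs, or when sample-count compensation for AV sync is requested. A reduced sketch of that conversion; the destination parameters are assumptions, and the swr_set_compensation step and ffplay's exact buffer bookkeeping are omitted:

    #include <libswresample/swresample.h>
    #include <libavutil/frame.h>
    #include <libavutil/mem.h>
    #include <libavutil/samplefmt.h>

    static int resample_frame(SwrContext **swr, const AVFrame *af,
                              const AVChannelLayout *dst_layout,
                              enum AVSampleFormat dst_fmt, int dst_rate,
                              uint8_t **out_buf, unsigned *out_size)
    {
        /* lazily build the resampler the first time the formats differ */
        if (!*swr) {
            if (swr_alloc_set_opts2(swr,
                                    dst_layout, dst_fmt, dst_rate,
                                    &af->ch_layout, af->format, af->sample_rate,
                                    0, NULL) < 0 || swr_init(*swr) < 0)
                return AVERROR(EINVAL);
        }

        const uint8_t **in = (const uint8_t **)af->extended_data;
        /* a little slack on top of the rate-converted sample count */
        int out_count = (int64_t)af->nb_samples * dst_rate / af->sample_rate + 256;
        int out_linesize;
        int buf_size = av_samples_get_buffer_size(&out_linesize, dst_layout->nb_channels,
                                                  out_count, dst_fmt, 0);
        if (buf_size < 0)
            return buf_size;
        av_fast_malloc(out_buf, out_size, buf_size);
        if (!*out_buf)
            return AVERROR(ENOMEM);

        /* returns the number of samples written per channel, or <0 on error */
        return swr_convert(*swr, out_buf, out_count, in, af->nb_samples);
    }
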
3305 case SDLK_s: // S: Step to next frame
3648 "s activate frame-step mode\n"