/third_party/ffmpeg/libavfilter/

af_apad.c (matches in filter_frame):
    75: static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    81: s->whole_len_left = FFMAX(s->whole_len_left - frame->nb_samples, 0);
    83: "n_out:%d whole_len_left:%"PRId64"\n", frame->nb_samples, s->whole_len_left);
    86: s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
    87: return ff_filter_frame(ctx->outputs[0], frame);
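The match at af_apad.c:86 is the standard pts advance for audio filters: the frame's sample count is rescaled from 1/sample_rate ticks into the link time base. A minimal standalone sketch of that arithmetic (assumes libavutil is installed; the time-base and sample values are illustrative, not taken from the filter):

    #include <inttypes.h>
    #include <stdio.h>
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>

    int main(void)
    {
        /* 1024 samples at 48 kHz, expressed in a 1/90000 stream time base. */
        AVRational sample_tb = { 1, 48000 };
        AVRational stream_tb = { 1, 90000 };
        int64_t pts = 900000;
        int64_t next_pts = pts + av_rescale_q(1024, sample_tb, stream_tb);
        printf("next pts: %" PRId64 "\n", next_pts); /* prints 901920 */
        return 0;
    }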
vf_dejudder.c:
    35: * - In order to avoid calculating this sum every frame, a running tally
    36: *   is maintained in ctx->new_pts. Each frame the new term at the start
   119: static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
   126: int64_t next_pts = frame->pts;
   130: return ff_filter_frame(outlink, frame);
   151: frame->pts = s->new_pts;
   155: av_log(ctx, AV_LOG_DEBUG, "next=%"PRId64", new=%"PRId64"\n", next_pts, frame->pts);
   157: return ff_filter_frame(outlink, frame);
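The comment at lines 35-36 describes the classic sliding-window trick: keep a running sum and update it incrementally instead of re-summing the window for every frame. A generic sketch of that bookkeeping (the struct and names are illustrative; vf_dejudder itself keeps its tally in ctx->new_pts):

    #include <stdint.h>

    typedef struct {
        int64_t window[4]; /* last four terms, initially zero */
        int     head;      /* slot holding the oldest term */
        int64_t sum;       /* running tally of the window */
    } RunningTally;

    /* O(1) update: the new term comes in, the oldest term drops out. */
    static int64_t tally_push(RunningTally *t, int64_t newest)
    {
        t->sum += newest - t->window[t->head];
        t->window[t->head] = newest;
        t->head = (t->head + 1) % 4;
        return t->sum;
    }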
/third_party/ffmpeg/libavcodec/

pcm-blurayenc.c:
    29: uint16_t header; // Header added to every frame
   108: pcm_bluray_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
   122: samples = frame->nb_samples;
   132: src16 = (const int16_t *)frame->data[0];
   133: src32 = (const int32_t *)frame->data[0];
   145: bytestream2_put_bufferu(&pb, frame->data[0], samples * 2);
   269: avpkt->pts = frame->pts;
   270: avpkt->duration = ff_samples_to_time_base(avctx, frame->nb_samples);
crystalhd.c:
    31: * Of course, nothing is ever that simple. Due, at the very least, to b-frame
    35: * frames being fed into the decoder to satisfy the b-frame dependencies.
   428: copy_frame(AVCodecContext *avctx, BC_DTS_PROC_OUT *output, AVFrame *frame, int *got_frame)
   483: if (!frame->data[0]) {
   484:     if (ff_get_buffer(avctx, frame, 0) < 0)
   507: dStride = frame->linesize[0];
   508: dst = frame->data[0];
   510: av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
   542: frame->interlaced_frame = interlaced;
   544: frame ...
   562: receive_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame)
   707: crystalhd_receive_frame(AVCodecContext *avctx, AVFrame *frame)
    ...
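copy_frame at crystalhd.c:507-508 copies decoder output into an AVFrame whose rows may be padded, i.e. linesize can exceed the visible row width, so the copy must go row by row. A self-contained sketch of that pattern (names are illustrative):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy a plane between buffers with (possibly) different strides.
     * A single memcpy of width*height bytes would be wrong whenever
     * stride > bytes_per_row, which is common due to alignment padding. */
    static void copy_plane(uint8_t *dst, ptrdiff_t dst_stride,
                           const uint8_t *src, ptrdiff_t src_stride,
                           size_t bytes_per_row, int rows)
    {
        for (int i = 0; i < rows; i++) {
            memcpy(dst, src, bytes_per_row);
            dst += dst_stride;
            src += src_stride;
        }
    }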
cdtoons.c:
    52: AVFrame *frame; (struct member)
    95: dest = c->frame->data[0] + (dst_y + y) * c->frame->linesize[0] + dst_x;
   173: if ((ret = ff_reget_buffer(avctx, c->frame, 0)) < 0)
   197: /* read new sprites introduced in this frame */
   256: av_log(avctx, AV_LOG_WARNING, "Ran (seriously) out of data for Diff frame.\n");
   266: av_log(avctx, AV_LOG_WARNING, "Ran (seriously) out of data for Diff frame header.\n");
   277: av_log(avctx, AV_LOG_WARNING, "Ran (seriously) out of data for Diff frame data.\n");
   297: /* was an intra frame? */
   322: memset(c->frame ...
    ...
kmvc.c:
   188: } else { // copy block from previous frame (in kmvc_decode_inter_8x8)
   264: static int decode_frame(AVCodecContext * avctx, AVFrame *frame,
   275: if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
   278: frame->palette_has_changed = ff_copy_palette(ctx->pal, avpkt, avctx);
   293: frame->key_frame = 1;
   294: frame->pict_type = AV_PICTURE_TYPE_I;
   296: frame->key_frame = 0;
   297: frame->pict_type = AV_PICTURE_TYPE_P;
   301: frame->palette_has_changed = 1;
   310: frame ...
    ...
fic.c:
    45: AVFrame *frame; (struct member)
   188: int stride = ctx->frame->linesize[p];
   189: uint8_t* dst = ctx->frame->data[p] + (y_off >> !!p) * stride;
   283: if ((ret = ff_reget_buffer(avctx, ctx->frame, 0)) < 0)
   296: /* Is it a skip frame? */
   299: av_log(avctx, AV_LOG_WARNING, "Initial frame is skipped\n");
   362: av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n");
   409: ctx->frame->key_frame = 1;
   410: ctx->frame->pict_type = AV_PICTURE_TYPE_I;
   413: ctx->frame ...
    ...
libfdk-aacdec.c (matches in fdk_aac_decode_frame):
   380: static int fdk_aac_decode_frame(AVCodecContext *avctx, AVFrame *frame,
   425: frame->nb_samples = avctx->frame_size;
   431: frame->nb_samples = FFMIN(s->flush_samples, frame->nb_samples);
   433:     frame->nb_samples, s->flush_samples);
   434: s->flush_samples -= frame->nb_samples;
   440: int drop_samples = FFMIN(s->delay_samples, frame->nb_samples);
   444: frame->nb_samples -= drop_samples;
   446: if (frame->nb_samples <= 0)
   452: if ((ret = ff_get_buffer(avctx, frame, ...
    ...
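Lines 440-446 show the usual codec-delay bookkeeping: priming samples are dropped from the front of decoded output until the advertised delay is consumed, and a frame may end up with zero playable samples. A simplified sketch of that logic (mono, 16-bit, illustrative names; the real decoder adjusts frame->nb_samples and its data pointers rather than moving memory):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Drop up to *delay_left samples from the front of the block.
     * Returns the number of playable samples remaining (may be 0). */
    static int trim_decoder_delay(int16_t *samples, int nb_samples,
                                  int *delay_left)
    {
        int drop = *delay_left < nb_samples ? *delay_left : nb_samples;
        *delay_left -= drop;
        memmove(samples, samples + drop,
                (size_t)(nb_samples - drop) * sizeof(*samples));
        return nb_samples - drop;
    }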
ljpegenc.c:
    33: #include "libavutil/frame.h"
    65: ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb, const AVFrame *frame)
    69: const int width = frame->width;
    70: const int height = frame->height;
    71: const int linesize = frame->linesize[0];
    81: uint8_t *ptr = frame->data[0] + (linesize * y);
    84: av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
   127: ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb, const AVFrame *frame, int predictor, int mb_x, int mb_y)
   139: linesize = frame->linesize[i];
   145: ptr = frame ...
   192: ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb, const AVFrame *frame)
    ...
aic.c:
   144: AVFrame *frame; (struct member)
   324: const int ystride = ctx->frame->linesize[0];
   334: Y = ctx->frame->data[0] + mb_x * 16 + y_pos * ystride;
   336: C[i] = ctx->frame->data[i + 1] + mb_x * 8
   337:     + c_pos * ctx->frame->linesize[i + 1];
   376:     ctx->frame->linesize[blk + 1]);
   384: static int aic_decode_frame(AVCodecContext *avctx, AVFrame *frame,
   395: ctx->frame = frame;
   396: ctx->frame ...
    ...
rasc.c:
    63: AVFrame *frame; (struct member)
    69: static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
    72: uint8_t *dst = frame->data[0];
    79: dst += frame->linesize[0];
   612: pal = s->frame->data[1];
   625: dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
   651: dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
   665: dst = s->frame ...
   674: decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
    ...
vaapi_hevc.c:
    56: va_pic->picture_id = ff_vaapi_get_surface_id(pic->frame);
    63: if (pic->frame->interlaced_frame) {
    66: if (!pic->frame->top_field_first)
    73: VASurfaceID pic_surf = ff_vaapi_get_surface_id(pic->frame);
    77: if (pic_surf == ff_vaapi_get_surface_id(h->rps[ST_CURR_BEF].ref[i]->frame))
    82: if (pic_surf == ff_vaapi_get_surface_id(h->rps[ST_CURR_AFT].ref[i]->frame))
    87: if (pic_surf == ff_vaapi_get_surface_id(h->rps[LT_CURR].ref[i]->frame))
   100: const HEVCFrame *frame = NULL; (local in fill_vaapi_reference_frames)
   102: while (!frame && j < FF_ARRAY_ELEMS(h->DPB)) {
   104: frame ...
   388: get_ref_pic_index(const HEVCContext *h, const HEVCFrame *frame)
    ...
pixlet.c:
   316: read_highpass(AVCodecContext *avctx, const uint8_t *ptr, int plane, AVFrame *frame)
   320: ptrdiff_t stride = frame->linesize[plane] / 2;
   328: int16_t *dest = (int16_t *)frame->data[plane] +
   480: static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
   484: uint16_t *dsty = (uint16_t *)frame->data[0];
   485: int16_t *srcy = (int16_t *)frame->data[0];
   486: ptrdiff_t stridey = frame->linesize[0] / 2;
   504: static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
   506: uint16_t *dstu = (uint16_t *)frame->data[1];
   507: uint16_t *dstv = (uint16_t *)frame ...
   528: decode_plane(AVCodecContext *avctx, int plane, const AVPacket *avpkt, AVFrame *frame)
    ...
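The repeated "linesize / 2" at pixlet.c:320 and :486 is worth calling out: AVFrame linesize is measured in bytes, while this code indexes the plane as 16-bit samples, so the per-row element stride is linesize divided by sizeof(int16_t). A tiny sketch of the idiom (the helper name is illustrative):

    #include <stddef.h>
    #include <stdint.h>

    /* Return a pointer to row y of a plane addressed as 16-bit samples.
     * linesize_bytes comes from AVFrame.linesize[] and counts bytes. */
    static int16_t *row16(uint8_t *plane_data, ptrdiff_t linesize_bytes, int y)
    {
        ptrdiff_t stride = linesize_bytes / (ptrdiff_t)sizeof(int16_t);
        return (int16_t *)plane_data + (ptrdiff_t)y * stride;
    }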
nvenc.c:
   511: av_log(avctx, AV_LOG_WARNING, "Each B frame as reference is not supported\n");
   924: "Defined b-frame requires more surfaces, "
  1707: av_frame_free(&ctx->frame);
  1756: "hw_frames_ctx must match the GPU frame type\n");
  1764: ctx->frame = av_frame_alloc();
  1765: if (!ctx->frame)
  1799: nvenc_copy_frame(AVCodecContext *avctx, NvencSurface *nv_surface, NV_ENC_LOCK_INPUT_BUFFER *lock_buffer_params, const AVFrame *frame)
  1811: if (frame->format == AV_PIX_FMT_YUV420P)
  1814: ret = av_image_fill_pointers(dst_data, frame->format, nv_surface->height,
  1819: if (frame ...
  1863: nvenc_register_frame(AVCodecContext *avctx, const AVFrame *frame)
  1917: nvenc_upload_frame(AVCodecContext *avctx, const AVFrame *frame, NvencSurface *nvenc_frame)
  2173: prepare_sei_data_array(AVCodecContext *avctx, const AVFrame *frame)
  2271: reconfig_encoder(AVCodecContext *avctx, const AVFrame *frame)
  2363: nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
  2476: AVFrame *frame = ctx->frame; (local in ff_nvenc_receive_packet)
    ...
dfpwmdec.c (matches in dfpwm_dec_frame):
   104: static int dfpwm_dec_frame(struct AVCodecContext *ctx, AVFrame *frame,
   113: frame->nb_samples = packet->size * 8LL / ctx->ch_layout.nb_channels;
   114: if (frame->nb_samples <= 0) {
   119: if ((ret = ff_get_buffer(ctx, frame, 0)) < 0)
   122: au_decompress(state, 140, packet->size, frame->data[0], packet->data);
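Line 113 is the whole sizing story for this codec: DFPWM packs one bit per sample, so an N-byte packet carries N * 8 bits split evenly across the channels. A sketch of the arithmetic (the function name is illustrative):

    #include <stdint.h>

    /* One bit per sample: 8 samples per byte, divided across channels.
     * The 8LL forces 64-bit math before the division, as the decoder does. */
    static int64_t dfpwm_samples_per_channel(int packet_size, int channels)
    {
        return packet_size * 8LL / channels;
    }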
/third_party/skia/third_party/externals/angle2/src/tests/capture_replay_tests/

CaptureReplayTests.cpp (matches in runTest):
   145: for (uint32_t frame = traceInfo.frameStart; frame <= traceInfo.frameEnd; frame++)
   147: mTraceLibrary->replayFrame(frame);
   151: const char *capturedSerializedState = mTraceLibrary->getSerializedContextState(frame);
   164: << "_ContextReplayed" << frame << ".json";
   171: << "_ContextCaptured" << frame << ".json";
/third_party/elfutils/libdwfl/

frame_unwind.c:
     1: /* Get previous frame state for an existing frame state.
   132: /* If FRAME is NULL we are computing the CFI frame base. In such case another
   136: expr_eval (Dwfl_Frame *state, Dwarf_Frame *frame, const Dwarf_Op *ops,
   313: const int elfclass = frame->cache->e_ident[EI_CLASS];
   457: if (frame == NULL
   458:     || dwarf_frame_cfa (frame, &cfa_ops, &cfa_nops) != 0
   530: Dwarf_Frame *frame; (local in handle_cfi)
   531: if (INTUSE(dwarf_cfi_addrframe) (cfi, pc, &frame) != 0)
   544: unwound->signal_frame = frame ...
    ...
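handle_cfi (line 531) asks libdw for the CFI frame description covering a pc, and expr_eval (line 458) fetches that frame's CFA expression. A hedged sketch of the same two public libdw calls with error handling trimmed (the helper name is illustrative; the returned frame is caller-freed, and the CFA ops should be consumed while it is still alive):

    #include <elfutils/libdw.h>
    #include <stdlib.h>

    /* Look up the Dwarf_Frame covering PC, then fetch its CFA expression. */
    static int cfa_ops_for_pc(Dwarf_CFI *cfi, Dwarf_Addr pc)
    {
        Dwarf_Frame *frame;
        if (dwarf_cfi_addrframe(cfi, pc, &frame) != 0)
            return -1;
        Dwarf_Op *ops;
        size_t nops;
        int res = dwarf_frame_cfa(frame, &ops, &nops);
        /* ... evaluate ops/nops here, while frame is alive ... */
        free(frame);
        return res;
    }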
/third_party/ffmpeg/libavdevice/

decklink_enc.cpp:
   144: decklink_frame *frame = static_cast<decklink_frame *>(_frame); (local in ScheduledFrameCompleted)
   145: struct decklink_ctx *ctx = frame->_ctx;
   147: if (frame->_avframe)
   148:     av_frame_unref(frame->_avframe);
   149: if (frame->_avpacket)
   150:     av_packet_unref(frame->_avpacket);
   185: " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n"); (in decklink_setup_video)
   370: decklink_construct_vanc(AVFormatContext *avctx, struct decklink_ctx *ctx, AVPacket *pkt, decklink_frame *frame)
   418: result = frame->SetAncillaryData(vanc);
   440: decklink_frame *frame; (local in decklink_write_video_packet)
    ...
/third_party/skia/third_party/externals/swiftshader/src/Vulkan/Debug/

Server.cpp (matches in Impl):
   232: auto const &frame = stack[i];
   233: auto const &loc = frame.location;
   236: sf.id = frame.id.value();
   237: sf.name = frame.function;
   253: auto frame = lock.get(Frame::ID(req.frameId));
   254: if(!frame)
   261: scope(lock, "locals", frame->locals.get()),
   262: scope(lock, "arguments", frame->arguments.get()),
   263: scope(lock, "registers", frame->registers.get()),
   427: auto frame ...
    ...
/third_party/node/deps/v8/src/execution/

tiering-manager.cc:
   151: bool HaveCachedOSRCodeForCurrentBytecodeOffset(UnoptimizedFrame* frame,
   153: JSFunction function = frame->function();
   154: const int current_offset = frame->GetBytecodeOffset();
   157:     handle(frame->GetBytecodeArray(), frame->isolate()));
   270: MaybeOptimizeFrame(JSFunction function, UnoptimizedFrame* frame, CodeKind code_kind)
   302:     frame, &osr_urgency_for_cached_osr_code)) {
   311: // still in the unoptimized frame (this implies a long-running loop).
   323: OptimizationDecision d = ShouldOptimize(function, code_kind, frame);
   327: ShouldOptimize(JSFunction function, CodeKind code_kind, JavaScriptFrame* frame)
   441: UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame()); (local in OnInterruptTick)
    ...
/third_party/skia/src/codec/

SkHeifCodec.cpp:
   319: Frame* frame = fFrameHolder.appendNewFrame(); (local in onGetFrameCount)
   320: frame->setXYWH(0, 0, frameInfo.mWidth, frameInfo.mHeight);
   321: frame->setDisposalMethod(SkCodecAnimation::DisposalMethod::kKeep);
   322: // Currently we don't know the duration until the frame is actually
   323: // decoded (onGetFrameInfo is also called before frame is decoded).
   325: frame->setDuration(frameInfo.mDurationUs / 1000);
   326: frame->setRequiredFrame(SkCodec::kNoFrame);
   327: frame->setHasAlpha(false);
   335: return static_cast<const SkFrame*>(this->frame(i)); (in onGetFrame)
   340: fFrames.emplace_back(i); // TODO: need to handle frame duration (in appendNewFrame)
   344: const SkHeifCodec::Frame* SkHeifCodec::FrameHolder::frame(int i) const {
   359: const Frame* frame = fFrameHolder.frame(i); (local in onGetFrameInfo)
    ...
/third_party/skia/third_party/externals/libwebp/examples/

gif2webp.c (matches in Help and main):
    71: printf(" -mixed ................. for each frame in the image, pick lossy\n"
   110: WebPPicture frame; // Frame rectangle only (not disposed). (local in main)
   118: int frame_number = 0; // Whether we are processing the first frame.
   140: !WebPPictureInit(&frame) || !WebPPictureInit(&curr_canvas) ||
   312: frame.width = gif->SWidth;
   313: frame.height = gif->SHeight;
   314: frame.use_argb = 1;
   315: if (!WebPPictureAlloc(&frame)) goto End;
   316: GIFClearPic(&frame, NULL);
   317: WebPPictureCopy(&frame, ...
    ...
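Lines 140 and 312-315 show the standard libwebp picture setup: zero-initialize with WebPPictureInit (which also checks the ABI version), set the dimensions and use_argb, then allocate with WebPPictureAlloc. A compact sketch of that sequence (the helper name is illustrative):

    #include <webp/encode.h>

    /* Prepare an ARGB WebPPicture sized to the GIF logical screen. */
    static int init_canvas(WebPPicture *pic, int width, int height)
    {
        if (!WebPPictureInit(pic)) return 0; /* ABI version mismatch */
        pic->width    = width;
        pic->height   = height;
        pic->use_argb = 1;                   /* ARGB storage, not YUV */
        return WebPPictureAlloc(pic);        /* allocates pic->argb */
    }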
/third_party/skia/tests/

CodecPartialTest.cpp (matches in DEF_TEST blocks):
   163: // This is the end of the first frame. SkCodec will treat this as a
   164: // single frame gif.
   167: // first frame to decode a full image.
   173: // fRequiredFrame as soon as getFrameInfo reports the frame.
   233: // to determine frame offsets.
   237: // frameByteCounts stores the number of bytes to decode a particular frame.
   243: SkBitmap frame; (local in DEF_TEST)
   244: frame.allocPixels(info);
   248: const SkCodec::Result result = fullCodec->getPixels(info, frame.getPixels(),
   249:     frame ...
   292: SkBitmap frame; (local in DEF_TEST)
    ...
/third_party/skia/platform_tools/libraries/include/

arcore_c_api.h:
    46: /// - Transient large data. These objects are usually acquired per-frame and are
    82: /// These changes mean that every frame should be considered to be in a
    84: /// anchors and the camera should never be used outside the rendering frame
    86: /// beyond the scope of a single rendering frame, either an anchor should be
   113: /// @defgroup frame Frame
   114: /// Per-frame state.
   193: // Frame and frame objects.
   195: /// @addtogroup frame
   521: /// frame. For example, acquiring the image metadata may fail with this error
   753: /// The light estimate is not valid this frame an ...
    ...
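The header's notes at lines 46 and 84-86 describe a strict per-frame acquire/release discipline for transient large data. A hedged sketch of that pattern using two calls from the same header (treat the exact signatures as an assumption to verify against the header; error handling trimmed):

    #include "arcore_c_api.h"

    /* Acquire the camera image for this frame, use it, and release it
     * before the next frame; holding it longer violates the contract
     * the header describes for transient large data. */
    static void process_camera_image(ArSession *session, ArFrame *frame)
    {
        ArImage *image = NULL;
        if (ArFrame_acquireCameraImage(session, frame, &image) != AR_SUCCESS)
            return; /* e.g. the image is not yet available this frame */
        /* ... read pixel data here ... */
        ArImage_release(image);
    }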
/third_party/python/Objects/

genobject.c:
    39: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)(gen->gi_iframe); (local in gen_traverse)
    40: assert(frame->frame_obj == NULL ||
    41:     frame->frame_obj->f_frame->owner == FRAME_OWNED_BY_GENERATOR);
    42: int err = _PyFrame_Traverse(frame, visit, arg);
   135: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe; (local in gen_dealloc)
   137: frame->previous = NULL;
   138: _PyFrame_Clear(frame);
   155: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe; (local in gen_send_ex2)
   203: /* Push arg onto the frame's value stack */
   206: _PyFrame_StackPush(frame, resul ...
   353: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe; (local in _PyGen_yf)
   426: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe; (local in _gen_throw)
   975: _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe; (local in gen_new_with_qualname)
  1330: _PyInterpreterFrame *frame = current_frame; (local in compute_cr_origin)
    ...