/third_party/skia/src/codec/
  SkHeifCodec.cpp
    319  Frame* frame = fFrameHolder.appendNewFrame();   [in onGetFrameCount()]
    320  frame->setXYWH(0, 0, frameInfo.mWidth, frameInfo.mHeight);
    321  frame->setDisposalMethod(SkCodecAnimation::DisposalMethod::kKeep);
    322  // Currently we don't know the duration until the frame is actually
    323  // decoded (onGetFrameInfo is also called before frame is decoded).
    325  frame->setDuration(frameInfo.mDurationUs / 1000);
    326  frame->setRequiredFrame(SkCodec::kNoFrame);
    327  frame->setHasAlpha(false);
    335  return static_cast<const SkFrame*>(this->frame(i));   [in onGetFrame()]
    340  fFrames.emplace_back(i); // TODO: need to handle frame duration   [in appendNewFrame()]
    344  const SkHeifCodec::Frame* SkHeifCodec::FrameHolder::frame(int i) const {
    359  const Frame* frame = fFrameHolder.frame(i);   [in onGetFrameInfo()]
    [all...]
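The records built in onGetFrameCount() above surface through SkCodec's public animation API. A minimal sketch of reading them back, assuming Skia's public headers; `data` (the encoded HEIF bytes) stands in for the caller's input:

    #include "include/codec/SkCodec.h"
    #include "include/core/SkData.h"
    #include "include/core/SkTypes.h"

    static void dumpFrameInfo(sk_sp<SkData> data) {
        std::unique_ptr<SkCodec> codec = SkCodec::MakeFromData(std::move(data));
        if (!codec) {
            return;
        }
        for (int i = 0; i < codec->getFrameCount(); ++i) {
            SkCodec::FrameInfo info;
            if (codec->getFrameInfo(i, &info)) {
                // fDuration is in ms, matching setDuration(mDurationUs / 1000) above.
                SkDebugf("frame %d: %d ms, requires frame %d\n",
                         i, info.fDuration, info.fRequiredFrame);
            }
        }
    }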
/third_party/skia/third_party/externals/libwebp/examples/
  gif2webp.c
    71   printf(" -mixed ................. for each frame in the image, pick lossy\n"   [in Help()]
    110  WebPPicture frame;     // Frame rectangle only (not disposed).   [in main()]
    118  int frame_number = 0;  // Whether we are processing the first frame.
    140  !WebPPictureInit(&frame) || !WebPPictureInit(&curr_canvas) ||
    312  frame.width = gif->SWidth;
    313  frame.height = gif->SHeight;
    314  frame.use_argb = 1;
    315  if (!WebPPictureAlloc(&frame)) goto End;
    316  GIFClearPic(&frame, NULL);
    317  WebPPictureCopy(&frame, …
    [all...]
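The hits show libwebp's standard WebPPicture lifecycle: init, set dimensions and format, alloc, use, free. A condensed sketch of that lifecycle using only the public libwebp encode header (the helper name is illustrative):

    #include <webp/encode.h>

    // WebPPicture lifecycle as used by gif2webp above: every successful
    // WebPPictureAlloc() must be paired with WebPPictureFree().
    static int make_canvas(WebPPicture* pic, int w, int h) {
        if (!WebPPictureInit(pic)) return 0;   // ABI version check + zero-init
        pic->width = w;
        pic->height = h;
        pic->use_argb = 1;                     // ARGB buffer, as at line 314
        if (!WebPPictureAlloc(pic)) return 0;  // allocates pic->argb
        return 1;
    }
    // ... encode from the picture ... then: WebPPictureFree(pic);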
/third_party/skia/tests/
  CodecPartialTest.cpp   [all hits in DEF_TEST()]
    163  // This is the end of the first frame. SkCodec will treat this as a
    164  // single frame gif.
    167  // first frame to decode a full image.
    173  // fRequiredFrame as soon as getFrameInfo reports the frame.
    233  // to determine frame offsets.
    237  // frameByteCounts stores the number of bytes to decode a particular frame.
    243  SkBitmap frame;
    244  frame.allocPixels(info);
    248  const SkCodec::Result result = fullCodec->getPixels(info, frame.getPixels(),
    249      frame…
    292  SkBitmap frame;
    [all...]
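This test feeds a codec partial data, so it exercises SkCodec's incremental decode entry points. A minimal sketch of that flow under the assumption that more encoded bytes keep arriving from elsewhere (the helper name is illustrative, not from the test):

    #include "include/codec/SkCodec.h"
    #include "include/core/SkBitmap.h"

    // Incremental decode: start once, then call incrementalDecode() each time
    // more bytes arrive; kIncompleteInput means "feed me more".
    static bool decodeAsDataArrives(SkCodec* codec, SkBitmap* bm) {
        SkImageInfo info = codec->getInfo().makeColorType(kN32_SkColorType);
        bm->allocPixels(info);
        if (codec->startIncrementalDecode(info, bm->getPixels(),
                                          bm->rowBytes()) != SkCodec::kSuccess) {
            return false;
        }
        SkCodec::Result r = codec->incrementalDecode();
        while (r == SkCodec::kIncompleteInput) {
            // ... append newly arrived bytes to the codec's stream here ...
            r = codec->incrementalDecode();
        }
        return r == SkCodec::kSuccess;
    }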
/third_party/skia/platform_tools/libraries/include/
  arcore_c_api.h
    46   /// - Transient large data. These objects are usually acquired per-frame and are
    82   /// These changes mean that every frame should be considered to be in a
    84   /// anchors and the camera should never be used outside the rendering frame
    86   /// beyond the scope of a single rendering frame, either an anchor should be
    113  /// @defgroup frame Frame
    114  /// Per-frame state.
    193  // Frame and frame objects.
    195  /// @addtogroup frame
    521  /// frame. For example, acquiring the image metadata may fail with this error   [in AR_DEFINE_ENUM()]
    753  /// The light estimate is not valid this frame an…
    [all...]
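The "transient large data" rule above means per-frame objects must be acquired and released within one render pass. A sketch of that pattern with the ARCore C API; it assumes a valid `session` and `frame` already exist, and the render call is a placeholder:

    #include "arcore_c_api.h"

    // Acquire per-frame data, use it, release it before the frame ends.
    static void on_draw_frame(ArSession* session, ArFrame* frame) {
        if (ArSession_update(session, frame) != AR_SUCCESS) return;

        ArPointCloud* point_cloud = nullptr;  // transient large data
        if (ArFrame_acquirePointCloud(session, frame, &point_cloud) == AR_SUCCESS) {
            // ... render the point cloud ...
            ArPointCloud_release(point_cloud);  // must not outlive this frame
        }
    }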
/third_party/ffmpeg/libavutil/
  downmix_info.c   [all hits in av_downmix_info_update_side_data()]
    22   #include "frame.h"
    24   AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame)
    28   side_data = av_frame_get_side_data(frame, AV_FRAME_DATA_DOWNMIX_INFO);
    31   side_data = av_frame_new_side_data(frame, AV_FRAME_DATA_DOWNMIX_INFO,
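Lines 28-31 are the usual get-or-create idiom for AVFrame side data: look the entry up, attach a new one only if missing. A generic sketch of the same idiom using only public libavutil calls (the helper is hypothetical; any side-data type works):

    extern "C" {
    #include <libavutil/frame.h>
    }

    // Get-or-create: reuse existing side data, otherwise attach a new entry.
    static AVFrameSideData* get_or_add_side_data(AVFrame* frame,
                                                 enum AVFrameSideDataType type,
                                                 size_t size) {
        AVFrameSideData* sd = av_frame_get_side_data(frame, type);
        if (!sd)
            sd = av_frame_new_side_data(frame, type, size);  // NULL on OOM
        return sd;
    }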
/third_party/ffmpeg/libavcodec/
  rasc.c
    63   AVFrame *frame;   [member]
    69   static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
    72   uint8_t *dst = frame->data[0];
    79   dst += frame->linesize[0];
    612  pal = s->frame->data[1];   [in draw_cursor()]
    625  dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
    651  dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
    665  dst = s->frame…
    674  decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
    [all...]
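Lines 625 and 651 use the standard AVFrame pixel-addressing formula: a pixel at (x, y) in plane p lives at data[p] + y * linesize[p] + x * bytes_per_pixel, where linesize may include padding. A small sketch that fills a rectangle in an 8-bit plane the same way (a plain helper, not from rasc.c):

    extern "C" {
    #include <libavutil/frame.h>
    }

    // Fill a w*h rectangle at (x, y) in an 8-bit plane. Rows are stepped with
    // linesize, never with the width, because linesize may include padding.
    static void fill_rect8(AVFrame* frame, int x, int y, int w, int h, uint8_t v) {
        for (int row = 0; row < h; row++) {
            uint8_t* dst = frame->data[0]
                         + (ptrdiff_t)frame->linesize[0] * (y + row) + x;
            for (int col = 0; col < w; col++)
                dst[col] = v;
        }
    }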
  vaapi_hevc.c
    56   va_pic->picture_id = ff_vaapi_get_surface_id(pic->frame);   [in fill_vaapi_pic()]
    63   if (pic->frame->interlaced_frame) {
    66   if (!pic->frame->top_field_first)
    73   VASurfaceID pic_surf = ff_vaapi_get_surface_id(pic->frame);   [in find_frame_rps_type()]
    77   if (pic_surf == ff_vaapi_get_surface_id(h->rps[ST_CURR_BEF].ref[i]->frame))
    82   if (pic_surf == ff_vaapi_get_surface_id(h->rps[ST_CURR_AFT].ref[i]->frame))
    87   if (pic_surf == ff_vaapi_get_surface_id(h->rps[LT_CURR].ref[i]->frame))
    100  const HEVCFrame *frame = NULL;   [in fill_vaapi_reference_frames()]
    102  while (!frame && j < FF_ARRAY_ELEMS(h->DPB)) {
    104  frame…
    388  get_ref_pic_index(const HEVCContext *h, const HEVCFrame *frame)
    [all...]
  pixlet.c
    316  read_highpass(AVCodecContext *avctx, const uint8_t *ptr,
    317      int plane, AVFrame *frame)
    320  ptrdiff_t stride = frame->linesize[plane] / 2;
    328  int16_t *dest = (int16_t *)frame->data[plane] +
    480  static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
    484  uint16_t *dsty = (uint16_t *)frame->data[0];
    485  int16_t *srcy = (int16_t *)frame->data[0];
    486  ptrdiff_t stridey = frame->linesize[0] / 2;
    504  static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
    506  uint16_t *dstu = (uint16_t *)frame->data[1];
    507  uint16_t *dstv = (uint16_t *)frame…
    528  decode_plane(AVCodecContext *avctx, int plane, const AVPacket *avpkt, AVFrame *frame)
    [all...]
  dfpwmdec.c   [all hits in dfpwm_dec_frame()]
    104  static int dfpwm_dec_frame(struct AVCodecContext *ctx, AVFrame *frame,
    113  frame->nb_samples = packet->size * 8LL / ctx->ch_layout.nb_channels;
    114  if (frame->nb_samples <= 0) {
    119  if ((ret = ff_get_buffer(ctx, frame, 0)) < 0)
    122  au_decompress(state, 140, packet->size, frame->data[0], packet->data);
  dvaudiodec.c   [all hits in decode_frame()]
    88   static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
    99   frame->nb_samples = dv_get_audio_sample_count(pkt->data + 244, s->is_pal);
    100  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    102  dst = (int16_t *)frame->data[0];
    104  for (i = 0; i < frame->nb_samples; i++) {
  internal.h
    50   * When using frame-threaded decoding, this field is set for the first
    56   * An audio frame with less than required samples has been submitted and
    99   * The input frame is stored here for encoders implementing the simple
    113  * Number of audio samples to skip at the start of the next decoded frame
    161  void ff_color_frame(AVFrame *frame, const int color[4]);
    202  * Get a buffer for a frame. This is a wrapper around
    206  int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
    213  int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags);
    222  * Check that the provided frame dimensions are valid and set them on the codec
    236  int ff_side_data_update_matrix_encoding(AVFrame *frame, …
    [all...]
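ff_get_buffer() at line 206 is the allocation wrapper that every audio decoder in this list (dfpwmdec, dvaudiodec, hcom, dsicinaudio, bmvaudio) uses in the same three-step pattern: set nb_samples, request a buffer, write samples. A condensed sketch of that pattern for a hypothetical 8-bit-to-16-bit mono codec; it is decoder-internal code, and assumes sample_fmt was set to AV_SAMPLE_FMT_S16 at init:

    // Assumes libavcodec's internal headers (not public API); the codec is made up.
    #include "avcodec.h"
    #include "internal.h"

    static int toy_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                                int *got_frame_ptr, AVPacket *pkt)
    {
        int ret;
        frame->nb_samples = pkt->size;               // one input byte per sample
        if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
            return ret;                              // buffer sized from nb_samples
        int16_t *dst = (int16_t *)frame->data[0];
        for (int i = 0; i < pkt->size; i++)
            dst[i] = (int16_t)((pkt->data[i] - 0x80) * 256);  // expand u8 to s16
        *got_frame_ptr = 1;
        return pkt->size;                            // bytes consumed
    }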
  hcom.c   [all hits in hcom_decode()]
    83   static int hcom_decode(AVCodecContext *avctx, AVFrame *frame,
    93   frame->nb_samples = pkt->size * 8;
    94   if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    115  frame->data[0][n++] = s->sample;
    121  frame->nb_samples = n;
  libwebpenc_animencoder.c
    37   int64_t first_frame_pts;   // pts of the first encoded frame.   [member]
    60   const AVFrame *frame, int *got_packet) {   [in libwebp_anim_encode_frame()]
    64   if (!frame) {
    95   ret = ff_libwebp_get_frame(avctx, &s->cc, frame, &alt_frame, &pic);
    100  avctx->time_base.num * frame->pts * 1000 / avctx->time_base.den;
    104  "Encoding WebP frame failed with error: %d\n",
    111  s->first_frame_pts = frame->pts;
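Line 100 converts frame->pts from the codec time base into milliseconds by hand; the intermediate multiply can overflow for large pts values. The overflow-safe way to express the same conversion is av_rescale_q, sketched here as a standalone helper:

    extern "C" {
    #include <libavutil/mathematics.h>
    #include <libavutil/rational.h>
    }

    // pts (in time_base units) -> milliseconds, without intermediate overflow.
    static int64_t pts_to_ms(int64_t pts, AVRational time_base) {
        const AVRational ms = {1, 1000};  // target base: 1/1000 s
        return av_rescale_q(pts, time_base, ms);
    }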
  nvenc.c
    511   av_log(avctx, AV_LOG_WARNING, "Each B frame as reference is not supported\n");   [in nvenc_check_capabilities()]
    924   "Defined b-frame requires more surfaces, "   [in nvenc_recalc_surfaces()]
    1707  av_frame_free(&ctx->frame);   [in ff_nvenc_encode_close()]
    1756  "hw_frames_ctx must match the GPU frame type\n");   [in ff_nvenc_encode_init()]
    1764  ctx->frame = av_frame_alloc();
    1765  if (!ctx->frame)
    1799  nvenc_copy_frame(AVCodecContext *avctx, NvencSurface *nv_surface,
    1800      NV_ENC_LOCK_INPUT_BUFFER *lock_buffer_params, const AVFrame *frame)
    1811  if (frame->format == AV_PIX_FMT_YUV420P)
    1814  ret = av_image_fill_pointers(dst_data, frame->format, nv_surface->height,
    1819  if (frame…
    1863  nvenc_register_frame(AVCodecContext *avctx, const AVFrame *frame)
    1917  nvenc_upload_frame(AVCodecContext *avctx, const AVFrame *frame, NvencSurface *nvenc_frame)
    2173  prepare_sei_data_array(AVCodecContext *avctx, const AVFrame *frame)
    2271  reconfig_encoder(AVCodecContext *avctx, const AVFrame *frame)
    2363  nvenc_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    2476  AVFrame *frame = ctx->frame;   [in ff_nvenc_receive_packet()]
    [all...]
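nvenc_send_frame()/ff_nvenc_receive_packet() above are the internal halves of FFmpeg's send/receive encode API. From the caller's side the loop looks like this, using only the public API (error handling trimmed to the essentials):

    extern "C" {
    #include <libavcodec/avcodec.h>
    }

    // Feed one frame (or NULL to flush) and drain every packet it produces.
    static int encode_and_drain(AVCodecContext *enc, const AVFrame *frame,
                                AVPacket *pkt) {
        int ret = avcodec_send_frame(enc, frame);  // frame == NULL starts flushing
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
            // ... write pkt somewhere ...
            av_packet_unref(pkt);
        }
        // EAGAIN: encoder wants more input; EOF: fully flushed. Both are normal.
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }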
  decode.h
    25   #include "libavutil/frame.h"
    31   * This struct stores per-frame lavc-internal data and is attached to it via
    36   * The callback to perform some delayed processing on the frame right
    39   * @note This code is called at some unspecified point after the frame is
    45   int (*post_process)(void *logctx, AVFrame *frame);
    50   * Per-frame private data for hwaccels.
    68   * Set various frame properties from the codec context / packet data.
    70   int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame);
    80   int ff_attach_decode_data(AVFrame *frame);
  cuviddec.c   [all hits in cuvid_output_frame()]
    459  static int cuvid_output_frame(AVCodecContext *avctx, AVFrame *frame)
    512  ret = av_hwframe_get_buffer(ctx->hwframe, frame, 0);
    518  ret = ff_decode_frame_props(avctx, frame);
    532  .dstDevice = (CUdeviceptr)frame->data[i],
    534  .dstPitch = frame->linesize[i],
    536  .WidthInBytes = FFMIN(pitch, frame->linesize[i]),
    583  ret = ff_get_buffer(avctx, frame, 0);
    590  ret = av_hwframe_transfer_data(frame, tmp_frame, 0);
    602  frame->key_frame = ctx->key_frame[parsed_frame.dispinfo.picture_index];
    605  frame…
    [all...]
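Line 590 is the download path: av_hwframe_transfer_data() copies a hardware (here CUDA) frame into a software frame. From the caller's side the same public API is used like this (a sketch; the helper name is made up):

    extern "C" {
    #include <libavutil/error.h>
    #include <libavutil/frame.h>
    #include <libavutil/hwcontext.h>
    }

    // Copy a hardware frame into a freshly allocated software frame.
    static int download_hw_frame(const AVFrame *hw, AVFrame **out) {
        AVFrame *sw = av_frame_alloc();
        if (!sw)
            return AVERROR(ENOMEM);
        int ret = av_hwframe_transfer_data(sw, hw, 0);  // picks a compatible sw format
        if (ret >= 0)
            ret = av_frame_copy_props(sw, hw);          // carry over pts and metadata
        if (ret < 0) {
            av_frame_free(&sw);
            return ret;
        }
        *out = sw;
        return 0;
    }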
  dsicinaudio.c   [all hits in cinaudio_decode_frame()]
    90   static int cinaudio_decode_frame(AVCodecContext *avctx, AVFrame *frame,
    100  frame->nb_samples = avpkt->size - cin->initial_decode_frame;
    101  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    103  samples = (int16_t *)frame->data[0];
  bmvaudio.c   [all hits in bmv_aud_decode_frame()]
    42   static int bmv_aud_decode_frame(AVCodecContext *avctx, AVFrame *frame,
    60   frame->nb_samples = total_blocks * 32;
    61   if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    63   output_samples = (int16_t *)frame->data[0];
  wrapped_avframe.c
    32   #include "libavutil/frame.h"
    38   AVFrame *frame = (AVFrame *)data;   [in wrapped_avframe_release_buffer()]
    40   av_frame_free(&frame);
    44   const AVFrame *frame, int *got_packet)   [in wrapped_avframe_encode()]
    46   AVFrame *wrapped = av_frame_clone(frame);
/third_party/ffmpeg/libavfilter/
  af_asetrate.c   [all hits in filter_frame()]
    78   static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    84   frame->sample_rate = outlink->sample_rate;
    86   frame->pts = av_rescale(frame->pts, inlink->sample_rate, …
    88   return ff_filter_frame(outlink, frame);
  qsvvpp.h
    44   AVFrame *frame;   [member]
    52   int (*filter_frame) (AVFilterLink *outlink, AVFrame *frame);   /**< callback */
    55   mfxFrameInfo *frame_infos;   /**< frame info for each input */
    86   int (*filter_frame)(AVFilterLink *outlink, AVFrame *frame);
    108  /* vpp filter frame and call the cb if needed */
    109  int ff_qsvvpp_filter_frame(QSVVPPContext *vpp, AVFilterLink *inlink, AVFrame *frame);
/third_party/python/Objects/
  genobject.c
    39   _PyInterpreterFrame *frame = (_PyInterpreterFrame *)(gen->gi_iframe);   [in gen_traverse()]
    40   assert(frame->frame_obj == NULL ||
    41       frame->frame_obj->f_frame->owner == FRAME_OWNED_BY_GENERATOR);
    42   int err = _PyFrame_Traverse(frame, visit, arg);
    135  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe;   [in gen_dealloc()]
    137  frame->previous = NULL;
    138  _PyFrame_Clear(frame);
    155  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe;   [in gen_send_ex2()]
    203  /* Push arg onto the frame's value stack */
    206  _PyFrame_StackPush(frame, resul…
    353  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe;   [in _PyGen_yf()]
    426  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe;   [in _gen_throw()]
    975  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)gen->gi_iframe;   [in gen_new_with_qualname()]
    1330 _PyInterpreterFrame *frame = current_frame;   [in compute_cr_origin()]
    [all...]
/third_party/node/deps/v8/src/debug/
  debug.cc
    115   JavaScriptFrame* frame) {   [in FromFrame()]
    119   auto summary = FrameSummary::GetTop(frame).AsJavaScript();
    157   Handle<DebugInfo> debug_info, JavaScriptFrame* frame,   [in AllAtCurrentStatement()]
    160   auto summary = FrameSummary::GetTop(frame).AsJavaScript();
    178   JavaScriptFrame* frame) const {   [in GetGeneratorObjectForSuspendedFrame()]
    182   Object generator_obj = UnoptimizedFrame::cast(frame)->ReadInterpreterRegister(
    349   // interpreter stack frame in GetGeneratorObjectForSuspendedFrame.   [in GetBreakLocation()]
    423   // Set frame to what it was at Step break   [in RestoreDebug()]
    424   thread_local_.break_frame_id_ = frames_it.frame()->id();
    490   void Debug::Break(JavaScriptFrame* frame, Handl…
    654   IsMutedAtCurrentLocation(JavaScriptFrame* frame)
    668   GetHitBreakpointsAtCurrentStatement(JavaScriptFrame* frame, bool* has_break_points)
    1113  JavaScriptFrame* frame = it.frame();   [in PrepareStepOnThrow()]
    1128  JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
    1190  CommonFrame* frame = frames_it.frame();   [in PrepareStep()]
    1414  BaselineFrame* frame = BaselineFrame::cast(it.frame());
    1427  JavaScriptFrame* frame = it.frame();
    2072  IsBreakAtReturn(JavaScriptFrame* frame)
    2167  IsFrameBlackboxed(JavaScriptFrame* frame)
    2842  PerformSideEffectCheckAtBytecode(InterpretedFrame* frame)
    [all...]
/third_party/node/deps/v8/third_party/jinja2/
  nativetypes.py
    50   def _output_child_to_const(self, node, frame, finalize):
    51       const = node.as_const(frame.eval_ctx)
    61   def _output_child_pre(self, node, frame, finalize):
    65   def _output_child_post(self, node, frame, finalize):
/third_party/nghttp2/src/
  app_helper.h
    45   const nghttp2_frame *frame, const uint8_t *name,
    50   const nghttp2_frame *frame, void *user_data);
    53   const nghttp2_frame *frame,
    57   const nghttp2_frame *frame, void *user_data);
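These declarations match nghttp2's frame-callback signatures. A minimal sketch of how such a callback gets wired up with the public nghttp2 API (the handler body is illustrative):

    #include <nghttp2/nghttp2.h>

    static int on_frame_recv(nghttp2_session *session,
                             const nghttp2_frame *frame, void *user_data) {
        (void)session; (void)user_data;
        if (frame->hd.type == NGHTTP2_HEADERS) {
            // react to a complete HEADERS frame here
        }
        return 0;
    }

    static void setup_callbacks(void) {
        nghttp2_session_callbacks *cbs = nullptr;
        if (nghttp2_session_callbacks_new(&cbs) != 0)
            return;
        nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
        // ... pass cbs to nghttp2_session_client_new()/server_new(), then:
        nghttp2_session_callbacks_del(cbs);
    }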