/third_party/ffmpeg/libavcodec/

evrcdec.c
     57  * EVRC-A unpacked data frame
     62  uint8_t pitch_delay;    ///< pitch delay for entire frame
     63  uint8_t delay_diff;     ///< delay difference for entire frame
     67  uint8_t energy_gain;    ///< frame energy gain index
     79  EVRCAFrame frame;
    112  EVRCAFrame *frame = &e->frame;  (in unpack_frame)
    117  frame->lpc_flag = get_bits1(gb);
    118  frame->lsp[0] = get_bits(gb, 6);
    119  frame ...
    744  evrc_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt)
    ... (more matches)
qtrle.c
     46  AVFrame *frame;
     63  int row_inc = s->frame->linesize[0];  (in qtrle_decode_1bpp)
     65  uint8_t *rgb = s->frame->data[0];
     66  int pixel_limit = s->frame->linesize[0] * s->avctx->height;
     69  * as 'go to next line' during the decoding of a frame but is 'go to first ...
    149  int row_inc = s->frame->linesize[0];  (in qtrle_decode_2n4bpp)
    151  uint8_t *rgb = s->frame->data[0];
    152  int pixel_limit = s->frame->linesize[0] * s->avctx->height;
    207  int row_inc = s->frame->linesize[0];  (in qtrle_decode_8bpp)
    209  uint8_t *rgb = s->frame ...
    ... (more matches)
pictordec.c
     40  static void picmemset_8bpp(PicContext *s, AVFrame *frame, int value, int run, ...
     44  uint8_t *d = frame->data[0] + *y * frame->linesize[0];  (in picmemset_8bpp)
     61  static void picmemset(PicContext *s, AVFrame *frame, unsigned value, int run, ...
     73  d = frame->data[0] + yl * frame->linesize[0];  (in picmemset)
     90  d = frame->data[0] + yl * frame->linesize[0];
    122  static int decode_frame(AVCodecContext *avctx, AVFrame *frame, ...
    171  if ((ret = ff_get_buffer(avctx, frame, ...  (in decode_frame)
    ... (more matches)
libtwolame.c
    110  twolame_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
    111  const AVFrame *frame, int *got_packet_ptr)
    119  if (frame) {  (in twolame_encode_frame)
    123  (const float *)frame->data[0],
    124  frame->nb_samples,
    130  (const float *)frame->data[0],
    131  (const float *)frame->data[1],
    132  frame->nb_samples,
    137  (const short int *)frame->data[0],
    138  frame->nb_samples,
    143  (const short int *)frame ...
    ... (more matches)
yop.c
     37  AVFrame *frame;
     89  av_frame_free(&s->frame);  (in yop_decode_close)
    123  s->frame = av_frame_alloc();  (in yop_decode_init)
    124  if (!s->frame)
    198  AVFrame *frame = s->frame;  (in yop_decode_frame)
    208  if ((ret = ff_reget_buffer(avctx, frame, 0)) < 0)
    212  memset(frame->data[1], 0, AVPALETTE_SIZE);
    214  s->dstbuf = frame->data[0];
    215  s->dstptr = frame ...
    ... (more matches)
bfi.c
     51  static int bfi_decode_frame(AVCodecContext *avctx, AVFrame *frame, ...
     63  if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)  (in bfi_decode_frame)
     68  /* Set frame parameters and palette, if necessary */
     70  frame->pict_type = AV_PICTURE_TYPE_I;
     71  frame->key_frame = 1;
     77  pal = (uint32_t *)frame->data[1];
     86  memcpy(bfi->pal, frame->data[1], sizeof(bfi->pal));
     87  frame->palette_has_changed = 1;
     89  frame->pict_type = AV_PICTURE_TYPE_P;
     90  frame ...
    ... (more matches)
bethsoftvideo.c
     38  AVFrame *frame;
     47  vid->frame = av_frame_alloc();  (in bethsoftvid_decode_init)
     48  if (!vid->frame)
     56  uint32_t *palette = (uint32_t *)ctx->frame->data[1];  (in set_palette)
     66  ctx->frame->palette_has_changed = 1;
     82  if ((ret = ff_reget_buffer(avctx, vid->frame, 0)) < 0)  (in bethsoftvid_decode_frame)
     84  wrap_to_next_line = vid->frame->linesize[0] - avctx->width;
     95  dst = vid->frame->data[0];
     96  frame_end = vid->frame->data[0] + vid->frame ...
    ... (more matches)
speedhq.c
    281  static int decode_speedhq_border(const SHQContext *s, GetBitContext *gb, AVFrame *frame, int field_number, int line_stride)
    283  int linesize_y = frame->linesize[0] * line_stride;  (in decode_speedhq_border)
    284  int linesize_cb = frame->linesize[1] * line_stride;
    285  int linesize_cr = frame->linesize[2] * line_stride;
    290  linesize_a = frame->linesize[3] * line_stride;
    292  for (int y = 0; y < frame->height; y += 16 * line_stride) {
    296  int x = frame->width - 8;
    298  dest_y = frame->data[0] + frame->linesize[0] * (y + field_number) + x;
    300  dest_cb = frame ...
    354  decode_speedhq_field(const SHQContext *s, const uint8_t *buf, int buf_size, AVFrame *frame, int field_number, int start, int end, int line_stride)
    492  speedhq_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
    ... (more matches)
agm.c
    314  decode_intra_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, int plane)
    315  const int *quant_matrix, AVFrame *frame,  (in decode_intra_plane)
    337  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
    338  frame->linesize[plane], s->wblocks + 64 * x);
    348  s->idsp.idct_put(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame->linesize[plane] + x * 8,
    349  frame->linesize[plane], s->block);
    393  decode_inter_plane(AGMContext *s, GetBitContext *gb, int size, const int *quant_matrix, AVFrame *frame, AVFrame *prev, int plane)
    394  const int *quant_matrix, AVFrame *frame,  (in decode_inter_plane)
    433  copy_block8(frame->data[plane] + (s->blocks_h - 1 - y) * 8 * frame ...
    574  decode_raw_intra_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
    662  decode_runlen_rgb(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
    715  decode_runlen(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
    765  decode_raw_intra(AVCodecContext *avctx, GetByteContext *gbyte, AVFrame *frame)
    798  decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
    874  decode_inter(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame, AVFrame *prev)
   1097  decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt)
    ... (more matches)
utvideodec.c
    560  static int decode_frame(AVCodecContext *avctx, AVFrame *frame, ...
    572  if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)  (in decode_frame)
    575  /* parse plane structure to get frame flags and validate slice offsets */
    630  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
    685  av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
    690  av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
    711  ret = decode_plane(c, i, frame->data[i],
    712  frame->linesize[i], avctx->width,
    719  restore_median_planar(c, frame->data[i],
    720  frame ...
    ... (more matches)
aasc.c
     39  AVFrame *frame;
     74  s->frame = av_frame_alloc();  (in aasc_decode_init)
     75  if (!s->frame)
     90  av_log(avctx, AV_LOG_ERROR, "frame too short\n");  (in aasc_decode_frame)
     94  if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
    104  ff_msrle_decode(avctx, s->frame, 8, &s->gb);
    113  memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * psize);
    120  ff_msrle_decode(avctx, s->frame, 8, &s->gb);
    133  memcpy(s->frame ...
    ... (more matches)
/third_party/python/Objects/

frameobject.c
    591  _PyFrame_GetState(PyFrameObject *frame)
    593  assert(!_PyFrame_IsIncomplete(frame->f_frame));  (in _PyFrame_GetState)
    594  if (frame->f_frame->stacktop == 0) {
    597  switch(frame->f_frame->owner) {
    600  PyGenObject *gen = _PyFrame_GetGenerator(frame->f_frame);
    605  if (_PyInterpreterFrame_LASTI(frame->f_frame) < 0) {
    608  switch (_PyOpcode_Deopt[_Py_OPCODE(*frame->f_frame->prev_instr)])
    657  * setting the line number of a frame.  (in frame_setlineno)
    670  "can't jump from the 'call' trace event of a new frame");
    843  frame i ...
    867  _PyInterpreterFrame *frame = (_PyInterpreterFrame *)f->_f_frame_data;  (in frame_dealloc)
   1007  init_frame(_PyInterpreterFrame *frame, PyFunctionObject *func, PyObject *locals)
   1077  _PyFrame_OpAlreadyRan(_PyInterpreterFrame *frame, int opcode, int oparg)
   1102  _PyFrame_FastToLocalsWithError(_PyInterpreterFrame *frame)
   1222  _PyFrame_LocalsToFast(_PyInterpreterFrame *frame, int clear)
   1299  _PyFrame_IsEntryFrame(PyFrameObject *frame)
   1308  PyFrame_GetCode(PyFrameObject *frame)
   1320  PyFrame_GetBack(PyFrameObject *frame)
   1339  PyFrame_GetLocals(PyFrameObject *frame)
   1346  PyFrame_GetGlobals(PyFrameObject *frame)
   1353  PyFrame_GetBuiltins(PyFrameObject *frame)
   1360  PyFrame_GetLasti(PyFrameObject *frame)
   1371  PyFrame_GetGenerator(PyFrameObject *frame)
    ... (more matches)
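The frameobject.c hits end in the public PyFrame_Get* accessors. As a hedged illustration (not code from this file), here is a minimal C sketch that walks the current call stack with those accessors, assuming CPython 3.9+ and the non-limited API so PyCodeObject fields are visible:

    #include <Python.h>
    #include <stdio.h>

    /* Sketch only: print "file:line" for every frame on the current stack.
     * PyFrame_GetCode() and PyFrame_GetBack() return new references, so each
     * is released here; PyEval_GetFrame() returns a borrowed reference. */
    static void dump_python_stack(void)
    {
        PyFrameObject *frame = PyEval_GetFrame();
        Py_XINCREF(frame);                      /* take our own reference */
        while (frame != NULL) {
            PyCodeObject *code = PyFrame_GetCode(frame);
            const char *filename = PyUnicode_AsUTF8(code->co_filename);
            printf("%s:%d\n", filename ? filename : "?",
                   PyFrame_GetLineNumber(frame));
            Py_DECREF(code);

            PyFrameObject *back = PyFrame_GetBack(frame);
            Py_DECREF(frame);
            frame = back;                       /* NULL at the outermost frame */
        }
    }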
/third_party/ffmpeg/libavfilter/

vsrc_testsrc.c
     64  int draw_once;        ///< draw only the first frame, always put out the same picture
     65  int draw_once_reset;  ///< draw only the first frame or in case of reset
     68  void (* fill_picture_fn)(AVFilterContext *ctx, AVFrame *frame);
    157  AVFrame *frame;  (in activate)
    179  frame = av_frame_clone(test->picref);
    181  frame = ff_get_video_buffer(outlink, test->w, test->h);
    183  if (!frame)
    185  frame->pts = test->pts;
    186  frame->key_frame = 1;
    187  frame ...
    300  haldclutsrc_fill_picture(AVFilterContext *ctx, AVFrame *frame)
    552  test_fill_picture(AVFilterContext *ctx, AVFrame *frame)
    726  draw_text(TestSourceContext *s, AVFrame *frame, FFDrawColor *color, int x0, int y0, const uint8_t *text)
    744  test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
   1035  rgbtest_fill_picture_complement(AVFilterContext *ctx, AVFrame *frame)
   1058  rgbtest_fill_picture(AVFilterContext *ctx, AVFrame *frame)
   1133  yuvtest_fill_picture8(AVFilterContext *ctx, AVFrame *frame)
   1190  yuvtest_fill_picture16(AVFilterContext *ctx, AVFrame *frame)
   1358  draw_bar(TestSourceContext *test, const uint8_t color[4], int x, int y, int w, int h, AVFrame *frame)
   1682  allyuv_fill_picture(AVFilterContext *ctx, AVFrame *frame)
   1742  allrgb_fill_picture(AVFilterContext *ctx, AVFrame *frame)
   1829  colorspectrum_fill_picture(AVFilterContext *ctx, AVFrame *frame)
   1984  colorchart_fill_picture(AVFilterContext *ctx, AVFrame *frame)
    ... (more matches)
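The vsrc_testsrc.c hits revolve around fill_picture_fn callbacks that paint into an AVFrame through data[] and linesize[]. A minimal sketch of such a fill routine (a hypothetical helper, not part of the filter), assuming a writable frame whose plane 0 is 8-bit:

    #include <string.h>
    #include <libavutil/frame.h>

    /* Hypothetical fill_picture_fn-style helper: paint plane 0 of an 8-bit
     * frame with a constant value, honoring linesize (which may be padded
     * beyond the visible width). */
    static void fill_gray8(AVFrame *frame, uint8_t value)
    {
        for (int y = 0; y < frame->height; y++)
            memset(frame->data[0] + y * frame->linesize[0], value, frame->width);
    }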
vf_colorcorrect.c
     67  AVFrame *frame = arg;  (in average_slice8)
     73  const int ulinesize = frame->linesize[1];
     74  const int vlinesize = frame->linesize[2];
     75  const uint8_t *uptr = (const uint8_t *)frame->data[1] + slice_start * ulinesize;
     76  const uint8_t *vptr = (const uint8_t *)frame->data[2] + slice_start * vlinesize;
     98  AVFrame *frame = arg;  (in average_slice16)
    104  const int ulinesize = frame->linesize[1] / 2;
    105  const int vlinesize = frame->linesize[2] / 2;
    106  const uint16_t *uptr = (const uint16_t *)frame->data[1] + slice_start * ulinesize;
    107  const uint16_t *vptr = (const uint16_t *)frame ...
    129  AVFrame *frame = arg;  (in minmax_slice8)
    165  AVFrame *frame = arg;  (in minmax_slice16)
    201  AVFrame *frame = arg;  (in median_8)
    255  AVFrame *frame = arg;  (in median_16)
    318  AVFrame *frame = arg;  (in colorcorrect_slice8)
    358  AVFrame *frame = arg;  (in colorcorrect_slice16)
    396  filter_frame(AVFilterLink *inlink, AVFrame *frame)
    ... (more matches)
vf_crop.c
     52  "n",  ///< number of frame
    240  // changing the frame size.  (in config_output)
    250  static int filter_frame(AVFilterLink *link, AVFrame *frame)
    258  s->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?  (in filter_frame)
    259  NAN : frame->pts * av_q2d(link->time_base);
    260  s->var_values[VAR_POS] = frame->pkt_pos == -1 ?
    261  NAN : frame->pkt_pos;
    288  frame->crop_top += s->y;
    289  frame->crop_left += s->x;
    290  frame ...
    ... (more matches)
vf_codecview.c
     73  { "frame_type", "set frame types to visualize motion vectors of", OFFSET(frame_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "frame_type" },
     74  { "ft", "set frame types to visualize motion vectors of", OFFSET(frame_type), AV_OPT_TYPE_FLAGS, {.i64=0}, 0, INT_MAX, FLAGS, "frame_type" },
    223  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    234  ret = ff_qp_table_extract(frame, &qp_table, &qstride, NULL, &qp_type);  (in filter_frame)
    236  av_frame_free(&frame);
    242  const int w = AV_CEIL_RSHIFT(frame->width, s->hsub);
    243  const int h = AV_CEIL_RSHIFT(frame->height, s->vsub);
    244  uint8_t *pu = frame->data[1];
    245  uint8_t *pv = frame->data[2];
    246  const int lzu = frame ...
    ... (more matches)
/third_party/node/deps/v8/src/runtime/

runtime-trace.cc
     41  void PrintRegisterRange(UnoptimizedFrame* frame, std::ostream& os,  (in PrintRegisterRange)
     47  Object reg_object = frame->ReadInterpreterRegister(reg_index);
     55  void PrintRegisters(UnoptimizedFrame* frame, std::ostream& os, bool is_input,  (in PrintRegisters)
     91  PrintRegisterRange(frame, os, bytecode_iterator, kRegFieldWidth,
     96  PrintRegisterRange(frame, os, bytecode_iterator, kRegFieldWidth,
    113  UnoptimizedFrame* frame =  (in RUNTIME_FUNCTION)
    114  reinterpret_cast<UnoptimizedFrame*>(frame_iterator.frame());
    116  if (frame->is_interpreted() && !FLAG_trace_ignition) {
    119  if (frame->is_baseline() && !FLAG_trace_baseline_exec) {
    140  if (frame ...
    163  UnoptimizedFrame* frame =  (in RUNTIME_FUNCTION)
    ... (more matches)
/third_party/node/deps/v8/src/execution/

frames.h
     17  // Frame inheritance hierarchy (please keep in sync with frame-constants.h):
    131  // Used by FrameScope to indicate that the stack frame is constructed
    137  // Used to mark the outermost JS entry frame.
    158  // Convert a stack frame type to a marker that can be stored on the stack.
    173  // Convert a marker back to a stack frame type.
    181  // invalid frame markers.  (in MarkerToType)
    195  // Check if a marker is a stack frame type marker or a tagged pointer.
    197  // Returns true if the given marker is tagged as a stack frame type marker,
    198  // and should be converted back to a stack frame type using MarkerToType.
    262  // be signed, as the PC of the frame ...
    500, 631, 686, 712, 733, 865, 894, 923, 941, 984, 1019, 1090, 1110, 1128, 1144, 1160, 1186  cast(StackFrame* frame)
    898  cast(const StackFrame* frame)
   1253  StackFrame* frame() const {  (in v8::internal::StackFrameIterator)
    ... (more matches)
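The frames.h comments above describe encoding a stack frame type as a marker that can live on the stack yet remain distinguishable from a tagged heap pointer. A rough C sketch of that kind of low-bit tagging scheme follows; the shift and tag values are illustrative assumptions, not V8's actual constants:

    #include <stdint.h>
    #include <stdbool.h>

    /* Illustrative only: keep the low bit clear for small-integer markers
     * (Smi-style), so anything with the low bit set reads as a tagged pointer. */
    #define TAG_BITS 1
    #define INT_TAG  0

    static inline intptr_t type_to_marker(int type)   { return ((intptr_t)type << TAG_BITS) | INT_TAG; }
    static inline int      marker_to_type(intptr_t m) { return (int)(m >> TAG_BITS); }

    static inline bool is_type_marker(intptr_t m, int num_types)
    {
        return m >= 0 &&
               (m & ((1 << TAG_BITS) - 1)) == INT_TAG &&   /* not a tagged pointer */
               m < type_to_marker(num_types);              /* within the valid range */
    }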
/base/hiviewdfx/faultloggerd/tools/process_dump/

dfx_fault_stack.cpp
    107  auto frame = frames.at(index);  (in CollectStackInfo)
    108  uintptr_t curSp = static_cast<uintptr_t>(frame.sp);
    134  const auto& frame = frames.back();  (in CreateBlockForCorruptedStack)
    136  if (frame.mapName.find("ld-musl") != std::string::npos ||
    137      frame.mapName.find("ffrt") != std::string::npos ||
    138      frame.mapName.find("bin") != std::string::npos) {
    142  AdjustAndCreateMemoryBlock(frame.index, frame.sp, prevEndAddr, size);
    291  DfxFrame frame;  (in ParseUnwindStack)
    292  frame ...
    ... (more matches)
/third_party/skia/src/codec/

SkWebpCodec.cpp
    112  // Get the first frame and its "features" to determine the color and alpha types.  (in MakeFromStream)
    113  WebPIterator frame;
    114  SkAutoTCallVProc<WebPIterator, WebPDemuxReleaseIterator> autoFrame(&frame);
    115  if (!WebPDemuxGetFrame(demux, 1, &frame)) {
    121  switch (WebPGetFeatures(frame.fragment.bytes, frame.fragment.size, &features)) {
    133  const bool hasAlpha = SkToBool(frame.has_alpha)
    134      || frame.width != width || frame.height != height;
    261  Frame* frame ...  (in onGetFrameCount)
    281  const SkWebpCodec::Frame* SkWebpCodec::FrameHolder::frame(int i) const {
    291  const Frame* frame = fFrameHolder.frame(i);  (in onGetFrameInfo)
    362  WebPIterator frame;  (in onGetPixels)
    ... (more matches)
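The SkWebpCodec.cpp hits use libwebp's demux API to pull the first frame and probe its features. A stand-alone sketch of the same calls, with error handling trimmed and the encoded buffer assumed to be loaded by the caller:

    #include <stdio.h>
    #include "webp/decode.h"
    #include "webp/demux.h"

    /* Sketch: inspect the first frame of a (possibly animated) WebP buffer. */
    static int probe_first_frame(const uint8_t *buf, size_t len)
    {
        WebPData data = { buf, len };
        WebPDemuxer *demux = WebPDemux(&data);
        if (!demux)
            return -1;

        WebPIterator frame;
        if (!WebPDemuxGetFrame(demux, 1, &frame)) {   /* frames are 1-based */
            WebPDemuxDelete(demux);
            return -1;
        }

        WebPBitstreamFeatures features;
        if (WebPGetFeatures(frame.fragment.bytes, frame.fragment.size,
                            &features) == VP8_STATUS_OK) {
            printf("%dx%d, alpha=%d\n", features.width, features.height,
                   frame.has_alpha);
        }

        WebPDemuxReleaseIterator(&frame);
        WebPDemuxDelete(demux);
        return 0;
    }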
/third_party/ffmpeg/libavformat/

hls_sample_encryption.c
    269  static int get_next_adts_frame(CodecParserContext *ctx, AudioFrame *frame)
    285  frame->data = (uint8_t*)ctx->buf_ptr;  (in get_next_adts_frame)
    287  ret = avpriv_adts_header_parse (&adts_hdr, frame->data, ctx->buf_end - frame->data);
    291  frame->header_length = adts_hdr->crc_absent ? AV_AAC_ADTS_HEADER_SIZE : AV_AAC_ADTS_HEADER_SIZE + 2;
    292  frame->length = adts_hdr->frame_length;
    299  static int get_next_ac3_eac3_sync_frame(CodecParserContext *ctx, AudioFrame *frame)
    315  frame->data = (uint8_t*)ctx->buf_ptr;  (in get_next_ac3_eac3_sync_frame)
    316  frame->header_length = 0;
    318  ret = avpriv_ac3_parse_header(&hdr, frame ...
    332  get_next_sync_frame(enum AVCodecID codec_id, CodecParserContext *ctx, AudioFrame *frame)
    342  decrypt_sync_frame(enum AVCodecID codec_id, HLSCryptoContext *crypto_ctx, AudioFrame *frame)
    365  AudioFrame frame;  (in decrypt_audio_frame)
    ... (more matches)
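get_next_adts_frame() above leans on FFmpeg's internal avpriv_adts_header_parse(), but the two fields it keeps (header length and frame length) can also be read directly from the fixed ADTS header. A stand-alone sketch, assuming at least 7 header bytes are available:

    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: read header_length and frame_length from an ADTS header.
     * Returns 0 on success, -1 if the syncword does not match. */
    static int parse_adts_header(const uint8_t *buf, size_t size,
                                 int *header_length, int *frame_length)
    {
        if (size < 7 || buf[0] != 0xFF || (buf[1] & 0xF0) != 0xF0)
            return -1;                          /* no 12-bit 0xFFF syncword */

        int protection_absent = buf[1] & 0x01;  /* 0 means a 2-byte CRC follows */
        *header_length = protection_absent ? 7 : 9;

        /* frame_length: 13 bits spanning bytes 3..5; it includes the header */
        *frame_length = ((buf[3] & 0x03) << 11) | (buf[4] << 3) | (buf[5] >> 5);
        return 0;
    }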
/third_party/node/deps/v8/third_party/wasm-api/example/

start.c
     11  void print_frame(wasm_frame_t* frame) {
     13  wasm_frame_instance(frame),  (in print_frame)
     14  wasm_frame_module_offset(frame),
     15  wasm_frame_func_index(frame),
     16  wasm_frame_func_offset(frame)
     74  own wasm_frame_t* frame = wasm_trap_origin(trap);  (in main)
     75  if (frame) {
     76  print_frame(frame);
     77  wasm_frame_delete(frame);
/foundation/CastEngine/castengine_wifi_display/services/codec/src/

audio_g711_codec.cpp
     18  #include "frame.h"
     34  void AudioG711Encoder::OnFrame(const Frame::Ptr &frame)
     36  RETURN_IF_NULL(frame);  (in OnFrame)
     37  if (!inited_ && frame->GetCodecId() != CODEC_PCM) {
     41  auto payload = frame->Data();
     42  int32_t outLength = frame->Size() / 2;  // 2: double size
     78  void AudioG711Decoder::OnFrame(const Frame::Ptr &frame)
     80  RETURN_IF_NULL(frame);  (in OnFrame)
     86  if ((type_ == G711_ALAW && frame->GetCodecId() != CODEC_G711A) ||
     87      (type_ == G711_ULAW && frame ...
    ... (more matches)
/third_party/ffmpeg/libavutil/

frame.h
     22  * reference-counted frame API
     80  * transformation that needs to be applied to the frame for correct
    111  * This side data must be associated with an audio frame and corresponds to
    116  * Mastering display metadata associated with a video frame. The payload is
    123  * This is set on the first frame of a GOP that has a temporal reference of 0.
    155  * HDR dynamic metadata associated with a video frame. The payload is
    168  * Encoding parameters for a video frame, as described by AVVideoEncParams.
    173  * User data unregistered metadata associated with a video frame.
    181  * Film grain parameters for a frame, described by AVFilmGrainParams.
    182  * Must be present for every frame whic...
    ... (more matches)
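frame.h documents the reference-counted AVFrame API and its per-frame side data types. A minimal sketch of the usual ownership pattern with the public libavutil calls, error handling trimmed for brevity:

    #include <libavutil/frame.h>

    /* Sketch: allocate a video frame, share its buffers via a second
     * reference, then release everything. */
    static int avframe_refcount_demo(void)
    {
        AVFrame *src = av_frame_alloc();
        AVFrame *dst = av_frame_alloc();
        if (!src || !dst)
            goto fail;

        src->format = AV_PIX_FMT_YUV420P;
        src->width  = 640;
        src->height = 480;
        if (av_frame_get_buffer(src, 0) < 0)    /* allocate the data planes */
            goto fail;

        if (av_frame_ref(dst, src) < 0)         /* dst now shares src's buffers */
            goto fail;
        av_frame_unref(dst);                    /* drop only dst's reference */

    fail:
        av_frame_free(&src);                    /* NULL-safe; frees the last reference */
        av_frame_free(&dst);
        return 0;
    }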
/third_party/node/deps/v8/tools/

lldb_commands.py
     74  frame = current_frame(debugger)
     75  js_entry_sp = frame.EvaluateExpression(
     78  sizeof_void = frame.EvaluateExpression("sizeof(void*)").GetValue()
     79  rbp = frame.FindRegister("rbp")
     80  rsp = frame.FindRegister("rsp")
     81  pc = frame.FindRegister("pc")
     92  for frame in thread:
     93  functionSignature = frame.GetDisplayFunctionName()
     97  line = frame.GetLineEntry().GetLine()
     98  sourceFile = frame ...
    ... (more matches)