/third_party/ffmpeg/doc/examples/
transcode_aac.c
    42:  #include "libavutil/frame.h"
    264: * Initialize one audio frame for reading from the input file.
    265: * @param[out] frame Frame to be initialized
    268: static int init_input_frame(AVFrame **frame)
    270:     if (!(*frame = av_frame_alloc())) {   [in init_input_frame()]
    271:         fprintf(stderr, "Could not allocate input frame\n");
    358: * Decode one audio frame from the input file.
    359: * @param frame Audio frame to be decoded
    370: static int decode_audio_frame(AVFrame *frame, …
    619: init_output_frame(AVFrame **frame, AVCodecContext *output_codec_context, int frame_size)
    665: encode_audio_frame(AVFrame *frame, AVFormatContext *output_format_context, AVCodecContext *output_codec_context, int *data_present)
    … (further matches elided)
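The init_input_frame() matches above show FFmpeg's standard allocate-and-check idiom. A minimal sketch of that idiom against libavutil's public AVFrame API (returning AVERROR(ENOMEM) mirrors the example file's convention):

    #include <stdio.h>
    #include "libavutil/error.h"
    #include "libavutil/frame.h"

    /* Allocate an empty frame for the decoder to fill in later.
     * Returns 0 on success, a negative AVERROR code on failure. */
    static int init_input_frame(AVFrame **frame)
    {
        if (!(*frame = av_frame_alloc())) {
            fprintf(stderr, "Could not allocate input frame\n");
            return AVERROR(ENOMEM);
        }
        return 0;
    }

The double pointer lets the helper both allocate the frame and hand ownership back to the caller; the matching cleanup call is av_frame_free().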
/third_party/ffmpeg/libavfilter/
vf_dnn_detect.c
    65:  static int dnn_detect_post_proc_ov(AVFrame *frame, DNNData *output, AVFilterContext *filter_ctx)
    77:      sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
    92:      av_log(filter_ctx, AV_LOG_VERBOSE, "nothing detected in this frame.\n");
    96:      header = av_detection_bbox_create_side_data(frame, nb_bboxes);
    119:     bbox->x = (int)(x0 * frame->width);
    120:     bbox->w = (int)(x1 * frame->width) - bbox->x;
    121:     bbox->y = (int)(y0 * frame->height);
    122:     bbox->h = (int)(y1 * frame->height) - bbox->y;
    142: static int dnn_detect_post_proc_tf(AVFrame *frame, DNNData *output, AVFilterContext *filter_ctx)
    158:     sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);
    217: dnn_detect_post_proc(AVFrame *frame, DNNData *output, uint32_t nb, AVFilterContext *filter_ctx)
    … (further matches elided)
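The dnn_detect matches convert a model's normalized corner coordinates into pixel units. A small stand-alone sketch of that mapping (the names are ours, assuming outputs in [0,1] with a top-left origin):

    typedef struct Rect { int x, y, w, h; } Rect;

    /* Map normalized corners (x0,y0)-(x1,y1) onto a width x height frame.
     * Deriving w/h from both scaled corners, instead of scaling x1 - x0,
     * keeps the right/bottom edges consistent after integer truncation. */
    static Rect to_pixels(float x0, float y0, float x1, float y1,
                          int width, int height)
    {
        Rect r;
        r.x = (int)(x0 * width);
        r.y = (int)(y0 * height);
        r.w = (int)(x1 * width)  - r.x;
        r.h = (int)(y1 * height) - r.y;
        return r;
    }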
f_segment.c
    164: static int current_segment_finished(AVFilterContext *ctx, AVFrame *frame)
    171:     ret = frame->pts >= s->points[s->current_point];
    178:     ret = inlink->sample_count_out - frame->nb_samples >= s->points[s->current_point];
    190: AVFrame *frame = NULL;   [in activate()]
    202:     ret = ff_inlink_consume_frame(inlink, &frame);
    220:     ret = ff_inlink_consume_frame(inlink, &frame);
    222:     ret = ff_inlink_consume_samples(inlink, 1, max_samples, &frame);
    229:     s->last_pts = frame->pts;
    230:     while (current_segment_finished(ctx, frame)) {
    231:         ff_outlink_set_status(ctx->outputs[s->current_point], AVERROR_EOF, frame…
    … (further matches elided)
vf_find_rect.c
    65:  AVFrame *frame = av_frame_alloc();   [in downscale()]
    67:  if (!frame)
    70:  frame->format = in->format;
    71:  frame->width  = (in->width  + 1) / 2;
    72:  frame->height = (in->height + 1) / 2;
    74:  if (av_frame_get_buffer(frame, 0) < 0) {
    75:      av_frame_free(&frame);
    79:  dst = frame->data[0];
    81:  for (y = 0; y < frame->height; y++) {
    82:      for (x = 0; x < frame…
    … (further matches elided)
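downscale() follows the standard recipe for allocating a writable video frame: set format, width, and height first, then call av_frame_get_buffer(). A minimal sketch of that recipe with libavutil's public API; the half-size rounding mirrors the snippet:

    #include "libavutil/frame.h"

    /* Allocate a frame half the size of `in`, rounding odd dimensions up.
     * format/width/height must be set before av_frame_get_buffer(). */
    static AVFrame *alloc_half_size(const AVFrame *in)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;

        frame->format = in->format;
        frame->width  = (in->width  + 1) / 2;
        frame->height = (in->height + 1) / 2;

        if (av_frame_get_buffer(frame, 0) < 0) {  /* 0 = default alignment */
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }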
fifo.c
    34:  AVFrame *frame;   [struct member]
    40:  Buf *last;        ///< last buffered frame
    65:  av_frame_free(&buf->frame);   [in uninit()]
    72:  static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
    78:      av_frame_free(&frame);    [in add_to_queue()]
    83:      s->last->frame = frame;   [in add_to_queue()]
    108: ret = ff_filter_frame(outlink, s->root.next->frame);   [in request_frame()]
vf_bbox.c
    76:  static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    85:      frame->data[0], frame->linesize[0],
    92:      av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
    95:      AVDictionary **metadata = &frame->metadata;
    113:     return ff_filter_frame(inlink->dst->outputs[0], frame);
    148: .description = NULL_IF_CONFIG_SMALL("Compute bounding box for each frame."),
/third_party/ffmpeg/libavutil/
hwcontext_cuda.c
    174: static int cuda_get_buffer(AVHWFramesContext *ctx, AVFrame *frame)
    179:     frame->buf[0] = av_buffer_pool_get(ctx->pool);
    180:     if (!frame->buf[0])
    183:     res = av_image_fill_arrays(frame->data, frame->linesize, frame->buf[0]->data,
    191:     frame->linesize[1] = frame->linesize[2] = frame->linesize[0] / 2;
    192:     frame…
    … (further matches elided)
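cuda_get_buffer() wires a pooled buffer into the frame's plane pointers in two steps: grab an AVBufferRef from the pool, then let av_image_fill_arrays() derive data[] and linesize[]. A sketch of that pattern for a generic pool (the pixel-format and alignment parameters are our own additions):

    #include <stdint.h>
    #include "libavutil/buffer.h"
    #include "libavutil/error.h"
    #include "libavutil/frame.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/pixfmt.h"

    /* Attach a buffer from `pool` to `frame` and derive the plane
     * pointers for the given pixel format. Returns 0 or a negative
     * AVERROR code. */
    static int get_pooled_buffer(AVBufferPool *pool, AVFrame *frame,
                                 enum AVPixelFormat fmt, int w, int h, int align)
    {
        int res;

        frame->buf[0] = av_buffer_pool_get(pool);
        if (!frame->buf[0])
            return AVERROR(ENOMEM);

        /* Fills frame->data[] and frame->linesize[] to point into buf[0]. */
        res = av_image_fill_arrays(frame->data, frame->linesize,
                                   frame->buf[0]->data, fmt, w, h, align);
        if (res < 0)
            return res;

        frame->format = fmt;
        frame->width  = w;
        frame->height = h;
        return 0;
    }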
/third_party/node/deps/undici/src/lib/websocket/
websocket.js
    19:  const { WebsocketFrameSend } = require('./frame')
    208: const frame = new WebsocketFrameSend()
    216: frame.frameData = Buffer.allocUnsafe(2)
    217: frame.frameData.writeUInt16BE(code, 0)
    221: frame.frameData = Buffer.allocUnsafe(2 + reasonByteLength)
    222: frame.frameData.writeUInt16BE(code, 0)
    224: frame.frameData.write(reason, 2, 'utf-8')
    226: frame.frameData = emptyBuffer
    232: socket.write(frame.createFrame(opcodes.CLOSE), (err) => {
    238: // Upon either sending or receiving a Close control frame, i…
    … (further matches elided)
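These matches build a WebSocket Close frame body as defined by RFC 6455 §5.5.1: a 2-byte status code in network byte order, optionally followed by a UTF-8 reason. The same payload layout in C, the language used for the other sketches in this listing (the buffer handling here is ours, not undici's):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    /* Build a Close-frame payload: 2-byte big-endian status code,
     * then an optional UTF-8 reason. Caller frees *out. Returns the
     * payload length, or 0 on allocation failure. */
    static size_t make_close_payload(uint16_t code, const char *reason,
                                     uint8_t **out)
    {
        size_t reason_len = reason ? strlen(reason) : 0;
        uint8_t *buf = malloc(2 + reason_len);
        if (!buf)
            return 0;

        buf[0] = (uint8_t)(code >> 8);   /* network byte order */
        buf[1] = (uint8_t)(code & 0xff);
        if (reason_len)
            memcpy(buf + 2, reason, reason_len);

        *out = buf;
        return 2 + reason_len;  /* must stay <= 125 for control frames */
    }

createFrame() then wraps this payload with the frame header (FIN bit, CLOSE opcode, and the client-side mask) before it reaches the socket.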
/third_party/python/Lib/test/
test_sys_setprofile.py
    30:  def callback(self, frame, event, arg):
    34:      self.add_event(event, frame)
    36:  def add_event(self, event, frame=None):
    38:      if frame is None:
    39:          frame = sys._getframe(1)
    42:      frameno = self.frames.index(frame)
    45:      self.frames.append(frame)
    47:      self.events.append((frameno, event, ident(frame)))
    63:  def callback(self, frame, event, arg):
    65:      self.dispatch[event](self, frame)
    … (further matches elided)
/third_party/vk-gl-cts/modules/egl/
teglPartialUpdateTests.cpp
    196: void render (int width, int height, const Frame& frame) const;
    246: void GLES2Renderer::render (int width, int height, const Frame& frame) const
    248:     for (size_t drawNdx = 0; drawNdx < frame.draws.size(); drawNdx++)
    250:         const ColoredRect& coloredRect = frame.draws[drawNdx].rect;
    252:         if (frame.draws[drawNdx].drawType == PartialUpdateTest::DRAWTYPE_GLES2_RENDER)
    302:         else if (frame.draws[drawNdx].drawType == PartialUpdateTest::DRAWTYPE_GLES2_CLEAR)
    320: void render (tcu::Surface* target, const Frame& frame) const;
    330: void ReferenceRenderer::render (tcu::Surface* target, const Frame& frame) const
    332:     for (size_t drawNdx = 0; drawNdx < frame.draws.size(); drawNdx++)
    334:         const ColoredRect& coloredRect = frame…
    476: getDamageRegion(const Frame& frame, int marginLeft, int marginBottom, int marginRight, int marginTop)
    … (further matches elided)
/third_party/ffmpeg/libavformat/
vc1dec.c
    29:  int seq = 0, entry = 0, invalid = 0, frame = 0, i;   [in vc1_probe()]
    73:  frame++;
    79:  if (frame > 1 && frame >> 1 > invalid)
    81:  if (frame >= 1)
/third_party/ffmpeg/libavcodec/
mvha.c
    151: static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
    167:     if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    183:     zstream->next_out = frame->data[p] + (avctx->height - y - 1) * frame->linesize[p];
    234:     ptrdiff_t stride = frame->linesize[p];
    237:     dst = frame->data[p] + (avctx->height - 1) * frame->linesize[p];
    259:     ptrdiff_t stride = frame->linesize[p];
    262:     dst = frame->data[p] + (avctx->height - 1) * frame…
    … (further matches elided)
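Several matches here compute row addresses from the bottom of the image up, data[p] + (height - 1 - y) * linesize[p], the usual idiom for bitmaps stored bottom-to-top. A generic sketch of that addressing (the names are hypothetical):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Copy `height` rows of `row_bytes` each from a bottom-up source
     * into a top-down plane: source row 0 is the *bottom* image row. */
    static void copy_bottom_up(uint8_t *dst, ptrdiff_t dst_linesize,
                               const uint8_t *src, size_t row_bytes, int height)
    {
        for (int y = 0; y < height; y++) {
            /* row y of the source lands at row (height - 1 - y) of dst */
            memcpy(dst + (height - 1 - y) * dst_linesize,
                   src + y * row_bytes, row_bytes);
        }
    }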
msvideo1.c
    51:  AVFrame *frame;   [struct member]
    81:  s->frame = av_frame_alloc();   [in msvideo1_decode_init()]
    82:  if (!s->frame)
    104: unsigned char *pixels = s->frame->data[0];   [in msvideo1_decode_8bit()]
    105: int stride = s->frame->linesize[0];
    185: memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
    204: unsigned short *pixels = (unsigned short *)s->frame->data[0];   [in msvideo1_decode_16bit()]
    205: int stride = s->frame->linesize[0] / 2;
    308: // Discard frame if it's smaller than the minimum frame size   [in msvideo1_decode_frame()]
    … (further matches elided)
s302menc.c
    72:  s302m_encode2_frame(AVCodecContext *avctx, AVPacket *avpkt,
    73:                      const AVFrame *frame, int *got_packet_ptr)
    78:      (frame->nb_samples * nb_channels * …
    85:      av_log(avctx, AV_LOG_ERROR, "number of samples in frame too big\n");
    103:     const uint32_t *samples = (uint32_t *)frame->data[0];
    105:     for (c = 0; c < frame->nb_samples; c++) {
    125:     const uint32_t *samples = (uint32_t *)frame->data[0];
    127:     for (c = 0; c < frame->nb_samples; c++) {
    146:     const uint16_t *samples = (uint16_t *)frame->data[0];
    148:     for (c = 0; c < frame->nb_samples; c++) {
8bps.c
    57:  static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
    77:      if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
    93:      pixptr = frame->data[0] + row * frame->linesize[0] + planemap[p];
    94:      pixptr_end = pixptr + frame->linesize[0];
    129:     frame->palette_has_changed = ff_copy_palette(c->pal, avpkt, avctx);
    131:     memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
qsv.c
    227: int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
    229:     switch (frame->format) {
    232:     surface->Data.Y  = frame->data[0];
    233:     surface->Data.UV = frame->data[1];
    239:     surface->Data.B = frame->data[0];
    240:     surface->Data.G = frame->data[0] + 1;
    241:     surface->Data.R = frame->data[0] + 2;
    242:     surface->Data.A = frame->data[0] + 3;
    245:     surface->Data.Y = frame->data[0];
    246:     surface->Data.U = frame…
    263: ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
    863: ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame, mfxExtBuffer *param)
    … (further matches elided)
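ff_qsv_map_frame_to_surface() aliases the surface's channel pointers onto the AVFrame planes: NV12 keeps luma in data[0] and interleaved chroma in data[1], while packed BGRA addresses all four channels inside data[0] at byte offsets 0 through 3. A reduced sketch of that switch; SurfaceData is a stand-in for the real mfxFrameSurface1:

    #include <stdint.h>
    #include "libavutil/frame.h"
    #include "libavutil/pixfmt.h"

    /* Stand-in for the SDK surface: just the channel pointers we map. */
    struct SurfaceData { uint8_t *Y, *UV, *B, *G, *R, *A; };

    static int map_frame_to_surface(const AVFrame *frame, struct SurfaceData *d)
    {
        switch (frame->format) {
        case AV_PIX_FMT_NV12:            /* planar Y + interleaved UV */
            d->Y  = frame->data[0];
            d->UV = frame->data[1];
            break;
        case AV_PIX_FMT_BGRA:            /* packed: B,G,R,A every 4 bytes */
            d->B = frame->data[0];
            d->G = frame->data[0] + 1;
            d->R = frame->data[0] + 2;
            d->A = frame->data[0] + 3;
            break;
        default:
            return -1;                   /* format not handled in this sketch */
        }
        return 0;
    }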
vqavideo.c
    51:  * transporting an entire codebook every 8th frame, the new codebook is
    54:  * codebook. A full codebook is also sent on the very first frame of a
    104: AVFrame *frame;   [struct member]
    110: int width;    /* width of a frame */
    111: int height;   /* height of a frame */
    181: s->frame = av_frame_alloc();   [in vqa_decode_init()]
    182: if (!s->frame)
    263: /* 0x80 means that frame is finished */   [in decode_format80()]
    330: * important for decoding frame maps since each vector needs to have a
    343: static int vqa_decode_frame_pal8(VqaContext *s, AVFrame *frame)
    626: vqa_decode_frame_hicolor(VqaContext *s, AVFrame *frame)
    … (further matches elided)
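The vqavideo.c header comments describe spreading codebook replacement across frames rather than shipping a whole codebook every 8th frame. A toy illustration of the general accumulate-then-swap idea; the struct, names, and fixed size are invented for this sketch and are not the decoder's actual scheme:

    #include <stddef.h>
    #include <string.h>

    #define CB_SIZE 4096          /* illustrative codebook size in bytes */

    struct Codebook {
        unsigned char active[CB_SIZE];   /* used for decoding this frame */
        unsigned char pending[CB_SIZE];  /* rebuilt incrementally */
        size_t filled;                   /* bytes of `pending` received */
    };

    /* Append one partial-update chunk; once the staging buffer is full,
     * promote it to the active codebook. */
    static void codebook_add_chunk(struct Codebook *cb,
                                   const unsigned char *chunk, size_t len)
    {
        if (len > CB_SIZE - cb->filled)
            len = CB_SIZE - cb->filled;      /* clamp malformed input */
        memcpy(cb->pending + cb->filled, chunk, len);
        cb->filled += len;

        if (cb->filled == CB_SIZE) {         /* full table assembled */
            memcpy(cb->active, cb->pending, CB_SIZE);
            cb->filled = 0;
        }
    }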
opusenc.c
    54:  CeltFrame *frame;   [struct member]
    94:  if (s->frame[0].framebits == s->frame[1].framebits) { /* same size */   [in opus_gen_toc()]
    98:  *fsize_needed = 1; /* put frame sizes in the packet */
    109: *fsize_needed |= (s->frame[i].framebits != s->frame[i + 1].framebits);
    152: /* Last frame isn't popped off and freed yet - we need it for overlap */   [in celt_frame_setup_input()]
    503: s->frame[i].framebits >> 3);   [in opus_packet_assembler()]
    510: s->frame[i].framebits >> 3);
    511: offset += s->frame[…
    542: opus_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr)
    … (further matches elided)
/third_party/node/deps/v8/third_party/wasm-api/example/
trap.cc
    20:  void print_frame(const wasm::Frame* frame) {
    21:      std::cout << "> " << frame->instance();
    22:      std::cout << " @ 0x" << std::hex << frame->module_offset();
    23:      std::cout << " = " << frame->func_index();
    24:      std::cout << ".0x" << std::hex << frame->func_offset() << std::endl;
    98:  auto frame = trap->origin();   [in run()]
    99:  if (frame) {
    100:     print_frame(frame.get());
/third_party/nghttp2/fuzz/
fuzz_frames.cc
    38:  nghttp2_headers frame, oframe;   [in check_frame_pack_headers()]
    65:      &frame, NGHTTP2_FLAG_END_STREAM | NGHTTP2_FLAG_END_HEADERS, 1000000007,
    69:  rv = nghttp2_frame_pack_headers(&bufs, &frame, &deflater);
    90:  nghttp2_frame_headers_free(&frame, mem);
    98:  nghttp2_push_promise frame, oframe;   [in check_frame_push_promise()]
    125: nghttp2_frame_push_promise_init(&frame, NGHTTP2_FLAG_END_HEADERS, 1000000007,
    128: rv = nghttp2_frame_pack_push_promise(&bufs, &frame, &deflater);
    147: nghttp2_frame_push_promise_free(&frame, mem);
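Both helpers follow the same round-trip shape: initialize a frame, pack it through the deflater, then free it. Harnesses like this are typically driven by a libFuzzer entry point; a generic sketch follows (the helpers' real signatures live in fuzz_frames.cc, here we simply assume they take a byte buffer):

    #include <stddef.h>
    #include <stdint.h>

    /* Helpers exercised by the fuzzer (defined elsewhere in the harness). */
    void check_frame_pack_headers(const uint8_t *data, size_t size);
    void check_frame_push_promise(const uint8_t *data, size_t size);

    /* libFuzzer calls this once per generated input.
     * In a C++ translation unit, declare it extern "C". */
    int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
    {
        check_frame_pack_headers(data, size);
        check_frame_push_promise(data, size);
        return 0;   /* non-zero return values are reserved */
    }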
/third_party/skia/docs/examples/
SkPath_cubicTo_example_parametric_animated.cpp
    47:  SkPoint ab  = interpolate(a, b, frame);   [in REG_FIDDLE_ANIMATED()]
    48:  SkPoint bc  = interpolate(b, c, frame);
    49:  SkPoint cd  = interpolate(c, d, frame);
    50:  SkPoint abc = interpolate(ab, bc, frame);
    51:  SkPoint bcd = interpolate(bc, cd, frame);
    84:  SkString msg = SkStringPrintf("%.4f", frame);
    94:  canvas->drawPoint(interpolate(abc, bcd, frame), pointPaint);
    98:  canvas->drawPoint(cubic(a, b, c, d, frame), pointPaint);
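The interpolate() chain in this example is de Casteljau's algorithm: three rounds of pairwise linear interpolation reduce the four cubic control points to the curve point at parameter t. A self-contained C version of the same evaluation (Point and lerp are our stand-ins for the Skia types):

    typedef struct { float x, y; } Point;

    static Point lerp(Point p, Point q, float t)
    {
        Point r = { p.x + (q.x - p.x) * t, p.y + (q.y - p.y) * t };
        return r;
    }

    /* Evaluate a cubic Bezier at t in [0,1] by de Casteljau's algorithm. */
    static Point cubic_at(Point a, Point b, Point c, Point d, float t)
    {
        Point ab  = lerp(a, b, t);
        Point bc  = lerp(b, c, t);
        Point cd  = lerp(c, d, t);
        Point abc = lerp(ab, bc, t);
        Point bcd = lerp(bc, cd, t);
        return lerp(abc, bcd, t);    /* the point on the curve */
    }

The intermediate points ab through bcd are exactly what the animated fiddle draws to visualize the construction.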
/third_party/mesa3d/src/util/
u_debug_stack_android.cpp
    73:  const backtrace_frame_data_t* frame = bt->GetFrame(i);   [in debug_backtrace_capture()]
    74:  if (frame) {
    75:      backtrace[i].procname = intern_symbol(frame->func_name.c_str());
    76:      backtrace[i].start_ip = frame->pc;
    77:      backtrace[i].off      = frame->func_offset;
    78:      backtrace[i].map      = intern_symbol(frame->map.Name().c_str());
    79:      backtrace[i].map_off  = frame->rel_pc;
/third_party/nghttp2/src/
shrpx_http2_upstream.cc
    103: // frame chunk.   [in on_stream_close_callback()]
    162: int on_header_callback2(nghttp2_session *session, const nghttp2_frame *frame,
    170:     verbose_on_header_callback(session, frame, namebuf.base, namebuf.len,
    173:     if (frame->hd.type != NGHTTP2_HEADERS) {
    178:     nghttp2_session_get_stream_user_data(session, frame->hd.stream_id));
    201:     if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) {
    218:     if (frame->headers.cat == NGHTTP2_HCAT_HEADERS) {
    234: on_invalid_header_callback2(nghttp2_session *session, const nghttp2_frame *frame, nghttp2_rcbuf *name, nghttp2_rcbuf *value, uint8_t flags, void *user_data)
    240:     nghttp2_session_get_stream_user_data(session, frame->hd.stream_id));
    250:     << frame…
    262: on_begin_headers_callback(nghttp2_session *session, const nghttp2_frame *frame, void *user_data)
    280: on_start_request(const nghttp2_frame *frame)
    308: on_request_headers(Downstream *downstream, const nghttp2_frame *frame)
    546: on_frame_recv_callback(nghttp2_session *session, const nghttp2_frame *frame, void *user_data)
    663: on_frame_send_callback(nghttp2_session *session, const nghttp2_frame *frame, void *user_data)
    809: on_frame_not_send_callback(nghttp2_session *session, const nghttp2_frame *frame, int lib_error_code, void *user_data)
    838: send_data_callback(nghttp2_session *session, nghttp2_frame *frame, const uint8_t *framehd, size_t length, nghttp2_data_source *source, void *user_data)
    … (further matches elided)
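The callbacks matched here are nghttp2's standard hook set. A minimal sketch of how such hooks are registered through nghttp2's public API; this is the generic pattern, not shrpx's actual setup:

    #include <nghttp2/nghttp2.h>

    /* Minimal on_frame_recv hook: dispatch on the frame type, much as
     * the upstream handler above does for HEADERS. */
    static int on_frame_recv(nghttp2_session *session,
                             const nghttp2_frame *frame, void *user_data)
    {
        switch (frame->hd.type) {
        case NGHTTP2_HEADERS:
            /* frame->headers.cat distinguishes request vs. trailer HEADERS */
            break;
        case NGHTTP2_SETTINGS:
            break;
        default:
            break;
        }
        return 0;   /* a non-zero return makes nghttp2 fail the session */
    }

    /* Wire the callback into a server session. */
    static int make_session(nghttp2_session **session, void *user_data)
    {
        nghttp2_session_callbacks *cbs;
        int rv = nghttp2_session_callbacks_new(&cbs);
        if (rv != 0)
            return rv;
        nghttp2_session_callbacks_set_on_frame_recv_callback(cbs, on_frame_recv);
        rv = nghttp2_session_server_new(session, cbs, user_data);
        nghttp2_session_callbacks_del(cbs);   /* session keeps its own copy */
        return rv;
    }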
/third_party/python/Tools/gdb/
libpython.py
    16:   that we can emit useful visualizations e.g. a string, a list, a dict, a frame
    92:   FRAME_INFO_OPTIMIZED_OUT = '(frame information optimized out)'
    93:   UNABLE_READ_INFO_PYTHON_FRAME = 'Unable to read information on python frame'
    346:  'frame': PyFrameObjectPtr,
    946:  the local variables of this frame
    955:  the global variables of this frame
    1033: the local variables of this frame
    1080: the global variables of this frame
    1150: return '(failed to get frame line number)'
    1665: '''If supported, select this frame an…
    … (further matches elided)
/third_party/vk-gl-cts/external/vulkan-docs/src/config/chunkindex/
lunr.js
    1447: var frame = stack.pop()
    1450: if (frame.str.length > 0) {
    1451:     var char = frame.str.charAt(0),
    1454:     if (char in frame.node.edges) {
    1455:         noEditNode = frame.node.edges[char]
    1458:     frame.node.edges[char] = noEditNode
    1461:     if (frame.str.length == 1) {
    1467:         editsRemaining: frame.editsRemaining,
    1468:         str: frame.str.slice(1)
    1472: if (frame…
    … (further matches elided)