
Searched refs:frame (Results 176 - 200 of 1891) sorted by relevance


/third_party/ffmpeg/libavcodec/
H A Dflicvideo.c74 AVFrame *frame; member
142 s->frame = av_frame_alloc(); in flic_decode_init()
143 if (!s->frame) in flic_decode_init()
189 if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) in flic_decode_frame_8BPP()
192 pixels = s->frame->data[0]; in flic_decode_frame_8BPP()
193 pixel_limit = s->avctx->height * s->frame->linesize[0]; in flic_decode_frame_8BPP()
284 y_ptr += line_packets * s->frame->linesize[0]; in flic_decode_frame_8BPP()
289 pixel_ptr= y_ptr + s->frame->linesize[0] - 1; in flic_decode_frame_8BPP()
324 y_ptr += s->frame->linesize[0]; in flic_decode_frame_8BPP()
335 y_ptr += starting_line * s->frame in flic_decode_frame_8BPP()
1072 flic_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt) flic_decode_frame() argument
[all...]
H A Dlibdav1d.c259 av_log(c, AV_LOG_DEBUG, "Using %d frame threads, %d tile threads\n", in libdav1d_init()
293 static int libdav1d_receive_frame(AVCodecContext *c, AVFrame *frame) in libdav1d_receive_frame() argument
370 frame->buf[0] = av_buffer_ref(p->allocator_data); in libdav1d_receive_frame()
371 if (!frame->buf[0]) { in libdav1d_receive_frame()
376 frame->data[0] = p->data[0]; in libdav1d_receive_frame()
377 frame->data[1] = p->data[1]; in libdav1d_receive_frame()
378 frame->data[2] = p->data[2]; in libdav1d_receive_frame()
379 frame->linesize[0] = p->stride[0]; in libdav1d_receive_frame()
380 frame->linesize[1] = p->stride[1]; in libdav1d_receive_frame()
381 frame in libdav1d_receive_frame()
[all...]
H A Dtiertexseqv.c36 AVFrame *frame; member
97 dst += seq->frame->linesize[0]; in seq_decode_op1()
104 dst[b * seq->frame->linesize[0]] = block[i * 8 + b]; in seq_decode_op1()
121 dst += seq->frame->linesize[0]; in seq_decode_op1()
141 dst += seq->frame->linesize[0]; in seq_decode_op2()
158 offset = ((pos >> 3) & 7) * seq->frame->linesize[0] + (pos & 7); in seq_decode_op3()
177 palette = (uint32_t *)seq->frame->data[1]; in seqvideo_decode()
185 seq->frame->palette_has_changed = 1; in seqvideo_decode()
194 dst = &seq->frame->data[0][y * seq->frame in seqvideo_decode()
[all...]
H A Dlibkvazaar.c165 const AVFrame *frame, in libkvazaar_encode()
179 if (frame) { in libkvazaar_encode()
180 if (frame->width != ctx->config->width || in libkvazaar_encode()
181 frame->height != ctx->config->height) { in libkvazaar_encode()
186 frame->width, frame->height); in libkvazaar_encode()
191 if (frame->format != avctx->pix_fmt) { in libkvazaar_encode()
196 av_get_pix_fmt_name(frame->format)); in libkvazaar_encode()
202 input_pic = ctx->api->picture_alloc(frame->width, frame in libkvazaar_encode()
163 libkvazaar_encode(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) libkvazaar_encode() argument
[all...]
H A Dqcelpdec.c46 I_F_Q = -1, /**< insufficient frame quality */
57 QCELPFrame frame; /**< unpacked data frame */ member
128 lspf[i] = (q->frame.lspv[i] ? QCELP_LSP_SPREAD_FACTOR in decode_lspf()
166 lspf[2 * i + 0] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][0] * 0.0001; in decode_lspf()
167 lspf[2 * i + 1] = tmp_lspf += qcelp_lspvq[i][q->frame.lspv[i]][1] * 0.0001; in decode_lspf()
208 g1[i] = 4 * q->frame.cbgain[i]; in decode_gain_and_index()
215 if (q->frame.cbsign[i]) { in decode_gain_and_index()
217 q->frame.cindex[i] = (q->frame in decode_gain_and_index()
687 qcelp_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) qcelp_decode_frame() argument
[all...]
H A Dlcldec.c164 static int decode_frame(AVCodecContext *avctx, AVFrame *frame, in decode_frame() argument
183 if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0) in decode_frame()
186 outptr = frame->data[0]; // Output image pointer in decode_frame()
188 /* Decompress frame */ in decode_frame()
257 av_log(avctx, AV_LOG_ERROR, "BUG! Unknown MSZH compression in frame decoder.\n"); in decode_frame()
264 * gives a file with ZLIB fourcc, but frame is really uncompressed. in decode_frame()
265 * To be sure that's true check also frame size */ in decode_frame()
294 av_log(avctx, AV_LOG_ERROR, "BUG! Unknown codec in frame decoder compression switch.\n"); in decode_frame()
384 y_out = frame->data[0] + (height - 1) * frame in decode_frame()
[all...]
H A Dexrenc.c222 static int encode_scanline_rle(EXRContext *s, const AVFrame *frame) in encode_scanline_rle() argument
226 for (int y = 0; y < frame->height; y++) { in encode_scanline_rle()
228 int64_t tmp_size = element_size * s->planes * frame->width; in encode_scanline_rle()
248 memcpy(scanline->uncompressed_data + frame->width * 4 * p, in encode_scanline_rle()
249 frame->data[ch] + y * frame->linesize[ch], frame->width * 4); in encode_scanline_rle()
255 uint16_t *dst = (uint16_t *)(scanline->uncompressed_data + frame->width * 2 * p); in encode_scanline_rle()
256 uint32_t *src = (uint32_t *)(frame->data[ch] + y * frame in encode_scanline_rle()
280 encode_scanline_zip(EXRContext *s, const AVFrame *frame) encode_scanline_zip() argument
351 encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet) encode_frame() argument
[all...]
H A Dbmvvideo.c50 uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)]; member
57 static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, int frame_off) in decode_bmv_frame() argument
62 uint8_t *frame_end = frame + SCREEN_WIDE * SCREEN_HIGH; in decode_bmv_frame()
76 dst = frame; in decode_bmv_frame()
81 dst_end = frame - 1; in decode_bmv_frame()
149 if (dst - frame + SCREEN_WIDE < frame_off || in decode_bmv_frame()
150 dst - frame + SCREEN_WIDE + frame_off < 0 || in decode_bmv_frame()
159 if (dst - frame + SCREEN_WIDE < frame_off || in decode_bmv_frame()
160 dst - frame + SCREEN_WIDE + frame_off < 0 || in decode_bmv_frame()
199 static int decode_frame(AVCodecContext *avctx, AVFrame *frame, in decode_frame() argument
[all...]
/foundation/multimedia/av_codec/test/unittest/reference_parser_test/reference_parser_demo/
H A Dreference_parser_demo.cpp46 void from_json(const nlohmann::json &j, JsonFrameLayerInfo &frame) in from_json() argument
48 j.at("frameId").get_to(frame.frameId); in from_json()
49 j.at("dts").get_to(frame.dts); in from_json()
50 j.at("layer").get_to(frame.layer); in from_json()
51 j.at("discardable").get_to(frame.discardable); in from_json()
125 JsonFrameLayerInfo frame = frameVec_[frameId + i]; in LoadJson() local
126 frameMap_.emplace(frame.dts, frame); in LoadJson()
127 cout << "FrameId " << frame.frameId << ", Layer " << frame in LoadJson()
139 JsonFrameLayerInfo frame = frameMap_[dts]; CheckFrameLayerResult() local
[all...]
/third_party/ffmpeg/libavfilter/
H A Daf_adelay.c318 static int filter_frame(AVFilterLink *inlink, AVFrame *frame) in filter_frame() argument
327 return ff_filter_frame(outlink, frame); in filter_frame()
329 out_frame = ff_get_audio_buffer(outlink, frame->nb_samples); in filter_frame()
331 av_frame_free(&frame); in filter_frame()
334 av_frame_copy_props(out_frame, frame); in filter_frame()
338 const uint8_t *src = frame->extended_data[i]; in filter_frame()
342 memcpy(dst, src, frame->nb_samples * s->block_align); in filter_frame()
344 s->delay_channel(d, frame->nb_samples, src, dst); in filter_frame()
348 s->next_pts += av_rescale_q(frame->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base); in filter_frame()
349 av_frame_free(&frame); in filter_frame()
358 AVFrame *frame = NULL; activate() local
[all...]
H A Dvf_cropdetect.c153 static int filter_frame(AVFilterLink *inlink, AVFrame *frame) in filter_frame() argument
165 metadata = &frame->metadata; in filter_frame()
169 s->x1 = frame->width - 1; in filter_frame()
170 s->y1 = frame->height - 1; in filter_frame()
179 if (checkline(ctx, frame->data[0] + STEP0 * y, STEP1, LEN, bpp) > limit) {\ in filter_frame()
188 FIND(s->y1, 0, y < s->y1, +1, frame->linesize[0], bpp, frame->width); in filter_frame()
189 FIND(s->y2, frame->height - 1, y > FFMAX(s->y2, s->y1), -1, frame->linesize[0], bpp, frame in filter_frame()
[all...]
H A Dvf_colorize.c51 AVFrame *frame = arg; in colorizey_slice8() local
56 const int ylinesize = frame->linesize[0]; in colorizey_slice8()
57 uint8_t *yptr = frame->data[0] + slice_start * ylinesize; in colorizey_slice8()
74 AVFrame *frame = arg; in colorizey_slice16() local
79 const int ylinesize = frame->linesize[0] / 2; in colorizey_slice16()
80 uint16_t *yptr = (uint16_t *)frame->data[0] + slice_start * ylinesize; in colorizey_slice16()
97 AVFrame *frame = arg; in colorize_slice8() local
102 const int ulinesize = frame->linesize[1]; in colorize_slice8()
103 const int vlinesize = frame->linesize[2]; in colorize_slice8()
104 uint8_t *uptr = frame in colorize_slice8()
125 AVFrame *frame = arg; colorize_slice16() local
197 filter_frame(AVFilterLink *inlink, AVFrame *frame) filter_frame() argument
[all...]
H A Dvf_vibrance.c57 AVFrame *frame = arg; in vibrance_slice8() local
58 const int width = frame->width; in vibrance_slice8()
59 const int height = frame->height; in vibrance_slice8()
74 const int glinesize = frame->linesize[0]; in vibrance_slice8()
75 const int blinesize = frame->linesize[1]; in vibrance_slice8()
76 const int rlinesize = frame->linesize[2]; in vibrance_slice8()
77 uint8_t *gptr = frame->data[0] + slice_start * glinesize; in vibrance_slice8()
78 uint8_t *bptr = frame->data[1] + slice_start * blinesize; in vibrance_slice8()
79 uint8_t *rptr = frame->data[2] + slice_start * rlinesize; in vibrance_slice8()
114 AVFrame *frame in vibrance_slice16() local
173 AVFrame *frame = arg; vibrance_slice8p() local
228 AVFrame *frame = arg; vibrance_slice16p() local
282 filter_frame(AVFilterLink *link, AVFrame *frame) filter_frame() argument
[all...]
H A Dsetpts.c43 "FRAME_RATE", ///< defined only for constant frame-rate video
44 "INTERLACED", ///< tell if the current frame is interlaced
45 "N", ///< frame / sample number (starting at zero)
47 "NB_SAMPLES", ///< number of samples in the current frame (only audio)
48 "POS", ///< original position in the file of the frame
53 "PTS", ///< original pts in the file of the frame
57 "T", ///< original time in the file of the frame
61 "S", // Number of samples in the current frame
63 "FR", ///< defined only for constant frame-rate video
156 static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_ argument
180 filter_frame(AVFilterLink *inlink, AVFrame *frame) filter_frame() argument
[all...]
/third_party/ffmpeg/libavdevice/
H A Dlavfi.c361 static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame, in create_subcc_packet() argument
370 if (!(sd = av_frame_get_side_data(frame, AV_FRAME_DATA_A53_CC))) in create_subcc_packet()
376 lavfi->subcc_packet.pts = frame->pts; in create_subcc_packet()
377 lavfi->subcc_packet.pos = frame->pkt_pos; in create_subcc_packet()
386 AVFrame *frame = lavfi->decoded_frame; in lavfi_read_packet() local
407 ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame, in lavfi_read_packet()
415 d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX); in lavfi_read_packet()
417 av_frame_unref(frame); in lavfi_read_packet()
429 av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0); in lavfi_read_packet()
434 size = av_image_get_buffer_size(frame in lavfi_read_packet()
[all...]
/third_party/python/Lib/idlelib/
H A Ddebugger_r.py19 barrier, in particular frame and traceback objects.
40 def wrap_frame(frame):
41 fid = id(frame)
42 frametable[fid] = frame
63 def interaction(self, message, frame, info=None):
65 # pass frame and traceback object IDs instead of the objects themselves
67 (message, wrap_frame(frame), wrap_info(info)),
87 frame = frametable[fid]
88 self.idb.set_next(frame)
91 frame
[all...]
/third_party/ffmpeg/tools/
H A Dtarget_dec_fuzzer.c84 static int subtitle_handler(AVCodecContext *avctx, void *frame, in subtitle_handler() argument
94 static int audio_video_handler(AVCodecContext *avctx, AVFrame *frame, in audio_video_handler() argument
97 int ret = avcodec_receive_frame(avctx, frame); in audio_video_handler()
107 static int fuzz_video_get_buffer(AVCodecContext *ctx, AVFrame *frame) in fuzz_video_get_buffer() argument
112 int i, ret, w = frame->width, h = frame->height; in fuzz_video_get_buffer()
115 ret = av_image_fill_linesizes(frame->linesize, ctx->pix_fmt, w); in fuzz_video_get_buffer()
119 for (i = 0; i < 4 && frame->linesize[i]; i++) in fuzz_video_get_buffer()
120 linesize1[i] = frame->linesize[i] = in fuzz_video_get_buffer()
121 FFALIGN(frame in fuzz_video_get_buffer()
147 fuzz_get_buffer2(AVCodecContext *ctx, AVFrame *frame, int flags) fuzz_get_buffer2() argument
439 AVFrame *frame = av_frame_alloc(); LLVMFuzzerTestOneInput() local
[all...]
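
    The fuzzer hits above call avcodec_receive_frame() directly; for context, here is a minimal, hedged sketch of the standard FFmpeg send/receive decode loop that such handlers exercise. The function name drain_packet and the reduced error handling are illustrative assumptions, not code from the tree:

        #include <libavcodec/avcodec.h>

        /* Hypothetical sketch: feed one packet and drain all frames it produces. */
        static int drain_packet(AVCodecContext *avctx, const AVPacket *pkt, AVFrame *frame)
        {
            int ret = avcodec_send_packet(avctx, pkt);
            if (ret < 0)
                return ret;
            while (ret >= 0) {
                ret = avcodec_receive_frame(avctx, frame);
                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                    return 0;              /* need more input / end of stream */
                if (ret < 0)
                    return ret;            /* decoding error */
                /* ... use frame->data / frame->linesize here ... */
                av_frame_unref(frame);
            }
            return 0;
        }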
/third_party/python/Include/cpython/
H A Dpyframe.h9 PyAPI_FUNC(PyFrameObject *) PyFrame_GetBack(PyFrameObject *frame); variable
10 PyAPI_FUNC(PyObject *) PyFrame_GetLocals(PyFrameObject *frame); variable
12 PyAPI_FUNC(PyObject *) PyFrame_GetGlobals(PyFrameObject *frame); variable
13 PyAPI_FUNC(PyObject *) PyFrame_GetBuiltins(PyFrameObject *frame); variable
15 PyAPI_FUNC(PyObject *) PyFrame_GetGenerator(PyFrameObject *frame); variable
16 PyAPI_FUNC(int) PyFrame_GetLasti(PyFrameObject *frame); variable
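
    The declarations above are CPython's public frame accessors. A minimal sketch of how they compose, assuming a caller that already holds a PyFrameObject pointer; the helper name walk_frames is hypothetical:

        #include <Python.h>

        /* Hypothetical helper: walk a frame chain outward using the accessors
         * declared in pyframe.h. PyFrame_GetBack() and PyFrame_GetGlobals()
         * return new references that the caller must release. */
        static void walk_frames(PyFrameObject *frame)
        {
            Py_XINCREF(frame);
            while (frame != NULL) {
                int lasti = PyFrame_GetLasti(frame);            /* last executed instruction offset */
                PyObject *globals = PyFrame_GetGlobals(frame);  /* new reference */
                (void)lasti;
                /* ... inspect lasti / globals here ... */
                Py_DECREF(globals);
                PyFrameObject *back = PyFrame_GetBack(frame);   /* new reference, NULL at outermost frame */
                Py_DECREF(frame);
                frame = back;
            }
        }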
/third_party/libunwind/libunwind/doc/
H A Dunw_is_signal_frame.tex8 \begin{Name}{3}{unw\_is\_signal\_frame}{David Mosberger-Tang}{Programming Library}{unw\_is\_signal\_frame}unw\_is\_signal\_frame -- check if current frame is a signal frame
20 if the current frame identified by \Var{cp} is a signal frame, and a
22 frame is a frame that was created in response to a potentially
25 In a kernel-environment, a signal frame might, for example, correspond
26 to a frame created in response to a device interrupt.
35 positive value if the current frame is a signal frame, o
[all...]
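
    A minimal usage sketch for the function documented above, assuming local unwinding of the calling thread; the helper name scan_for_signal_frames is hypothetical and error handling is omitted:

        #define UNW_LOCAL_ONLY
        #include <libunwind.h>

        /* Walk the current stack and test each frame with unw_is_signal_frame();
         * a positive return value means the frame was created by signal delivery. */
        static void scan_for_signal_frames(void)
        {
            unw_context_t context;
            unw_cursor_t cursor;

            unw_getcontext(&context);
            unw_init_local(&cursor, &context);
            while (unw_step(&cursor) > 0) {
                if (unw_is_signal_frame(&cursor) > 0) {
                    /* this frame interrupted normal control flow */
                }
            }
        }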
/third_party/lame/libmp3lame/
H A Did3tag.c562 writeLoBytes(unsigned char *frame, unsigned short const *str, size_t n);
1024 return id3v2_add_ucs2_lng(gfp, frame_id, text, 0); /* iTunes expects WFED to be a text frame */ in id3tag_set_textinfo_utf16()
1073 return id3v2_add_latin1_lng(gfp, frame_id, text, 0); /* iTunes expects WFED to be a text frame */ in id3tag_set_textinfo_latin1()
1412 writeChars(unsigned char *frame, char const *str, size_t n) in writeChars() argument
1415 *frame++ = *str++; in writeChars()
1417 return frame; in writeChars()
1421 writeUcs2s(unsigned char *frame, unsigned short const *str, size_t n) in writeUcs2s() argument
1427 *frame++ = 0x00ffu & c; in writeUcs2s()
1428 *frame++ = 0x00ffu & (c >> 8); in writeUcs2s()
1431 return frame; in writeUcs2s()
1435 writeLoBytes(unsigned char *frame, unsigned short const *str, size_t n) writeLoBytes() argument
1456 set_frame_comment(unsigned char *frame, FrameDataNode const *node) set_frame_comment() argument
1493 set_frame_custom2(unsigned char *frame, FrameDataNode const *node) set_frame_custom2() argument
1526 set_frame_wxxx(unsigned char *frame, FrameDataNode const *node) set_frame_wxxx() argument
1559 set_frame_apic(unsigned char *frame, const char *mimetype, const unsigned char *data, size_t size) set_frame_apic() argument
[all...]
/foundation/CastEngine/castengine_wifi_display/services/protocol/rtp/src/
H A Drtp_codec_ts.cpp20 #include "frame/aac_frame.h"
21 #include "frame/h264_frame.h"
130 SHARING_LOGI("ignore av read frame."); in StartDecoding()
248 void RtpEncoderTs::InputFrame(const Frame::Ptr &frame) in InputFrame() argument
250 RETURN_IF_NULL(frame); in InputFrame()
256 switch (frame->GetCodecId()) { in InputFrame()
258 // merge sps, pps and key frame into one packet in InputFrame()
260 frame, buffer, [this](uint32_t dts, uint32_t pts, const DataBuffer::Ptr &buffer, bool have_key_frame) { in InputFrame()
269 SaveFrame(frame); in InputFrame()
272 SHARING_LOGW("Unknown codec: %d", frame in InputFrame()
340 auto frame = ReadFrame(packet); StartEncoding() local
350 SaveFrame(Frame::Ptr frame) SaveFrame() argument
372 Frame::Ptr frame = dataQueue_.front(); ReadFrame() local
[all...]
/third_party/nghttp2/tests/
H A Dnghttp2_test_helper.c35 int unpack_framebuf(nghttp2_frame *frame, nghttp2_bufs *bufs) { in unpack_framebuf() argument
41 return unpack_frame(frame, buf->pos, nghttp2_buf_len(buf)); in unpack_framebuf()
44 int unpack_frame(nghttp2_frame *frame, const uint8_t *in, size_t len) { in unpack_frame() argument
53 nghttp2_frame_unpack_frame_hd(&frame->hd, in); in unpack_frame()
54 switch (frame->hd.type) { in unpack_frame()
56 payloadoff = ((frame->hd.flags & NGHTTP2_FLAG_PADDED) > 0); in unpack_frame()
57 nghttp2_frame_unpack_headers_payload(&frame->headers, payload + payloadoff); in unpack_frame()
60 nghttp2_frame_unpack_priority_payload(&frame->priority, payload); in unpack_frame()
63 nghttp2_frame_unpack_rst_stream_payload(&frame->rst_stream, payload); in unpack_frame()
67 &frame in unpack_frame()
220 nghttp2_frame frame; pack_headers() local
239 nghttp2_frame frame; pack_push_promise() local
[all...]
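
    The helpers above are nghttp2 test internals; as a hedged illustration of the same frame-header dispatch through the public API, here is a sketch of an on-frame-received callback. The callback body and its registration are assumptions, not code from the tests:

        #include <nghttp2/nghttp2.h>

        /* Hypothetical callback: switch on the unpacked frame header type,
         * much like unpack_frame() does in the test helper. */
        static int on_frame_recv(nghttp2_session *session, const nghttp2_frame *frame,
                                 void *user_data)
        {
            (void)session; (void)user_data;
            switch (frame->hd.type) {
            case NGHTTP2_HEADERS:
                /* frame->headers holds the unpacked HEADERS payload */
                break;
            case NGHTTP2_RST_STREAM:
                /* frame->rst_stream.error_code gives the reset reason */
                break;
            default:
                break;
            }
            return 0;
        }

        /* registered with:
         *   nghttp2_session_callbacks_set_on_frame_recv_callback(callbacks, on_frame_recv);
         */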
/third_party/python/Lib/
H A Dprofile.py107 frame that is currently active (self.cur[-2]). The following are the
112 [-2:] intact (frame and previous tuple). In case an internal error is
115 [ 0] = Time that needs to be charged to the parent frame's function.
117 timing data for the parent frame.
118 [ 1] = Total time spent in this frame's function, excluding time in
121 frame's function (this latter is tallied in cur[1]).
122 [-3] = Name of the function that corresponds to this frame.
123 [-2] = Actual frame that we correspond to (used to sync exception handling).
124 [-1] = Our parent 6-tuple (corresponds to frame.f_back).
183 def trace_dispatch(self, frame, even
[all...]
/third_party/node/deps/v8/src/execution/
H A Dframes-inl.h9 #include "src/execution/frame-constants.h"
122 // The receiver is the first argument on the frame. in receiver_slot_object()
283 inline JavaScriptFrame* JavaScriptFrameIterator::frame() const { in frame() function in v8::internal::JavaScriptFrameIterator
284 StackFrame* frame = iterator_.frame(); in frame() local
285 return JavaScriptFrame::cast(frame); in frame()
289 StackFrame* frame = iterator_.Reframe(); in Reframe() local
290 return JavaScriptFrame::cast(frame); in Reframe()
293 inline CommonFrame* StackTraceFrameIterator::frame() const { in frame() function in v8::internal::StackTraceFrameIterator
294 StackFrame* frame in frame() local
320 inline StackFrame* SafeStackFrameIterator::frame() const { frame() function in v8::internal::SafeStackFrameIterator
[all...]
/foundation/CastEngine/castengine_wifi_display/tests/demo/player/
H A Dplayer_demo.cpp31 #include "frame.h"
93 void OnFrame(const Frame::Ptr &frame) override
95 player_->OnData((uint8_t *)frame->Data(), (size_t)frame->Size(), true);
96 SHARING_LOGD("recv decoded data(%p) len(%{public}d)", frame->Data(), frame->Size());
97 for (int i = 0; i < 12 && i < frame->Size(); i++) {
98 printf("%02x ", *(frame->Data() + i));
234 g711Unpack->SetOnRtpUnpack([=](uint32_t ssrc, const Frame::Ptr &frame) { in main()
236 if (frame in main()
[all...]

