/third_party/node/deps/v8/src/compiler/ |
| osr.cc | 8 #include "src/compiler/frame.h" 23 void OsrHelper::SetupFrame(Frame* frame) { in SetupFrame() argument 24 // The optimized frame will subsume the unoptimized frame. Do so by reserving in SetupFrame() 26 frame->ReserveSpillSlots(UnoptimizedFrameSlots()); in SetupFrame()
|
/third_party/skia/third_party/externals/oboe/src/flowgraph/resampler/ |
| PolyphaseResamplerMono.h | 32 void writeFrame(const float *frame) override; 34 void readFrame(float *frame) override;
|
| PolyphaseResamplerStereo.h | 32 void writeFrame(const float *frame) override; 34 void readFrame(float *frame) override;
|
| SincResamplerStereo.h | 32 void writeFrame(const float *frame) override; 34 void readFrame(float *frame) override;
|
| LinearResampler.h | 34 void writeFrame(const float *frame) override; 36 void readFrame(float *frame) override;
|
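The four headers above all implement the same two-call interface: writeFrame() pushes one input frame into the polyphase/sinc/linear filter and readFrame() pulls one interpolated output frame. Callers normally drive them through the MultiChannelResampler base class; the sketch below shows that pattern, assuming oboe's documented make()/isWriteNeeded()/writeNextFrame()/readNextFrame() wrappers and a plain `resampler` namespace, both of which may differ slightly in this vendored copy.

```cpp
#include <vector>
#include "MultiChannelResampler.h"   // base class of the resamplers listed above (include path assumed)

using namespace resampler;           // namespace used by the standalone resampler sources (assumed)

// Resample a mono float buffer from 44.1 kHz to 48 kHz, one frame at a time.
std::vector<float> resampleMono44kTo48k(const std::vector<float>& input) {
    MultiChannelResampler* converter = MultiChannelResampler::make(
            /*channelCount=*/1, /*inputRate=*/44100, /*outputRate=*/48000,
            MultiChannelResampler::Quality::Medium);

    std::vector<float> output;
    size_t inIndex = 0;
    float outFrame = 0.0f;
    while (inIndex < input.size()) {
        if (converter->isWriteNeeded()) {
            converter->writeNextFrame(&input[inIndex]);  // forwards to the writeFrame() overrides above
            ++inIndex;
        } else {
            converter->readNextFrame(&outFrame);         // forwards to the readFrame() overrides above
            output.push_back(outFrame);
        }
    }
    delete converter;
    return output;
}
```

For stereo or wider layouts, only the channel count changes; each writeNextFrame()/readNextFrame() call still moves exactly one frame (channelCount samples).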
/third_party/ffmpeg/libavcodec/ |
| adpcmenc.c | 600 const AVFrame *frame, int *got_packet_ptr) in adpcm_encode_frame() 609 samples = (const int16_t *)frame->data[0]; in adpcm_encode_frame() 610 samples_p = (int16_t **)frame->extended_data; in adpcm_encode_frame() 617 pkt_size = (frame->nb_samples * channels) / 2; in adpcm_encode_frame() 626 int blocks = (frame->nb_samples - 1) / 8; in adpcm_encode_frame() 704 for (int i = 0; i < frame->nb_samples; i++) { in adpcm_encode_frame() 718 for (int n = frame->nb_samples / 2; n > 0; n--) { in adpcm_encode_frame() 729 const int n = frame->nb_samples - 1; in adpcm_encode_frame() 762 for (int i = 1; i < frame->nb_samples; i++) { in adpcm_encode_frame() 822 int n = frame... in adpcm_encode_frame() 599 adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, int *got_packet_ptr) (definition; frame is an argument) [all...] |
| encode.h | 24 #include "libavutil/frame.h" 30 * Called by encoders to get the next frame for encoding. 32 * @param frame An empty frame to be filled with data. 33 * @return 0 if a new reference has been successfully written to frame 38 int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame); 48 * Allocate buffers for a frame. Encoder equivalent to ff_get_buffer(). 50 int ff_encode_alloc_frame(AVCodecContext *avctx, AVFrame *frame);
|
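encode.h documents the internal helpers (ff_encode_get_frame() pulls the next queued frame, ff_encode_alloc_frame() allocates its buffers) that per-codec callbacks such as adpcm_encode_frame() are driven through. Application code never calls these directly; the public counterpart is the send/receive loop below, a minimal sketch using only documented libavcodec API and assuming an already opened encoder context.

```cpp
extern "C" {
#include <libavcodec/avcodec.h>
}

// Push one AVFrame into an opened encoder and drain any packets it produces.
static int encode_one(AVCodecContext* enc, AVFrame* frame, AVPacket* pkt) {
    int ret = avcodec_send_frame(enc, frame);    // frame == nullptr flushes the encoder
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                            // encoder wants more input, or is fully drained
        if (ret < 0)
            return ret;                          // real error
        /* ...write pkt->data / pkt->size to the output here... */
        av_packet_unref(pkt);
    }
    return 0;
}
```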
| dvenc.c | 872 y_ptr = s->frame->data[0] + (mb_y * s->frame->linesize[0] + mb_x) * 8; in dv_encode_video_segment() 873 linesize = s->frame->linesize[0]; in dv_encode_video_segment() 886 y_stride = s->frame->linesize[0] * (1 << (3*!enc_blk->dct_mode)); in dv_encode_video_segment() 890 y_ptr = s->frame->data[0] + in dv_encode_video_segment() 891 (mb_y * s->frame->linesize[0] + mb_x) * 8; in dv_encode_video_segment() 892 linesize = s->frame->linesize[0]; in dv_encode_video_segment() 910 c_offset = ((mb_y >> (s->sys->pix_fmt == AV_PIX_FMT_YUV420P)) * s->frame->linesize[1] + in dv_encode_video_segment() 913 uint8_t *c_ptr = s->frame->data[j] + c_offset; in dv_encode_video_segment() 914 linesize = s->frame... in dv_encode_video_segment() 1170 dvvideo_encode_frame(AVCodecContext *c, AVPacket *pkt, const AVFrame *frame, int *got_packet) (definition; frame is an argument) [all...] |
| dxva2.c | 753 static void *get_surface(const AVCodecContext *avctx, const AVFrame *frame) in get_surface() argument 756 if (frame->format == AV_PIX_FMT_D3D11) { in get_surface() 758 intptr_t index = (intptr_t)frame->data[1]; in get_surface() 760 sctx->d3d11_texture != (ID3D11Texture2D *)frame->data[0]) { in get_surface() 761 av_log((void *)avctx, AV_LOG_ERROR, "get_buffer frame is invalid!\n"); in get_surface() 767 return frame->data[3]; in get_surface() 772 const AVFrame *frame) in ff_dxva2_get_surface_index() 774 void *surface = get_surface(avctx, frame); in ff_dxva2_get_surface_index() 779 return (intptr_t)frame->data[1]; in ff_dxva2_get_surface_index() 870 static int frame_add_buf(AVFrame *frame, AVBufferRe... (definition; frame is an argument) 770 ff_dxva2_get_surface_index(const AVCodecContext *avctx, const AVDXVAContext *ctx, const AVFrame *frame) (definition; frame is an argument) 886 ff_dxva2_common_end_frame(AVCodecContext *avctx, AVFrame *frame, const void *pp, unsigned pp_size, const void *qm, unsigned qm_size, int (*commit_bs_si)(AVCodecContext *, DECODER_BUFFER_DESC *bs, DECODER_BUFFER_DESC *slice)) (definition; frame is an argument) [all...] |
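get_surface() above leans on the documented layout of hardware frames: with AV_PIX_FMT_D3D11, data[0] is the ID3D11Texture2D and data[1] the texture-array index, while the legacy DXVA2 path keeps the Direct3D 9 surface pointer in data[3]. The sketch below only restates that pixfmt.h contract from the consumer side; it is Windows-specific and the helper name is made up for illustration.

```cpp
extern "C" {
#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>
}
#include <d3d11.h>
#include <cstdint>

// Recover the GPU surface behind a hardware-decoded AVFrame (hypothetical helper).
static void inspect_hw_frame(const AVFrame* frame) {
    if (frame->format == AV_PIX_FMT_D3D11) {
        auto* texture = reinterpret_cast<ID3D11Texture2D*>(frame->data[0]);
        intptr_t index = reinterpret_cast<intptr_t>(frame->data[1]);
        // `texture` is a slice of a texture array and `index` selects the slice,
        // which is exactly what ff_dxva2_get_surface_index() returns above.
        (void)texture; (void)index;
    } else {
        // Legacy DXVA2: data[3] holds the IDirect3DSurface9* used for decoding.
        void* d3d9_surface = frame->data[3];
        (void)d3d9_surface;
    }
}
```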
| wmaprodec.c | 39 * a frame. 51 * || frame 0 || frame 1 || frame 2 || frames 58 * The frame layouts for the individual channels of a wma frame does not need 61 * However, if the offsets and lengths of several subframes of a frame are the 76 * Scale factors are submitted for every frame but they might be shared 80 * Every subframe length and offset combination in the frame layout shares a 113 #define MAX_FRAMESIZE 32768 ///< maximum compressed frame size 1440 decode_frame(WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr) (definition; frame is an argument) 1612 decode_packet(AVCodecContext *avctx, WMAProDecodeCtx *s, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) (definition; frame is an argument) 1812 wmapro_decode_packet(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) (definition; frame is an argument) 1828 xma_decode_packet(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) (definition; frame is an argument) [all...] |
/third_party/vk-gl-cts/external/vulkan-docs/src/scripts/spec_tools/ |
| macro_checker_file.py | 1286 replacement=None, fix=None, see_also=None, frame=None): 1289 Wrapper around self.diag() that automatically sets severity as well as frame. 1304 frame -- The 'inspect' stack frame corresponding to the location that raised this message. 1307 if not frame: 1308 frame = currentframe().f_back 1310 replacement=replacement, context=context, fix=fix, see_also=see_also, frame=frame) 1313 context=None, fix=None, see_also=None, frame=None): 1316 Wrapper around self.diag() that automatically sets severity as well as frame [all...] |
/third_party/elfutils/libdw/ |
| dwarf_cfi_addrframe.c | 1 /* Compute frame state at PC. 36 dwarf_cfi_addrframe (Dwarf_CFI *cache, Dwarf_Addr address, Dwarf_Frame **frame) in dwarf_cfi_addrframe() argument 46 int error = __libdw_frame_at_address (cache, fde, address, frame); in dwarf_cfi_addrframe()
|
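dwarf_cfi_addrframe() computes the frame state that the call-frame information describes for a single address. A minimal calling sketch follows, assuming an already opened Dwarf handle; dwarf_getcfi(), dwarf_frame_info() and the convention that the returned Dwarf_Frame is malloc'd and freed by the caller are taken from libdw.h, so verify them against your elfutils version.

```cpp
extern "C" {
#include <elfutils/libdw.h>
}
#include <cstdio>
#include <cstdlib>

// Look up the call-frame information that applies at `pc`.
static void show_frame_at(Dwarf* dbg, Dwarf_Addr pc) {
    Dwarf_CFI* cfi = dwarf_getcfi(dbg);          // CFI from .debug_frame (assumed present)
    if (cfi == nullptr)
        return;

    Dwarf_Frame* frame = nullptr;
    if (dwarf_cfi_addrframe(cfi, pc, &frame) != 0) {
        std::fprintf(stderr, "no frame info at %#llx: %s\n",
                     (unsigned long long)pc, dwarf_errmsg(-1));
        return;
    }

    Dwarf_Addr start = 0, end = 0;
    bool signal_frame = false;
    if (dwarf_frame_info(frame, &start, &end, &signal_frame) == 0)   // PC range covered (assumed API)
        std::printf("frame covers [%#llx, %#llx)\n",
                    (unsigned long long)start, (unsigned long long)end);

    free(frame);   // libdw hands back a malloc'd Dwarf_Frame owned by the caller (assumption)
}
```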
/third_party/ffmpeg/libavutil/ |
| film_grain_params.c | 31 AVFilmGrainParams *av_film_grain_params_create_side_data(AVFrame *frame) in av_film_grain_params_create_side_data() argument 33 AVFrameSideData *side_data = av_frame_new_side_data(frame, in av_film_grain_params_create_side_data()
|
| mastering_display_metadata.h | 24 #include "frame.h" 81 * Allocate a complete AVMasteringDisplayMetadata and add it to the frame. 83 * @param frame The frame which side data is added to. 87 AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame); 105 * Max average light level per frame (cd/m^2). 120 * Allocate a complete AVContentLightMetadata and add it to the frame. 122 * @param frame The frame which side data is added to. 126 AVContentLightMetadata *av_content_light_metadata_create_side_data(AVFrame *frame); [all...] |
| hdr_dynamic_vivid_metadata.c | 36 AVDynamicHDRVivid *av_dynamic_hdr_vivid_create_side_data(AVFrame *frame) in av_dynamic_hdr_vivid_create_side_data() argument 38 AVFrameSideData *side_data = av_frame_new_side_data(frame, in av_dynamic_hdr_vivid_create_side_data()
|
| hdr_dynamic_metadata.c | 36 AVDynamicHDRPlus *av_dynamic_hdr_plus_create_side_data(AVFrame *frame) in av_dynamic_hdr_plus_create_side_data() argument 38 AVFrameSideData *side_data = av_frame_new_side_data(frame, in av_dynamic_hdr_plus_create_side_data()
|
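The four libavutil files above share one pattern: a *_create_side_data(AVFrame *frame) helper that allocates the metadata struct through av_frame_new_side_data() and attaches it to the frame. A caller-side sketch for the HDR10 static-metadata pair from mastering_display_metadata.h is below; field names come from the public header, the numeric values are placeholders.

```cpp
extern "C" {
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/mastering_display_metadata.h>
#include <libavutil/rational.h>
}

// Attach HDR10 static metadata (SMPTE ST 2086 + content light level) to a frame.
static int tag_hdr10(AVFrame* frame) {
    AVMasteringDisplayMetadata* mdm = av_mastering_display_metadata_create_side_data(frame);
    if (!mdm)
        return AVERROR(ENOMEM);
    mdm->has_luminance = 1;
    mdm->min_luminance = av_make_q(50, 10000);   // 0.005 cd/m^2 (placeholder value)
    mdm->max_luminance = av_make_q(1000, 1);     // 1000 cd/m^2  (placeholder value)

    AVContentLightMetadata* clm = av_content_light_metadata_create_side_data(frame);
    if (!clm)
        return AVERROR(ENOMEM);
    clm->MaxCLL  = 1000;   // maximum content light level, cd/m^2
    clm->MaxFALL = 400;    // maximum frame-average light level, cd/m^2
    return 0;
}
```

The film-grain and dynamic-HDR helpers listed above follow the same call shape; only the struct being filled in differs.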
/third_party/ffmpeg/tools/ |
| decode_simple.h | 30 #include "libavutil/frame.h" 39 AVFrame *frame; member 41 int (*process_frame)(struct DecodeContext *dc, AVFrame *frame);
|
/third_party/skia/docs/examples/ |
| software_bitmap_w_perspective.cpp | 9 const SkPoint src[] = {{ 0, 0}, {width, 0}, {width * (float)frame, height * (float)frame}, { 0, height}}; in REG_FIDDLE_ANIMATED()
|
/drivers/peripheral/audio/test/benchmarktest/capture/ |
| audio_capture_benchmarktest.cpp | 209 int8_t *frame = (int8_t *)calloc(1, frameLen); in BENCHMARK_F() local 210 EXPECT_NE(nullptr, frame); in BENCHMARK_F() 213 ret = capture_->CaptureFrame(capture_, frame, &frameLen, &requestBytes); in BENCHMARK_F() 218 if (frame != nullptr) { in BENCHMARK_F() 219 free(frame); in BENCHMARK_F() 220 frame = nullptr; in BENCHMARK_F() 238 int8_t *frame = (int8_t *)calloc(1, frameLen); in BENCHMARK_F() local 239 EXPECT_NE(nullptr, frame); in BENCHMARK_F() 241 ret = capture_->CaptureFrame(capture_, frame, &frameLen, &requestBytes); in BENCHMARK_F() 250 if (frame !... in BENCHMARK_F() [all...] |
/test/xts/hats/hdf/audio/idl/benchmarktest/capture/ |
| audio_capture_benchmarktest.cpp | 210 int8_t *frame = (int8_t *)calloc(1, frameLen); in BENCHMARK_F() local 211 EXPECT_NE(nullptr, frame); in BENCHMARK_F() 214 ret = capture_->CaptureFrame(capture_, frame, &frameLen, &requestBytes); in BENCHMARK_F() 220 if (frame != nullptr) { in BENCHMARK_F() 221 free(frame); in BENCHMARK_F() 222 frame = nullptr; in BENCHMARK_F() 240 int8_t *frame = (int8_t *)calloc(1, frameLen); in BENCHMARK_F() local 241 EXPECT_NE(nullptr, frame); in BENCHMARK_F() 243 ret = capture_->CaptureFrame(capture_, frame, &frameLen, &requestBytes); in BENCHMARK_F() 253 if (frame !... in BENCHMARK_F() [all...] |
/third_party/node/src/ |
| inspector_socket.cc | 232 // Constants for hybi-10 frame format. 257 std::vector<char> frame; in encode_frame_hybi17() local 259 frame.push_back(kFinalBit | op_code); in encode_frame_hybi17() 262 frame.push_back(static_cast<char>(data_length)); in encode_frame_hybi17() 264 frame.push_back(kTwoBytePayloadLengthField); in encode_frame_hybi17() 265 frame.push_back((data_length & 0xFF00) >> 8); in encode_frame_hybi17() 266 frame.push_back(data_length & 0xFF); in encode_frame_hybi17() 268 frame.push_back(kEightBytePayloadLengthField); in encode_frame_hybi17() 276 frame.insert(frame... in encode_frame_hybi17() [all...] |
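encode_frame_hybi17() builds the WebSocket frame header by hand: a FIN-plus-opcode byte followed by a 7-bit payload length that escapes to a 2-byte or 8-byte big-endian extended length. The sketch below reproduces just that length encoding per RFC 6455, assuming kTwoBytePayloadLengthField and kEightBytePayloadLengthField correspond to the standard 126 and 127 markers.

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// Append a server-side (unmasked) WebSocket frame header for `payload_len` bytes.
static void append_ws_header(std::vector<uint8_t>& frame, uint8_t opcode, uint64_t payload_len) {
    const uint8_t kFinalBit = 0x80;
    frame.push_back(kFinalBit | opcode);

    if (payload_len < 126) {
        frame.push_back(static_cast<uint8_t>(payload_len));   // 7-bit length fits directly
    } else if (payload_len <= 0xFFFF) {
        frame.push_back(126);                                  // 2-byte extended length follows
        frame.push_back(static_cast<uint8_t>((payload_len >> 8) & 0xFF));
        frame.push_back(static_cast<uint8_t>(payload_len & 0xFF));
    } else {
        frame.push_back(127);                                  // 8-byte extended length follows
        for (int shift = 56; shift >= 0; shift -= 8)
            frame.push_back(static_cast<uint8_t>((payload_len >> shift) & 0xFF));
    }
    // Payload bytes follow the header; a client would also set the mask bit and a 4-byte masking key.
}
```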
/third_party/littlefs/scripts/ |
| stack.py | 102 'file', 'function', 'frame', 'limit', 'children'])): 104 _fields = ['frame', 'limit'] 105 _sort = ['limit', 'frame'] 106 _types = {'frame': Int, 'limit': Int} 110 frame=0, limit=0, children=set()): 112 Int(frame), Int(limit), 117 self.frame + other.frame, 198 for source, (s_file, s_function, frame, targets) in callgraph.items(): 223 callgraph_[source] = (s_file, s_function, frame, target [all...] |
/third_party/vk-gl-cts/modules/egl/ |
| teglBufferAgeTests.cpp | 208 void render (int width, int height, const Frame& frame) const; 258 void GLES2Renderer::render (int width, int height, const Frame& frame) const in render() 260 for (size_t drawNdx = 0; drawNdx < frame.draws.size(); drawNdx++) in render() 262 const ColoredRect& coloredRect = frame.draws[drawNdx].rect; in render() 263 if (frame.draws[drawNdx].drawType == BufferAgeTest::DRAWTYPE_GLES2_RENDER) in render() 313 else if (frame.draws[drawNdx].drawType == BufferAgeTest::DRAWTYPE_GLES2_CLEAR) in render() 331 void render (tcu::Surface* target, const Frame& frame) const; 341 void ReferenceRenderer::render (tcu::Surface* target, const Frame& frame) const in render() 343 for (size_t drawNdx = 0; drawNdx < frame.draws.size(); drawNdx++) in render() 345 const ColoredRect& coloredRect = frame... in render() [all...] |
/third_party/NuttX/drivers/usbdev/gadget/ |
| usbd_video.c | 507 /* get the first frame of video data */ in uvc_video_tran_nocp() 710 /* determine whether reached the frame end */ in uvc_nocp_continue() 717 /* get the next frame of video data */ in uvc_nocp_continue() 753 /* increment frame offset */ in uvc_nocp_continue() 770 struct uvc_transfer_data *frame; in uvc_copy_continue() local 778 frame = &(uvc->tdata); in uvc_copy_continue() 779 frame->data = tran->data; in uvc_copy_continue() 780 frame->length = tran->length; in uvc_copy_continue() 781 frame->last = 0; in uvc_copy_continue() 784 ret = copy(uvc, frame, uv... in uvc_copy_continue() [all...] |
/third_party/skia/third_party/externals/libwebp/imageio/ |
| image_enc.c | 82 IWICBitmapFrameEncode* frame = NULL; in WriteUsingWIC() local 106 IFS(IWICBitmapEncoder_CreateNewFrame(encoder, &frame, NULL)); in WriteUsingWIC() 107 IFS(IWICBitmapFrameEncode_Initialize(frame, NULL)); in WriteUsingWIC() 108 IFS(IWICBitmapFrameEncode_SetSize(frame, width, height)); in WriteUsingWIC() 109 IFS(IWICBitmapFrameEncode_SetPixelFormat(frame, &pixel_format)); in WriteUsingWIC() 110 IFS(IWICBitmapFrameEncode_WritePixels(frame, height, stride, in WriteUsingWIC() 112 IFS(IWICBitmapFrameEncode_Commit(frame)); in WriteUsingWIC() 137 if (frame != NULL) IUnknown_Release(frame); in WriteUsingWIC()
|