/third_party/skia/tests/
MultiPictureDocumentTest.cpp
  152  // Examine each frame.  (in DEF_TEST())
  154  for (const auto& frame : frames) {  (in DEF_TEST())
  155  SkRect bounds = frame.fPicture->cullRect();  (in DEF_TEST())
  162  surf->getCanvas()->drawPicture(frame.fPicture);  (in DEF_TEST())
  345  // Record single frame  (in DEF_GPUTEST_FOR_RENDERING_CONTEXTS())
  356  // Note that this only works because we're doing one frame only. If this test were recording ...  (in DEF_GPUTEST_FOR_RENDERING_CONTEXTS())
  377  "Expected 1 frame, got %d. \n 0 frames may indicate the written file was not a " ...  (in DEF_GPUTEST_FOR_RENDERING_CONTEXTS())
  386  // Examine frame.  (in DEF_GPUTEST_FOR_RENDERING_CONTEXTS())
/third_party/python/Lib/tkinter/ |
simpledialog.py
  48  self.frame = Frame(self.root)
  49  self.frame.pack()
  56  b = Button(self.frame, text=s, ...
/third_party/wpa_supplicant/wpa_supplicant-2.9/src/drivers/ |
wpa_hal.c
  1001  uint8_t *frame = NULL;  (local in WifiWpaWpaSendEapol())
  1011  frame = os_zalloc(frameLen);  (in WifiWpaWpaSendEapol())
  1012  if (frame == NULL) {  (in WifiWpaWpaSendEapol())
  1016  l2_ethhdr = (struct l2_ethhdr *)frame;  (in WifiWpaWpaSendEapol())
  1019  os_free(frame);  (in WifiWpaWpaSendEapol())
  1020  frame = NULL;  (in WifiWpaWpaSendEapol())
  1025  os_free(frame);  (in WifiWpaWpaSendEapol())
  1026  frame = NULL;  (in WifiWpaWpaSendEapol())
  1034  os_free(frame);  (in WifiWpaWpaSendEapol())
  1035  frame ...  (in WifiWpaWpaSendEapol())
  (more matches not shown)
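The hits above outline the usual EAPOL send pattern: allocate a zeroed buffer sized for an Ethernet header plus the EAPOL payload, fill in the l2 header, and free the buffer on every exit path. The sketch below illustrates only that pattern, assuming wpa_supplicant's os_zalloc/os_free/os_memcpy, struct l2_ethhdr, host_to_be16 and ETH_P_EAPOL; the function name and the send_raw() call are placeholders, not the driver's real interface.

    /* Assumes wpa_supplicant's utils/common.h (u8, ETH_ALEN, ETH_P_EAPOL,
     * host_to_be16), utils/os.h (os_zalloc/os_free/os_memcpy) and
     * l2_packet/l2_packet.h (struct l2_ethhdr). */
    static int send_raw(const u8 *buf, size_t len);  /* placeholder for the driver's raw send */

    static int send_eapol_sketch(const u8 *dst, const u8 *own_addr,
                                 const u8 *data, size_t data_len)
    {
        size_t frame_len = sizeof(struct l2_ethhdr) + data_len;
        u8 *frame = os_zalloc(frame_len);   /* zeroed header + payload buffer */
        struct l2_ethhdr *l2;

        if (frame == NULL)
            return -1;

        l2 = (struct l2_ethhdr *) frame;
        os_memcpy(l2->h_dest, dst, ETH_ALEN);
        os_memcpy(l2->h_source, own_addr, ETH_ALEN);
        l2->h_proto = host_to_be16(ETH_P_EAPOL);
        os_memcpy(frame + sizeof(struct l2_ethhdr), data, data_len);

        if (send_raw(frame, frame_len) < 0) {  /* free on the error path ... */
            os_free(frame);
            return -1;
        }

        os_free(frame);                        /* ... and on the success path */
        return 0;
    }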
/third_party/wpa_supplicant/wpa_supplicant-2.9_standard/src/drivers/ |
wpa_hal.c
  1004  uint8_t *frame = NULL;  (local in WifiWpaWpaSendEapol())
  1014  frame = os_zalloc(frameLen);  (in WifiWpaWpaSendEapol())
  1015  if (frame == NULL) {  (in WifiWpaWpaSendEapol())
  1019  l2_ethhdr = (struct l2_ethhdr *)frame;  (in WifiWpaWpaSendEapol())
  1022  os_free(frame);  (in WifiWpaWpaSendEapol())
  1023  frame = NULL;  (in WifiWpaWpaSendEapol())
  1028  os_free(frame);  (in WifiWpaWpaSendEapol())
  1029  frame = NULL;  (in WifiWpaWpaSendEapol())
  1037  os_free(frame);  (in WifiWpaWpaSendEapol())
  1038  frame ...  (in WifiWpaWpaSendEapol())
  (more matches not shown)
/third_party/ffmpeg/libavfilter/ |
vf_minterpolate.c
  205  { "fps", "output's frame rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "60"}, 0, INT_MAX, FLAGS },
  231  CONST("fdiff", "frame difference", SCD_METHOD_FDIFF, "scene"),
  350  Frame *frame = &mi_ctx->frames[i];  (local in config_input())
  351  frame->blocks = av_calloc(mi_ctx->b_count, sizeof(*frame->blocks));  (in config_input())
  352  if (!frame->blocks)  (in config_input())
  456  //left mb in current frame  (in search_mv())
  460  //top mb in current frame  (in search_mv())
  464  //top-right mb in current frame  (in search_mv())
  483  //collocated mb in prev frame  (in search_mv())
  944  Frame *frame = &mi_ctx->frames[pixel_refs->refs[i]];  (local in set_frame_data())
  1224  Frame *frame = &mi_ctx->frames[i];  (local in uninit())
  (more matches not shown)
vf_lut3d.c
  1330  static void update_clut_packed(LUT3DContext *lut3d, const AVFrame *frame)  (argument in update_clut_packed())
  1332  const uint8_t *data = frame->data[0];  (in update_clut_packed())
  1333  const int linesize = frame->linesize[0];  (in update_clut_packed())
  1367  static void update_clut_planar(LUT3DContext *lut3d, const AVFrame *frame)  (argument in update_clut_planar())
  1369  const uint8_t *datag = frame->data[0];  (in update_clut_planar())
  1370  const uint8_t *datab = frame->data[1];  (in update_clut_planar())
  1371  const uint8_t *datar = frame->data[2];  (in update_clut_planar())
  1372  const int glinesize = frame->linesize[0];  (in update_clut_planar())
  1373  const int blinesize = frame->linesize[1];  (in update_clut_planar())
  1374  const int rlinesize = frame ...  (in update_clut_planar())
  1414  update_clut_float(LUT3DContext *lut3d, const AVFrame *frame)  (argument in update_clut_float())
  (more matches not shown)
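The update_clut_* hits read the incoming CLUT frame through frame->data[] and frame->linesize[], advancing by linesize per row because rows can be padded beyond the visible width. The sketch below shows only that access pattern for an 8-bit packed RGB frame; the actual filter handles several packed, planar and float layouts, and read_packed_rgb24 is an illustrative name.

    #include <libavutil/frame.h>

    /* Walk an 8-bit packed RGB frame row by row, stepping by linesize
     * rather than width * 3, since rows may carry padding. */
    static void read_packed_rgb24(const AVFrame *frame)
    {
        const uint8_t *row = frame->data[0];
        const int linesize = frame->linesize[0];

        for (int y = 0; y < frame->height; y++) {
            for (int x = 0; x < frame->width; x++) {
                uint8_t r = row[3 * x + 0];
                uint8_t g = row[3 * x + 1];
                uint8_t b = row[3 * x + 2];
                (void)r; (void)g; (void)b;  /* a real CLUT loader would store these */
            }
            row += linesize;                /* next row, including padding */
        }
    }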
vf_dnn_classify.c
  64  static int dnn_classify_post_proc(AVFrame *frame, DNNData *output, uint32_t bbox_index, AVFilterContext *filter_ctx)  (argument in dnn_classify_post_proc())
  79  sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DETECTION_BBOXES);  (in dnn_classify_post_proc())
  275  // if frame got, schedule to next filter  (in dnn_classify_activate())
fflcms2.c
  173  int ff_icc_profile_attach(FFIccContext *s, cmsHPROFILE profile, AVFrame *frame)  (argument in ff_icc_profile_attach())
  190  if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_ICC_PROFILE, buf)) {  (in ff_icc_profile_attach())
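The ff_icc_profile_attach hit shows the frame side-data attach: the exported ICC profile bytes travel as AV_FRAME_DATA_ICC_PROFILE side data owned by an AVBufferRef. A minimal sketch of that libavutil pattern follows; attach_icc_profile and its profile_data/profile_len parameters are illustrative names, not FFmpeg's own helper.

    #include <string.h>
    #include <libavutil/buffer.h>
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    static int attach_icc_profile(AVFrame *frame,
                                  const uint8_t *profile_data, size_t profile_len)
    {
        AVBufferRef *buf = av_buffer_alloc(profile_len);
        if (!buf)
            return AVERROR(ENOMEM);

        memcpy(buf->data, profile_data, profile_len);

        /* On success the frame takes ownership of buf; on failure we must
         * release the reference ourselves. */
        if (!av_frame_new_side_data_from_buf(frame, AV_FRAME_DATA_ICC_PROFILE, buf)) {
            av_buffer_unref(&buf);
            return AVERROR(ENOMEM);
        }
        return 0;
    }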
vf_kerndeint.c
  40  int frame; ///< frame count, starting from 0  (member)
  127  int x, y, plane, val, hi, lo, g, h, n = kerndeint->frame++;  (in filter_frame())
/third_party/node/deps/v8/src/compiler/backend/ |
code-generator.cc
  16  #include "src/diagnostics/eh-frame.h"
  44  CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage, ...  (argument in CodeGenerator())
  91  CreateFrameAccessState(frame);  (in CodeGenerator())
  114  void CodeGenerator::CreateFrameAccessState(Frame* frame) {  (argument in CreateFrameAccessState())
  115  FinishFrame(frame);  (in CreateFrameAccessState())
  116  frame_access_state_ = zone()->New<FrameAccessState>(frame);  (in CreateFrameAccessState())
  139  frame()->GetTotalFrameSlotCount() * kSystemPointerSize;  (in GetStackCheckOffset())
  145  // interpreted frame, or the maximal number of bytes pushed to the stack  (in GetStackCheckOffset())
  194  // Open a frame scope to indicate that there is a frame o ...  (in AssembleCode())
  (more matches not shown)
/foundation/multimedia/audio_framework/frameworks/native/hdiadapter/source/common/ |
i_audio_capturer_source.cpp
  179  int32_t IAudioCapturerSourceFrame(void *wapper, char *frame, uint64_t requestBytes, uint64_t *replyBytes)  (argument in IAudioCapturerSourceFrame())
  187  int32_t ret = iAudioCapturerSource->CaptureFrame(frame, requestBytes, *replyBytes);  (in IAudioCapturerSourceFrame())
/foundation/graphic/graphic_2d/rosen/modules/render_service/core/pipeline/ |
rs_base_render_engine.h
  65  RSRenderFrame(const std::shared_ptr<RSSurfaceOhos>& target, std::unique_ptr<RSSurfaceFrame>&& frame)  (argument in RSRenderFrame())
  66  : targetSurface_(target), surfaceFrame_(std::move(frame))  (in RSRenderFrame())
  133  // There would only one user(thread) to renderFrame(request frame) at one time.
  142  // There would only one user(thread) to renderFrame(request frame) at one time.
/foundation/graphic/graphic_2d/rosen/test/render_service/render_service_client/fuzztest/ui/rsnode_fuzzer/ |
rsnode_fuzzer.cpp
  100  Vector4f frame(GetData<float>(), GetData<float>(), GetData<float>(), GetData<float>());  (in RSNodeFuzzTestInner01())
  114  surfaceNode->SetFrame(frame);  (in RSNodeFuzzTestInner01())
/foundation/multimedia/media_foundation/test/unittest/ |
TestFFmpegUtils.cpp
  219  AVFrame* frame = av_frame_alloc();  (local in HWTEST())
  221  auto res = FillAVPicture(frame, &ptr, AVPixelFormat::AV_PIX_FMT_ABGR, 1920, 1080);  (in HWTEST())
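The test allocates an AVFrame and hands a raw ABGR buffer to a FillAVPicture helper. That helper is project code and not shown in the hits, so the sketch below only shows the generic libavutil way of pointing an AVFrame at an externally owned pixel buffer; fill_frame_from_buffer is an illustrative name.

    #include <libavutil/frame.h>
    #include <libavutil/imgutils.h>

    static int fill_frame_from_buffer(AVFrame *frame, uint8_t *pixels,
                                      enum AVPixelFormat fmt, int w, int h)
    {
        frame->format = fmt;
        frame->width  = w;
        frame->height = h;
        /* Point frame->data/linesize at the caller's buffer; no copy is made,
         * so 'pixels' must stay alive as long as the frame is used. */
        return av_image_fill_arrays(frame->data, frame->linesize,
                                    pixels, fmt, w, h, 1);
    }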
/foundation/communication/dsoftbus/components/nstackx/nstackx_core/dfile/include/ |
nstackx_dfile_transfer.h
  41  DFILE_TRANS_MSG_FILE_SENT,  /* Sender send TRANSFER DONE ACK frame and come to end */
  53  DFILE_TRANS_FILE_HEADER_CONFIRM_TIMEOUT,  /* Wait for HEADER CONFIRM frame timeout */
  54  DFILE_TRANS_FILE_DATA_ACK_TIMEOUT,  /* Heart beat (DATA ACK frame) timeout */
  55  DFILE_TRANS_TRANSFER_DONE_TIMEOUT,  /* Wait for TRANSFER DONE frame timeout */
  57  DFILE_TRANS_FILE_HEADER_TIMEOUT,  /* Receive HEADER frame timeout (partially received) */
  59  /* Receiver wait for TRANSFER DONE ACK frame timeout, for debug purpose, won't report to user */
  94  typedef int32_t (*DFileTransWriteHandle)(const uint8_t *frame, size_t len, void *context);
  113  uint8_t fileTransferReqReceived;  /* Flag: Receive File Transfer REQ frame */
  114  uint8_t fileTransferDoneReceived;  /* Flag: Receive File Transfer Done frame */
  295  int32_t SendFrame(DFileTrans *dFileTrans, uint8_t *frame, size_ ...
  (more matches not shown)
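Line 94 declares the write callback type the transfer layer uses to hand a serialized frame back to the caller. A hypothetical implementation matching that typedef is sketched below; WriteFrameToFd and the assumption that context carries a file descriptor are illustrative only, not dsoftbus code.

    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>

    typedef int32_t (*DFileTransWriteHandle)(const uint8_t *frame, size_t len, void *context);

    /* Hypothetical handler: push the whole frame out through a file
     * descriptor supplied via the context pointer. */
    static int32_t WriteFrameToFd(const uint8_t *frame, size_t len, void *context)
    {
        int fd = *(const int *)context;   /* assumption: context wraps an fd */
        size_t sent = 0;

        while (sent < len) {
            ssize_t n = write(fd, frame + sent, len - sent);
            if (n < 0)
                return -1;                /* real code would map this to an NSTACKX error code */
            sent += (size_t)n;
        }
        return 0;                         /* usable as: DFileTransWriteHandle h = WriteFrameToFd; */
    }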
/third_party/ffmpeg/libavcodec/ |
imm5.c
  86  static int imm5_decode_frame(AVCodecContext *avctx, AVFrame *frame, ...  (argument in imm5_decode_frame())
  141  ret = avcodec_receive_frame(codec_avctx, frame);  (in imm5_decode_frame())
sgienc.c
  94  const AVFrame *frame, int *got_packet)  (in encode_frame())
  97  const AVFrame * const p = frame;  (in encode_frame())
  93  encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)  (argument in encode_frame())
qsvenc_hevc.c
  209  const AVFrame *frame, int *got_packet)  (in qsv_enc_frame())
  213  return ff_qsv_encode(avctx, &q->qsv, pkt, frame, got_packet);  (in qsv_enc_frame())
  239  { "begin_only", "Output an IDR-frame only at the beginning of the stream", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, 0, 0, VE, "idr_interval" },
  259  { "gpb", "1: GPB (generalized P/B frame); 0: regular P frame", OFFSET(qsv.gpb), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE},
  208  qsv_enc_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *frame, int *got_packet)  (argument in qsv_enc_frame())
wcmv.c
  44  static int decode_frame(AVCodecContext *avctx, AVFrame *frame, ...  (argument in decode_frame())
  203  if ((ret = av_frame_ref(frame, s->prev_frame)) < 0)  (in decode_frame())
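The wcmv.c hit reuses the decoder's stored previous picture via av_frame_ref() instead of copying pixels. Below is a minimal sketch of that refcounting pattern; output_previous_frame and its prev_frame argument are illustrative, while the real decoder keeps prev_frame in its private context.

    #include <libavutil/frame.h>

    static int output_previous_frame(AVFrame *out, AVFrame *prev_frame)
    {
        av_frame_unref(out);                 /* dst must be clean before av_frame_ref() */
        return av_frame_ref(out, prev_frame); /* refcounted reference, no pixel copy; < 0 on error */
    }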
/third_party/ffmpeg/libavutil/ |
Makefile
  35  frame.h \
  135  frame.o \
/third_party/backends/backend/ |
pieusb_scancmd.h
  242  SANE_Int index;  /* scan frame index (0-7) */
  333  void sanei_pieusb_cmd_get_scan_frame(SANE_Int device_number, SANE_Int index, struct Pieusb_Scan_Frame* frame, struct Pieusb_Command_Status *status);
  334  void sanei_pieusb_cmd_set_scan_frame(SANE_Int device_number, SANE_Int index, struct Pieusb_Scan_Frame* frame, struct Pieusb_Command_Status *status);
/third_party/libunwind/libunwind/include/tdep-x86_64/ |
libunwind_i.h
  45  UNW_X86_64_FRAME_ALIGNED = -3,  /* frame stack pointer aligned */
  47  UNW_X86_64_FRAME_SIGRETURN = -1,  /* special sigreturn frame */
  57  int64_t last_frame : 1;  /* non-zero if last frame in chain */
  86  X86_64_SCF_NONE,  /* no signal frame encountered */
  88  X86_64_SCF_FREEBSD_SIGFRAME,  /* FreeBSD signal frame */
  90  X86_64_SCF_SOLARIS_SIGFRAME,  /* illumos/Solaris signal frame */
  244  # define tdep_reuse_frame(c,frame) do {} while(0)
  290  int frame);
/third_party/node/tools/inspector_protocol/jinja2/ |
debug.py
  37  """Proxies a traceback frame."""
  66  def make_frame_proxy(frame):
  67  proxy = TracebackFrameProxy(frame)
  118  """Exception info tuple with a proxy around the frame objects."""
  125  # the frame will be an actual traceback (or transparent proxy) if
  175  # save a reference to the next frame if we override the current
  250  # frame for the ProcessedTraceback
  294  # return without this frame
/third_party/nghttp2/src/ |
shrpx_http.cc
  171  const nghttp2_frame *frame, size_t max_payload,  (in select_padding_callback())
  173  return std::min(max_payload, frame->hd.length + get_config()->padding);  (in select_padding_callback())
  170  select_padding_callback(nghttp2_session *session, const nghttp2_frame *frame, size_t max_payload, void *user_data)  (argument in select_padding_callback())
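The shrpx hit implements nghttp2's select-padding callback: given a frame and the maximum payload size nghttp2 allows, it answers with the payload length plus the configured padding, clamped to that maximum. A self-contained C sketch of such a callback follows; PADDING_BYTES stands in for shrpx's get_config()->padding.

    #include <nghttp2/nghttp2.h>

    #define PADDING_BYTES 32  /* placeholder for a configured padding value */

    static ssize_t select_padding_cb(nghttp2_session *session,
                                     const nghttp2_frame *frame,
                                     size_t max_payloadlen, void *user_data)
    {
        size_t padded = frame->hd.length + PADDING_BYTES;
        (void)session;
        (void)user_data;
        /* Never exceed what the library permits for this frame. */
        return (ssize_t)(padded < max_payloadlen ? padded : max_payloadlen);
    }

A callback like this would be registered with nghttp2_session_callbacks_set_select_padding_callback().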
/third_party/libinput/tools/ |
libinput-replay.py
  171  def collect_events(frame):
  174  for (sec, usec, evtype, evcode, value) in frame:
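collect_events() iterates recording frames whose entries are (sec, usec, type, code, value) tuples, i.e. evdev events. The C struct below merely mirrors that tuple for illustration; it is neither libinput's nor the replay tool's own type.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct replay_event {
        uint64_t sec;
        uint64_t usec;
        uint16_t type;   /* e.g. EV_ABS, EV_KEY */
        uint16_t code;   /* e.g. ABS_X, BTN_TOUCH */
        int32_t  value;
    };

    /* Print one recorded frame (an array of events sharing a hardware event batch). */
    static void print_frame(const struct replay_event *frame, size_t nevents)
    {
        for (size_t i = 0; i < nevents; i++)
            printf("%llu.%06llu type=%u code=%u value=%d\n",
                   (unsigned long long)frame[i].sec,
                   (unsigned long long)frame[i].usec,
                   frame[i].type, frame[i].code, frame[i].value);
    }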