/third_party/ffmpeg/libavcodec/ |
H A D | cdgraphics.c | 69 AVFrame *frame; member 80 cc->frame = av_frame_alloc(); in cdg_decode_init() 81 if (!cc->frame) in cdg_decode_init() 94 int lsize = cc->frame->linesize[0]; in cdg_border_preset() 95 uint8_t *buf = cc->frame->data[0]; in cdg_border_preset() 119 uint32_t *palette = (uint32_t *) cc->frame->data[1]; in cdg_load_palette() 128 cc->frame->palette_has_changed = 1; in cdg_load_palette() 137 int stride = cc->frame->linesize[0]; in cdg_tile_block() 138 uint8_t *buf = cc->frame->data[0]; in cdg_tile_block() 209 int stride = cc->frame in cdg_scroll() 265 cdg_decode_frame(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *avpkt) cdg_decode_frame() argument [all...] |
H A D | tmv.c | 39 static int tmv_decode_frame(AVCodecContext *avctx, AVFrame *frame, in tmv_decode_frame() argument 49 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) in tmv_decode_frame() 59 frame->pict_type = AV_PICTURE_TYPE_I; in tmv_decode_frame() 60 frame->key_frame = 1; in tmv_decode_frame() 61 dst = frame->data[0]; in tmv_decode_frame() 63 frame->palette_has_changed = 1; in tmv_decode_frame() 64 memcpy(frame->data[1], ff_cga_palette, 16 * 4); in tmv_decode_frame() 65 memset(frame->data[1] + 16 * 4, 0, AVPALETTE_SIZE - 16 * 4); in tmv_decode_frame() 72 ff_draw_pc_font(dst + x * 8, frame->linesize[0], in tmv_decode_frame() 75 dst += frame in tmv_decode_frame() [all...] |
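The tmv.c hits above show the decode-callback shape shared by most of these decoders: the framework hands in the output AVFrame, the decoder requests a buffer for it, fills the pixels and the PAL8 palette, and reports one finished frame. A minimal sketch of that shape, assuming the FFmpeg-internal ff_get_buffer() and ff_cga_palette symbols named in the excerpt (the remaining names are illustrative):

/* Needs libavcodec/avcodec.h, <string.h>, plus FFmpeg's internal decoder
 * headers for ff_get_buffer() and ff_cga_palette. */
static int pal8_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                             int *got_frame, AVPacket *avpkt)
{
    int ret;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    frame->pict_type = AV_PICTURE_TYPE_I;   /* every frame is intra-coded */
    frame->key_frame = 1;

    /* data[0]/linesize[0]: 8-bit indices; data[1]: 256-entry BGRA palette */
    memcpy(frame->data[1], ff_cga_palette, 16 * 4);
    memset(frame->data[1] + 16 * 4, 0, AVPALETTE_SIZE - 16 * 4);
    frame->palette_has_changed = 1;

    /* ... decode avpkt->data into frame->data[0] row by row ... */

    *got_frame = 1;
    return avpkt->size;
}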
H A D | jvdec.c | 38 AVFrame *frame; member 54 s->frame = av_frame_alloc(); in decode_init() 55 if (!s->frame) in decode_init() 172 if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) in decode_frame() 183 s->frame->data[0] + j * s->frame->linesize[0] + i, in decode_frame() 184 s->frame->linesize[0], &s->bdsp); in decode_frame() 190 av_frame_unref(s->frame); in decode_frame() 191 if ((ret = ff_get_buffer(avctx, s->frame, AV_GET_BUFFER_FLAG_REF)) < 0) in decode_frame() 195 memset(s->frame in decode_frame() [all...] |
H A D | arbc.c | 41 static int fill_tile4(AVCodecContext *avctx, int color, AVFrame *frame) in fill_tile4() argument 66 AV_WB24(&frame->data[0][frame->linesize[0] * (h - j) + 3 * k], color); in fill_tile4() 77 int color, AVFrame *frame) in fill_tileX() 107 AV_WB24(&frame->data[0][frame->linesize[0] * (h - (j + m)) + 3 * (k + n)], color); in fill_tileX() 119 static int decode_frame(AVCodecContext *avctx, AVFrame *frame, in decode_frame() argument 138 if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0) in decode_frame() 142 ret = av_frame_copy(frame, s->prev_frame); in decode_frame() 163 prev_pixels -= fill_tileX(avctx, 1024, 1024, fill, frame); in decode_frame() 76 fill_tileX(AVCodecContext *avctx, int tile_width, int tile_height, int color, AVFrame *frame) fill_tileX() argument [all...] |
H A D | v4l2_buffers.c | 299 static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) in v4l2_buffer_buf_to_swframe() argument 303 frame->format = avbuf->context->av_pix_fmt; in v4l2_buffer_buf_to_swframe() 306 ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]); in v4l2_buffer_buf_to_swframe() 310 frame->linesize[i] = avbuf->plane_info[i].bytesperline; in v4l2_buffer_buf_to_swframe() 311 frame->data[i] = frame->buf[i]->data; in v4l2_buffer_buf_to_swframe() 320 frame->linesize[1] = avbuf->plane_info[0].bytesperline; in v4l2_buffer_buf_to_swframe() 321 frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height; in v4l2_buffer_buf_to_swframe() 327 frame in v4l2_buffer_buf_to_swframe() 340 v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) v4l2_buffer_swframe_to_buf() argument 411 ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out) ff_v4l2_buffer_avframe_to_buf() argument 418 ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf) ff_v4l2_buffer_buf_to_avframe() argument [all...] |
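The v4l2_buffers.c hits map a dequeued V4L2 buffer onto an AVFrame by pointing data[]/linesize[] at the mmap'ed planes; for semi-planar formats delivered as one allocation, the chroma plane is simply an offset into the luma plane, as in the data[1] assignment above. A sketch of that mapping, with an illustrative plane struct standing in for the real V4L2Buffer layout:

#include <stddef.h>
#include <stdint.h>
#include <libavutil/frame.h>

struct plane { uint8_t *data; int bytesperline; int height; };   /* illustrative */

static void map_nv12_single_plane(AVFrame *frame, const struct plane *p)
{
    frame->data[0]     = p->data;
    frame->linesize[0] = p->bytesperline;

    /* the chroma plane follows the luma plane inside the same buffer */
    frame->data[1]     = p->data + (size_t)p->bytesperline * p->height;
    frame->linesize[1] = p->bytesperline;
}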
H A D | dsddec.c | 69 AVFrame *frame; member 78 AVFrame *frame = td->frame; in dsd_channel() local 81 float *dst = ((float **)frame->extended_data)[j]; in dsd_channel() 84 src_next = frame->nb_samples; in dsd_channel() 91 ff_dsd2pcm_translate(&s[j], frame->nb_samples, lsbf, in dsd_channel() 98 static int decode_frame(AVCodecContext *avctx, AVFrame *frame, in decode_frame() argument 104 frame->nb_samples = avpkt->size / avctx->ch_layout.nb_channels; in decode_frame() 106 if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) in decode_frame() 109 td.frame in decode_frame() [all...] |
H A D | msrle.c | 44 AVFrame *frame; member 74 s->frame = av_frame_alloc(); in msrle_decode_init() 75 if (!s->frame) in msrle_decode_init() 97 if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0) in msrle_decode_frame() 101 s->frame->palette_has_changed = ff_copy_palette(s->pal, avpkt, avctx); in msrle_decode_frame() 104 memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE); in msrle_decode_frame() 110 uint8_t *ptr = s->frame->data[0]; in msrle_decode_frame() 129 ptr += s->frame->linesize[0]; in msrle_decode_frame() 133 ff_msrle_decode(avctx, s->frame, avctx->bits_per_coded_sample, &s->gb); in msrle_decode_frame() 136 if ((ret = av_frame_ref(rframe, s->frame)) < in msrle_decode_frame() [all...] |
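cdgraphics.c, jvdec.c and msrle.c all follow the persistent-frame pattern: the private context owns one AVFrame across packets, ff_reget_buffer() makes it writable again for each packet, and the result is ref'ed rather than copied into the caller's frame. A sketch under those assumptions, with MyContext standing in for the real private structs:

/* Needs libavcodec/avcodec.h, <string.h>, plus FFmpeg's internal decoder
 * headers for ff_reget_buffer(). */
typedef struct MyContext {
    AVFrame *frame;
    uint32_t pal[AVPALETTE_COUNT];
} MyContext;

static av_cold int my_decode_init(AVCodecContext *avctx)
{
    MyContext *s = avctx->priv_data;
    s->frame = av_frame_alloc();
    return s->frame ? 0 : AVERROR(ENOMEM);
}

static int my_decode_frame(AVCodecContext *avctx, AVFrame *rframe,
                           int *got_frame, AVPacket *avpkt)
{
    MyContext *s = avctx->priv_data;
    int ret;

    if ((ret = ff_reget_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE);
    /* ... apply this packet's RLE deltas onto s->frame->data[0] ... */

    if ((ret = av_frame_ref(rframe, s->frame)) < 0)
        return ret;

    *got_frame = 1;
    return avpkt->size;
}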
/third_party/ffmpeg/libavfilter/ |
H A D | vf_colortemperature.c | 107 AVFrame *frame = arg; in temperature_slice8() local 108 const int width = frame->width; in temperature_slice8() 109 const int height = frame->height; in temperature_slice8() 115 const int glinesize = frame->linesize[0]; in temperature_slice8() 116 const int blinesize = frame->linesize[1]; in temperature_slice8() 117 const int rlinesize = frame->linesize[2]; in temperature_slice8() 118 uint8_t *gptr = frame->data[0] + slice_start * glinesize; in temperature_slice8() 119 uint8_t *bptr = frame->data[1] + slice_start * blinesize; in temperature_slice8() 120 uint8_t *rptr = frame->data[2] + slice_start * rlinesize; in temperature_slice8() 148 AVFrame *frame in temperature_slice16() local 190 AVFrame *frame = arg; temperature_slice8p() local 229 AVFrame *frame = arg; temperature_slice16p() local 266 filter_frame(AVFilterLink *inlink, AVFrame *frame) filter_frame() argument [all...] |
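vf_colortemperature.c (and vf_colorcontrast.c below) process planar GBR frames with slice threading: each job takes a contiguous band of rows and steps the three plane pointers by their own linesize. A sketch of the row-range split, with the per-pixel math elided:

#include <libavfilter/avfilter.h>
#include <libavutil/frame.h>

static int slice8(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AVFrame *frame = arg;
    const int height      = frame->height;
    const int width       = frame->width;
    const int slice_start = (height *  jobnr)      / nb_jobs;
    const int slice_end   = (height * (jobnr + 1)) / nb_jobs;
    uint8_t *gptr = frame->data[0] + slice_start * frame->linesize[0];
    uint8_t *bptr = frame->data[1] + slice_start * frame->linesize[1];
    uint8_t *rptr = frame->data[2] + slice_start * frame->linesize[2];

    for (int y = slice_start; y < slice_end; y++) {
        for (int x = 0; x < width; x++) {
            /* ... adjust gptr[x], bptr[x], rptr[x] in place ... */
        }
        gptr += frame->linesize[0];
        bptr += frame->linesize[1];
        rptr += frame->linesize[2];
    }
    return 0;
}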
H A D | vf_colorcontrast.c | 95 AVFrame *frame = arg; in colorcontrast_slice8() local 96 const int width = frame->width; in colorcontrast_slice8() 97 const int height = frame->height; in colorcontrast_slice8() 100 const int glinesize = frame->linesize[0]; in colorcontrast_slice8() 101 const int blinesize = frame->linesize[1]; in colorcontrast_slice8() 102 const int rlinesize = frame->linesize[2]; in colorcontrast_slice8() 103 uint8_t *gptr = frame->data[0] + slice_start * glinesize; in colorcontrast_slice8() 104 uint8_t *bptr = frame->data[1] + slice_start * blinesize; in colorcontrast_slice8() 105 uint8_t *rptr = frame->data[2] + slice_start * rlinesize; in colorcontrast_slice8() 147 AVFrame *frame in colorcontrast_slice16() local 201 AVFrame *frame = arg; colorcontrast_slice8p() local 251 AVFrame *frame = arg; colorcontrast_slice16p() local 300 filter_frame(AVFilterLink *link, AVFrame *frame) filter_frame() argument [all...] |
H A D | af_dynaudnorm.c | 100 { "framelen", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS }, 101 { "f", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS }, 122 { "overlap", "set the frame overlap", OFFSET(overlap), AV_OPT_TYPE_DOUBLE, {.dbl=.0}, 0.0, 1.0, FLAGS }, 123 { "o", "set the frame overlap", OFFSET(overlap), AV_OPT_TYPE_DOUBLE, {.dbl=.0}, 0.0, 1.0, FLAGS }, 318 av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len); in config_input() 378 static double find_peak_magnitude(AVFrame *frame, int channel) in find_peak_magnitude() argument 383 for (int c = 0; c < frame->ch_layout.nb_channels; c++) { in find_peak_magnitude() 384 double *data_ptr = (double *)frame->extended_data[c]; in find_peak_magnitude() 386 for (int i = 0; i < frame->nb_samples; i++) in find_peak_magnitude() 390 double *data_ptr = (double *)frame in find_peak_magnitude() 399 compute_frame_rms(AVFrame *frame, int channel) compute_frame_rms() argument 425 get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel) get_max_local_gain() argument 530 bypass_channel(DynamicAudioNormalizerContext *s, AVFrame *frame, int ch) bypass_channel() argument 537 perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame) perform_dc_correction() argument 582 compute_frame_std_dev(DynamicAudioNormalizerContext *s, AVFrame *frame, int channel) compute_frame_std_dev() argument 608 perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame) perform_compression() argument 658 analyze_frame(DynamicAudioNormalizerContext *s, AVFilterLink *outlink, AVFrame **frame) analyze_frame() argument 726 amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *in, AVFrame *frame, int enabled) amplify_frame() argument [all...] |
H A D | vf_fieldhint.c | 44 AVFrame *frame[3]; member 124 av_frame_free(&s->frame[0]); in filter_frame() 125 s->frame[0] = s->frame[1]; in filter_frame() 126 s->frame[1] = s->frame[2]; in filter_frame() 127 s->frame[2] = in; in filter_frame() 128 if (!s->frame[1]) in filter_frame() 130 else if (!s->frame[0]) { in filter_frame() 131 s->frame[ in filter_frame() [all...] |
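vf_fieldhint.c keeps the previous, current and next input in frame[0..2] and shifts the window on every new frame. A sketch of that rotation, assuming an illustrative FrameWindow context; padding the leading edge with av_frame_clone() is an assumption here, not necessarily the filter's exact behaviour:

#include <libavutil/error.h>
#include <libavutil/frame.h>

typedef struct FrameWindow {          /* illustrative stand-in for the filter context */
    AVFrame *frame[3];
} FrameWindow;

static int push_input(FrameWindow *s, AVFrame *in)
{
    av_frame_free(&s->frame[0]);      /* drop the oldest reference   */
    s->frame[0] = s->frame[1];
    s->frame[1] = s->frame[2];
    s->frame[2] = in;                 /* newest input goes last      */

    if (!s->frame[1])
        return 0;                     /* not enough history yet      */
    if (!s->frame[0]) {               /* pad the leading edge (assumed) */
        s->frame[0] = av_frame_clone(s->frame[1]);
        if (!s->frame[0])
            return AVERROR(ENOMEM);
    }
    return 1;                         /* window full, ready to filter */
}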
H A D | audio.c | 39 AVFrame *frame = NULL; in ff_default_get_audio_buffer() local 78 frame = ff_frame_pool_get(link->frame_pool); in ff_default_get_audio_buffer() 79 if (!frame) in ff_default_get_audio_buffer() 82 frame->nb_samples = nb_samples; in ff_default_get_audio_buffer() 85 frame->channel_layout = link->channel_layout; in ff_default_get_audio_buffer() 89 av_channel_layout_copy(&frame->ch_layout, &link->ch_layout) < 0) { in ff_default_get_audio_buffer() 90 av_frame_free(&frame); in ff_default_get_audio_buffer() 93 frame->sample_rate = link->sample_rate; in ff_default_get_audio_buffer() 95 av_samples_set_silence(frame->extended_data, 0, nb_samples, channels, link->format); in ff_default_get_audio_buffer() 97 return frame; in ff_default_get_audio_buffer() [all...] |
H A D | video.c | 43 AVFrame *frame = NULL; in ff_default_get_video_buffer2() local 52 frame = av_frame_alloc(); in ff_default_get_video_buffer2() 54 if (!frame) in ff_default_get_video_buffer2() 57 ret = av_hwframe_get_buffer(link->hw_frames_ctx, frame, 0); in ff_default_get_video_buffer2() 59 av_frame_free(&frame); in ff_default_get_video_buffer2() 61 return frame; in ff_default_get_video_buffer2() 87 frame = ff_frame_pool_get(link->frame_pool); in ff_default_get_video_buffer2() 88 if (!frame) in ff_default_get_video_buffer2() 91 frame->sample_aspect_ratio = link->sample_aspect_ratio; in ff_default_get_video_buffer2() 93 return frame; in ff_default_get_video_buffer2() [all...] |
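audio.c and video.c above allocate default buffers from libavfilter's internal frame pool (or via av_hwframe_get_buffer() on hardware links). Outside libavfilter, the public-API equivalent is a plain av_frame_get_buffer(), as in this sketch:

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int w, int h)
{
    AVFrame *frame = av_frame_alloc();
    if (!frame)
        return NULL;
    frame->format = fmt;
    frame->width  = w;
    frame->height = h;
    if (av_frame_get_buffer(frame, 0) < 0) {   /* 0 = default alignment */
        av_frame_free(&frame);
        return NULL;
    }
    return frame;
}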
/foundation/CastEngine/castengine_wifi_display/services/codec/src/ |
H A D | media_frame_pipeline.cpp | 18 #include "frame.h" 23 inline bool IsAudioFrame(const Frame::Ptr &frame) in IsAudioFrame() argument 25 RETURN_FALSE_IF_NULL(frame); in IsAudioFrame() 26 return frame->GetCodecId() == CODEC_G711A || frame->GetCodecId() == CODEC_G711U || in IsAudioFrame() 27 frame->GetCodecId() == CODEC_AAC || frame->GetCodecId() == CODEC_PCM; in IsAudioFrame() 63 void FrameSource::DeliverFrame(const Frame::Ptr &frame) in DeliverFrame() argument 65 RETURN_IF_NULL(frame); in DeliverFrame() 66 if (IsAudioFrame(frame)) { in DeliverFrame() [all...] |
/third_party/ffmpeg/doc/examples/ |
H A D | filtering_video.c | 179 static void display_frame(const AVFrame *frame, AVRational time_base) in display_frame() argument 185 if (frame->pts != AV_NOPTS_VALUE) { in display_frame() 189 delay = av_rescale_q(frame->pts - last_pts, in display_frame() 194 last_pts = frame->pts; in display_frame() 198 p0 = frame->data[0]; in display_frame() 200 for (y = 0; y < frame->height; y++) { in display_frame() 202 for (x = 0; x < frame->width; x++) in display_frame() 205 p0 += frame->linesize[0]; in display_frame() 214 AVFrame *frame; in main() local 222 frame in main() [all...] |
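display_frame() in filtering_video.c paces playback from pts: the gap to the previous frame, expressed in the stream time base, is rescaled to microseconds and slept away. A self-contained sketch of that logic (the one-second clamp mirrors the example's handling of pathological gaps):

#include <unistd.h>
#include <libavutil/avutil.h>
#include <libavutil/frame.h>
#include <libavutil/mathematics.h>

static void wait_for_frame(const AVFrame *frame, AVRational time_base)
{
    static int64_t last_pts = AV_NOPTS_VALUE;

    if (frame->pts == AV_NOPTS_VALUE)
        return;
    if (last_pts != AV_NOPTS_VALUE) {
        int64_t delay = av_rescale_q(frame->pts - last_pts,
                                     time_base, AV_TIME_BASE_Q);
        if (delay > 0 && delay < 1000000)      /* skip absurdly long gaps */
            usleep(delay);
    }
    last_pts = frame->pts;
}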
H A D | encode_audio.c | 38 #include <libavutil/frame.h> 94 static void encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, in encode() argument 99 /* send the frame for encoding */ in encode() 100 ret = avcodec_send_frame(ctx, frame); in encode() 102 fprintf(stderr, "Error sending the frame to the encoder\n"); in encode() 113 fprintf(stderr, "Error encoding audio frame\n"); in encode() 127 AVFrame *frame; in main() local 189 /* frame containing input raw audio */ in main() 190 frame = av_frame_alloc(); in main() 191 if (!frame) { in main() [all...] |
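encode_audio.c drives the public send/receive encode API: one frame in, zero or more packets out, with a NULL frame flushing the encoder at end of stream. A condensed sketch of that loop:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static int encode(AVCodecContext *ctx, AVFrame *frame, AVPacket *pkt, FILE *out)
{
    int ret = avcodec_send_frame(ctx, frame);        /* NULL frame = flush */
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(ctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                                /* need more input / done */
        else if (ret < 0)
            return ret;                              /* real error */

        fwrite(pkt->data, 1, pkt->size, out);
        av_packet_unref(pkt);
    }
    return 0;
}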
H A D | decode_video.c | 51 static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt, in decode() argument 64 ret = avcodec_receive_frame(dec_ctx, frame); in decode() 72 printf("saving frame %3d\n", dec_ctx->frame_number); in decode() 78 pgm_save(frame->data[0], frame->linesize[0], in decode() 79 frame->width, frame->height, buf); in decode() 90 AVFrame *frame; in main() local 148 frame = av_frame_alloc(); in main() 149 if (!frame) { in main() [all...] |
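decode_video.c is the mirror image on the decode side: a packet is sent once, then avcodec_receive_frame() is drained until it returns EAGAIN, since one packet may yield several frames. A condensed sketch:

#include <stdio.h>
#include <libavcodec/avcodec.h>

static void decode(AVCodecContext *dec_ctx, AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0) {
        fprintf(stderr, "Error sending a packet for decoding\n");
        return;
    }
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return;
        else if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return;
        }
        /* frame->data[0] / frame->linesize[0] now hold the decoded picture */
        printf("decoded frame %dx%d\n", frame->width, frame->height);
        av_frame_unref(frame);
    }
}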
/third_party/ffmpeg/tests/api/ |
H A D | api-threadmessage-test.c | 27 #include "libavutil/frame.h" 47 AVFrame *frame; member 59 av_frame_free(&msg->frame); in free_frame() 77 .frame = av_frame_alloc(), in sender_thread() 80 if (!msg.frame) { in sender_thread() 86 val = av_asprintf("frame %d/%d from sender %d", in sender_thread() 89 av_frame_free(&msg.frame); in sender_thread() 95 av_frame_free(&msg.frame); in sender_thread() 98 msg.frame->metadata = meta; in sender_thread() 100 /* allocate a real frame i in sender_thread() [all...] |
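api-threadmessage-test.c exercises libavutil's thread message queue, passing small structs that own an AVFrame between sender and receiver threads. A minimal single-threaded sketch, assuming the av_thread_message_queue_* API shown in the test:

#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/threadmessage.h>

struct message { AVFrame *frame; };

static int queue_demo(void)
{
    AVThreadMessageQueue *queue = NULL;
    struct message msg = { .frame = av_frame_alloc() };
    int ret;

    if (!msg.frame)
        return AVERROR(ENOMEM);

    ret = av_thread_message_queue_alloc(&queue, 16, sizeof(msg));
    if (ret < 0) {
        av_frame_free(&msg.frame);
        return ret;
    }

    /* sender side: ownership of msg.frame moves into the queue on success */
    ret = av_thread_message_queue_send(queue, &msg, 0);
    if (ret < 0)
        av_frame_free(&msg.frame);

    /* receiver side (normally running in another thread) */
    struct message out;
    if (av_thread_message_queue_recv(queue, &out, 0) >= 0)
        av_frame_free(&out.frame);

    av_thread_message_queue_free(&queue);
    return 0;
}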
/foundation/graphic/graphic_2d/rosen/modules/render_service_base/src/render_backend/ |
H A D | drawing_context.cpp | 35 sk_sp<SkSurface> DrawingContext::AcquireSurface(const std::shared_ptr<RSRenderSurfaceFrame>& frame) in AcquireSurface() argument 37 if (frame == nullptr) { in AcquireSurface() 38 LOGE("Failed to acquire Surface, frame is nullptr"); in AcquireSurface() 42 return AcquireSurfaceInGLES(frame); in AcquireSurface() 44 return AcquireSurfaceInRaster(frame); in AcquireSurface() 46 return AcquireSurfaceInVulkan(frame); in AcquireSurface() 89 sk_sp<SkSurface> DrawingContext::AcquireSurfaceInGLES(const std::shared_ptr<RSRenderSurfaceFrame>& frame) in AcquireSurfaceInGLES() argument 102 std::shared_ptr<FrameConfig> frameConfig = frame->frameConfig; in AcquireSurfaceInGLES() 115 sk_sp<SkColorSpace> skColorSpace = GetSkColorSpace(frame); in AcquireSurfaceInGLES() 128 sk_sp<SkSurface> DrawingContext::AcquireSurfaceInRaster(const std::shared_ptr<RSRenderSurfaceFrame>& frame) in AcquireSurfaceInRaster() argument 153 AcquireSurfaceInVulkan(const std::shared_ptr<RSRenderSurfaceFrame>& frame) AcquireSurfaceInVulkan() argument 176 GetSkColorSpace(const std::shared_ptr<RSRenderSurfaceFrame>& frame) GetSkColorSpace() argument [all...] |
/foundation/graphic/graphic_2d/rosen/modules/render_service_base/src/platform/windows/ |
H A D | rs_surface_windows.cpp | 45 void RSSurfaceWindows::SetUiTimeStamp(const std::unique_ptr<RSSurfaceFrame>& frame, uint64_t uiTimestamp) in SetUiTimeStamp() argument 57 auto frame = std::make_unique<RSSurfaceFrameWindows>(width, height); in RequestFrame() local 59 return frame; in RequestFrame() 63 bufferInfo.width = frame->width_; in RequestFrame() 64 bufferInfo.height = frame->height_; in RequestFrame() 70 frame->surface_ = std::make_shared<Drawing::Surface>(); in RequestFrame() 71 if (!frame->surface_->Bind(bufferInfo)) { in RequestFrame() 73 return frame; in RequestFrame() 76 const auto canvas = frame->surface_->GetCanvas(); in RequestFrame() 78 canvas->Translate(0, frame in RequestFrame() 85 FlushFrame(std::unique_ptr<RSSurfaceFrame>& frame, uint64_t uiTimestamp) FlushFrame() argument [all...] |
/foundation/graphic/graphic_2d/rosen/modules/render_service_base/src/platform/darwin/ |
H A D | rs_surface_darwin.cpp | 45 void RSSurfaceDarwin::SetUiTimeStamp(const std::unique_ptr<RSSurfaceFrame>& frame, uint64_t uiTimestamp) in SetUiTimeStamp() argument 56 auto frame = std::make_unique<RSSurfaceFrameDarwin>(width, height); in RequestFrame() local 58 return frame; in RequestFrame() 61 bufferInfo.width = frame->width_; in RequestFrame() 62 bufferInfo.height = frame->height_; in RequestFrame() 68 frame->surface_ = std::make_shared<Drawing::Surface>(); in RequestFrame() 69 if (!frame->surface_->Bind(bufferInfo)) { in RequestFrame() 71 return frame; in RequestFrame() 74 const auto canvas = frame->surface_->GetCanvas(); in RequestFrame() 76 canvas->Translate(0, frame in RequestFrame() 83 FlushFrame(std::unique_ptr<RSSurfaceFrame>& frame, uint64_t uiTimestamp) FlushFrame() argument [all...] |
/foundation/CastEngine/castengine_wifi_display/services/protocol/rtp/src/ |
H A D | rtp_codec_g711.cpp | 69 auto frame = FrameImpl::Create(); in ObtainFrame() local 70 frame->codecId_ = CODEC_G711A; in ObtainFrame() 71 return frame; in ObtainFrame() 83 void RtpEncoderG711::InputFrame(const Frame::Ptr &frame) in InputFrame() argument 85 RETURN_IF_NULL(frame); in InputFrame() 91 cacheFrame_->pts_ = frame->Pts(); in InputFrame() 93 if ((next_pts + 20) < frame->Pts()) { // 20:interval ms in InputFrame() 94 cacheFrame_->pts_ = frame->Pts() - dur; in InputFrame() 98 cacheFrame_->Append(frame->Data() + frame in InputFrame() [all...] |
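RtpEncoderG711 packetizes on a 20 ms cadence (the `next_pts + 20` check above). Since G.711 carries 8000 one-byte samples per second per channel, each 20 ms packet holds 160 bytes per channel:

enum { G711_SAMPLE_RATE = 8000, PACKET_MS = 20 };

static int g711_packet_bytes(int channels)
{
    return G711_SAMPLE_RATE * PACKET_MS / 1000 * channels;   /* 160 * channels */
}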
/third_party/skia/third_party/externals/swiftshader/src/Reactor/ |
H A D | ReactorDebugInfo.cpp | 64 // Return a stack trace with every stack frame address offset by -1. We do this so that we get in getCallerBacktrace() 67 // perform this offsetting on the top-most stack frame, but it doesn't matter as we discard it in getCallerBacktrace() 70 std::vector<bs::frame> result; in getCallerBacktrace() 73 for(bs::frame frame : st) in getCallerBacktrace() 75 result.emplace_back(reinterpret_cast<void *>(reinterpret_cast<size_t>(frame.address()) - 1)); in getCallerBacktrace() 85 static std::unordered_map<bs::frame::native_frame_ptr_t, Location> cache; in getCallerBacktrace() 87 for(bs::frame frame : offsetStackFrames(bs::stacktrace())) in getCallerBacktrace() 91 auto iter = cache.find(frame in getCallerBacktrace() [all...] |
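ReactorDebugInfo.cpp offsets every captured return address by -1 so that symbolization points at the call instruction rather than the statement after it. The same idea expressed with glibc's backtrace(), as a hedged C sketch (the original uses boost::stacktrace):

#include <execinfo.h>
#include <stdint.h>

#define MAX_FRAMES 64

static int capture_offset_backtrace(void *out[], int max)
{
    void *raw[MAX_FRAMES];
    int n = backtrace(raw, max < MAX_FRAMES ? max : MAX_FRAMES);
    for (int i = 0; i < n; i++)
        out[i] = (void *)((uintptr_t)raw[i] - 1);   /* point at the call site */
    return n;
}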
/vendor/hisilicon/hispark_pegasus/demo/oledplayer_demo/oled/ |
H A D | img2code.py | 37 def resize_and_binarize_image(frame, width, height, threshold): 41 frame = cv2.resize(frame, (width, height)) # scale to target size 42 frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) # convert to grayscale 43 _, binary = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY) # binarize 48 pixels = frame[r, colStart: colStart + PIXEL_PER_BYTE] 53 cv2.imwrite(os.path.join('debug', str(frameCount) + '.png'), frame) 60 def convert_frame_to_bytes(frame): 61 return resize_and_binarize_image(frame, TARGET_WIDT [all...] |
/vendor/hisilicon/hispark_pegasus/demo/ssd1306_demo/ |
H A D | img2code.py | 38 def resize_and_binarize_image(frame, width, height, threshold): 42 frame = cv2.resize(frame, (width, height)) # scale to target size 43 frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) # convert to grayscale 44 _, binary = cv2.threshold(frame, threshold, 255, cv2.THRESH_BINARY) # binarize 49 pixels = frame[r, colStart: colStart + PIXEL_PER_BYTE] 54 cv2.imwrite(os.path.join('debug', str(frameCount) + '.png'), frame) 61 def convert_frame_to_bytes(frame): 62 return resize_and_binarize_image(frame, TARGET_WIDT [all...] |
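Both img2code.py copies pack PIXEL_PER_BYTE (8) thresholded pixels into one byte of the OLED frame buffer. A hedged C equivalent of that packing step; MSB-first bit order is an assumption, since the excerpt does not show the script's exact bit order:

#include <stddef.h>
#include <stdint.h>

#define PIXEL_PER_BYTE 8

static void pack_row(const uint8_t *binary_row, size_t width, uint8_t *out)
{
    for (size_t col = 0; col < width; col += PIXEL_PER_BYTE) {
        uint8_t byte = 0;
        for (size_t bit = 0; bit < PIXEL_PER_BYTE && col + bit < width; bit++)
            if (binary_row[col + bit])       /* 255 after thresholding */
                byte |= 0x80 >> bit;         /* MSB-first (assumed)    */
        out[col / PIXEL_PER_BYTE] = byte;
    }
}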