
Searched refs:data (Results 32201 - 32225 of 49187) sorted by relevance

/test/xts/hats/hdf/codec/hdi_idlomx/
codec_function_utils.cpp
104 std::vector<int8_t> data; in InitBufferHandleParameter() local
110 ObjectToVector(type, data); in InitBufferHandleParameter()
111 ret = component->SetParameter(OMX_IndexParamUseBufferType, data); in InitBufferHandleParameter()
/third_party/ffmpeg/libavcodec/
dpcm.c
27 * for more information on the specific data formats, visit:
224 bytestream2_init(&gb, avpkt->data, buf_size); in dpcm_decode_frame()
261 output_samples = (int16_t *)frame->data[0]; in dpcm_decode_frame()
340 uint8_t *output_samples_u8 = frame->data[0], in dpcm_decode_frame()
dvbsubenc.c
362 uint32_t x= ((uint32_t*)h->rects[clut_id]->data[1])[i]; in dvbsub_encode()
467 h->rects[object_id]->data[0], in dvbsub_encode()
476 h->rects[object_id]->data[0] + h->rects[object_id]->w, in dvbsub_encode()
libjxlenc.c
355 if (sd && sd->size && JxlEncoderSetICCProfile(ctx->encoder, sd->data, sd->size) != JXL_ENC_SUCCESS) in libjxl_encode_frame()
370 if (JxlEncoderAddImageFrame(ctx->options, &jxl_fmt, frame->data[0], jxl_fmt.align * info.ysize) != JXL_ENC_SUCCESS) { in libjxl_encode_frame()
388 /* all data passed has been encoded */ in libjxl_encode_frame()
416 memcpy(pkt->data, ctx->buffer, bytes_written); in libjxl_encode_frame()
hevc_sei.c
141 sps = (HEVCSPS*)ps->sps_list[s->active_seq_parameter_set_id]->data; in decode_nal_sei_pic_timing()
195 bytestream2_get_bufferu(gb, buf_ref->data, size); in decode_nal_sei_user_data_unregistered()
196 buf_ref->data[size] = 0; in decode_nal_sei_user_data_unregistered()
fic.c
189 uint8_t* dst = ctx->frame->data[p] + (y_off >> !!p) * stride; in fic_decode_slice()
246 dstptr[i] = ctx->final_frame->data[i] + in fic_draw_cursor()
274 const uint8_t *src = avpkt->data; in fic_decode_frame()
288 av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n"); in fic_decode_frame()
314 /* Skip cursor data. */ in fic_decode_frame()
328 "Cursor data too small. Skipping cursor.\n"); in fic_decode_frame()
357 /* First slice offset and remaining data. */ in fic_decode_frame()
362 av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n"); in fic_decode_frame()
366 /* Allocate slice data. */ in fic_decode_frame()
370 av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data in fic_decode_frame()
[all...]
ffwavesynth.c
429 ts = AV_RL64(packet->data); in wavesynth_decode()
432 duration = AV_RL32(packet->data + 8); in wavesynth_decode()
439 pcm = (int16_t *)frame->data[0]; in wavesynth_decode()
aic.c
334 Y = ctx->frame->data[0] + mb_x * 16 + y_pos * ystride; in aic_decode_slice()
336 C[i] = ctx->frame->data[i + 1] + mb_x * 8 in aic_decode_slice()
388 const uint8_t *buf = avpkt->data; in aic_decode_frame()
rscc.c
28 * Lossless codec, data stored in tiles, with optional deflate compression.
31 * and it can be deflated or not. Similarly, pixel data comes after the header
162 bytestream2_init(gbc, avpkt->data, avpkt->size); in rscc_decode_frame()
224 * and point it to read the newly uncompressed data */ in rscc_decode_frame()
271 /* Extract how much pixel data the tiles contain */ in rscc_decode_frame()
322 /* Pointer to actual pixels, will be updated when data is consumed */ in rscc_decode_frame()
325 uint8_t *dst = ctx->reference->data[0] + ctx->reference->linesize[0] * in rscc_decode_frame()
351 memcpy(frame->data[1], ctx->palette, AVPALETTE_SIZE); in rscc_decode_frame()
smc.c
28 * The SMC decoder outputs PAL8 colorspace data.
98 uint8_t * const pixels = s->frame->data[0]; in smc_decode_stream()
118 memcpy(s->frame->data[1], s->pal, AVPALETTE_SIZE); in smc_decode_stream()
434 const uint8_t *buf = avpkt->data; in smc_decode_frame()
/third_party/elfutils/src/
elfclassify.c
364 Elf_Data *data = elf_getdata_rawchunk (elf, dyn_seg.p_offset, in run_classify() local
367 if (data != NULL) in run_classify()
371 GElf_Dyn *dyn = gelf_getdyn (data, dyn_idx, &dyn_storage); in run_classify()
/third_party/ffmpeg/libavfilter/
vf_avgblur_vulkan.c
212 AVVkFrame *in = (AVVkFrame *)in_f->data[0]; in process_frames()
213 AVVkFrame *tmp = (AVVkFrame *)tmp_f->data[0]; in process_frames()
214 AVVkFrame *out = (AVVkFrame *)out_f->data[0]; in process_frames()
vf_deinterlace_vaapi.c
219 input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3]; in deint_vaapi_filter_frame()
222 ctx->frame_queue[current_frame_index - i - 1]->data[3]; in deint_vaapi_filter_frame()
225 ctx->frame_queue[current_frame_index + i + 1]->data[3]; in deint_vaapi_filter_frame()
vf_convolution_opencl.c
215 for (p = 0; p < FF_ARRAY_ELEMS(output->data); p++) { in convolution_opencl_filter_frame()
216 src = (cl_mem) input->data[p]; in convolution_opencl_filter_frame()
217 dst = (cl_mem)output->data[p]; in convolution_opencl_filter_frame()
avf_abitscope.c
167 uint8_t *dst = outpicref->data[0] + (b * h + j) * outpicref->linesize[0] + w * ch * 4; \
195 uint8_t *dst = outpicref->data[0] + w * ch * 4 + wb * b * 4 + \
224 memset(outpicref->data[0] + i * outpicref->linesize[0], 0, outlink->w * 4); in filter_frame()
vf_frei0r.c
378 (const uint32_t *)in->data[0], in filter_frame()
379 (uint32_t *)out->data[0]); in filter_frame()
493 NULL, (uint32_t *)frame->data[0]); in source_request_frame()
vf_zoompan.c
214 for (k = 0; in->data[k]; k++) in output_single_frame()
215 input[k] = in->data[k] + py[k] * in->linesize[k] + px[k]; in output_single_frame()
228 sws_scale(s->sws, (const uint8_t *const *)&input, in->linesize, 0, h, out->data, out->linesize); in output_single_frame()
vf_transpose.c
291 dst = out->data[plane] + start * dstlinesize; in filter_slice()
292 src = in->data[plane]; in filter_slice()
301 dst = out->data[plane] + dstlinesize * (outh - start - 1); in filter_slice()
vf_unsharp_opencl.c
244 for (p = 0; p < FF_ARRAY_ELEMS(output->data); p++) { in unsharp_opencl_filter_frame()
245 src = (cl_mem) input->data[p]; in unsharp_opencl_filter_frame()
246 dst = (cl_mem)output->data[p]; in unsharp_opencl_filter_frame()
vf_idet.c
135 uint8_t *prev = &idet->prev->data[i][y*refs]; in filter()
136 uint8_t *cur = &idet->cur ->data[i][y*refs]; in filter()
137 uint8_t *next = &idet->next->data[i][y*refs]; in filter()
vf_overlay_qsv.c
167 fctx = (AVHWFramesContext *)link->hw_frames_ctx->data; in have_alpha_planar()
284 AVHWFramesContext *hw_frame0 = (AVHWFramesContext *)in0->hw_frames_ctx->data; in config_output()
285 AVHWFramesContext *hw_frame1 = (AVHWFramesContext *)in1->hw_frames_ctx->data; in config_output()
vf_program_opencl.c
108 for (plane = 0; plane < FF_ARRAY_ELEMS(output->data); plane++) { in program_opencl_run()
109 dst = (cl_mem)output->data[plane]; in program_opencl_run()
131 src = (cl_mem)ctx->frames[input]->data[plane]; in program_opencl_run()
/third_party/ffmpeg/libavformat/
amvenc.c
192 memset(amv->apad->data, 0, amv->ablock_align); in amv_init()
193 AV_WL32(amv->apad->data + 4, amv->aframe_size); in amv_init()
315 avio_write(s->pb, pkt->data, pkt->size); in amv_write_packet_internal()
avformat.h
37 * data in a specified container format. It also has an @ref lavf_io
38 * "I/O module" which supports a number of protocols for accessing the data (e.g.
95 * Demuxers read a media file and split it into chunks of data (@em packets). A
122 * for reading input data instead of lavf internal I/O layer.
142 * cannot know how to interpret raw video data otherwise. If the format turns
160 * Reading data from an opened AVFormatContext is done by repeatedly calling
162 * containing encoded data for one AVStream, identified by
165 * caller wishes to decode the data.
183 * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write
226 * The data i
[all...]
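The avformat.h hits above quote the lavf documentation for the demuxing API: open an input, then repeatedly call av_read_frame() to obtain one encoded AVPacket per call, each belonging to a single AVStream. A minimal sketch of that loop is below; it is not part of the indexed sources, and the function name read_all_packets and the url parameter are illustrative, but the libavformat calls themselves (avformat_open_input, avformat_find_stream_info, av_read_frame, av_packet_unref, avformat_close_input) are the standard public API.

```c
#include <libavformat/avformat.h>

/* Hypothetical helper: open `url` and drain every packet from it. */
static int read_all_packets(const char *url)
{
    AVFormatContext *fmt = NULL;
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);

    /* Open the container and probe its streams. */
    if ((ret = avformat_open_input(&fmt, url, NULL, NULL)) < 0)
        goto end;
    if ((ret = avformat_find_stream_info(fmt, NULL)) < 0)
        goto end;

    /* Each returned packet holds encoded data for one AVStream,
     * identified by pkt->stream_index. */
    while ((ret = av_read_frame(fmt, pkt)) >= 0)
        av_packet_unref(pkt);
    if (ret == AVERROR_EOF)
        ret = 0;

end:
    avformat_close_input(&fmt);
    av_packet_free(&pkt);
    return ret;
}
```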
aadec.c
256 avio_skip(s->pb, 4); // data start offset in aa_read_packet()
271 av_tea_crypt(c->tea_ctx, pkt->data, pkt->data, in aa_read_packet()
281 pkt->data += c->seek_offset; in aa_read_packet()
