/kernel/linux/linux-6.6/drivers/gpu/drm/msm/ |
H A D | msm_gem_submit.c |
    submit_create(): struct msm_gem_submit *submit; (local)
        sz = struct_size(submit, bos, nr_bos) +
             ((u64)nr_cmds * sizeof(submit->cmd[0]));
        submit = kzalloc(sz, GFP_KERNEL);
        if (!submit) ...
        submit->hw_fence = msm_fence_alloc();
        if (IS_ERR(submit->hw_fence)) {
            ret = PTR_ERR(submit->hw_fence);
            kfree(submit);
        }
        ret = drm_sched_job_init(&submit ...
    __msm_gem_submit_destroy(): struct msm_gem_submit *submit = ... (local)
    submit_lookup_objects(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) (argument)
    submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) (argument)
    submit_cleanup_bo(struct msm_gem_submit *submit, int i, unsigned cleanup_flags) (argument)
    submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) (argument)
    submit_lock_objects(struct msm_gem_submit *submit) (argument)
    submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) (argument)
    submit_pin_objects(struct msm_gem_submit *submit) (argument)
    submit_attach_object_fences(struct msm_gem_submit *submit) (argument)
    submit_bo(struct msm_gem_submit *submit, uint32_t idx, struct drm_gem_object **obj, uint64_t *iova, bool *valid) (argument)
    submit_reloc(struct msm_gem_submit *submit, struct drm_gem_object *obj, uint32_t offset, uint32_t nr_relocs, struct drm_msm_gem_submit_reloc *relocs) (argument)
    submit_cleanup(struct msm_gem_submit *submit, bool error) (argument)
    msm_submit_retire(struct msm_gem_submit *submit) (argument)
    msm_parse_deps(struct msm_gem_submit *submit, struct drm_file *file, uint64_t in_syncobjs_addr, uint32_t nr_in_syncobjs, size_t syncobj_stride) (argument)
    msm_ioctl_gem_submit(): struct msm_gem_submit *submit = NULL; (local)
    [all...] |
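The submit_create() fragment above shows the single-allocation pattern for a submit object: struct_size() sizes the header plus a trailing bos[] flexible array, the cmd[] array is tacked on behind it, and the whole thing is allocated and freed as one unit. A minimal userland sketch of the same layout, assuming plain calloc() in place of kzalloc() and illustrative fake_* names (none of these are the kernel's types):

    /* Minimal sketch of the submit_create() sizing pattern: one
     * allocation holds the header plus two trailing arrays, with the
     * size computed up front.  Names are illustrative, and calloc()
     * stands in for kzalloc(). */
    #include <stdlib.h>
    #include <stdint.h>
    #include <stddef.h>

    struct fake_bo  { void *obj; uint64_t iova; };
    struct fake_cmd { uint32_t type, size; };

    struct fake_submit {
            unsigned nr_bos, nr_cmds;
            struct fake_cmd *cmd;      /* points into the same allocation */
            struct fake_bo bos[];      /* flexible array member */
    };

    static struct fake_submit *fake_submit_create(unsigned nr_bos,
                                                  unsigned nr_cmds)
    {
            /* struct_size(submit, bos, nr_bos) + nr_cmds * sizeof(cmd[0]) */
            size_t sz = sizeof(struct fake_submit)
                      + (size_t)nr_bos  * sizeof(struct fake_bo)
                      + (size_t)nr_cmds * sizeof(struct fake_cmd);
            struct fake_submit *submit = calloc(1, sz);

            if (!submit)
                    return NULL;
            submit->nr_bos = nr_bos;
            submit->nr_cmds = nr_cmds;
            /* the cmd array lives right behind the bos[] array */
            submit->cmd = (struct fake_cmd *)&submit->bos[nr_bos];
            return submit;
    }

The kernel's struct_size() additionally saturates on multiplication overflow; the open-coded arithmetic here skips that for brevity.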
H A D | msm_gpu.c |
    msm_gpu_crashstate_capture(struct msm_gpu *gpu, struct msm_gem_submit *submit, char *comm, char *cmd) (argument; two definitions in this file)
        if (submit) {
            state->bos = kcalloc(submit->nr_bos, ...
            for (i = 0; state->bos && i < submit->nr_bos; i++) {
                msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
                                          submit->bos[i].iova,
                                          should_dump(submit, i));
    find_submit(): struct msm_gem_submit *submit; (local)
        list_for_each_entry(submit, ...
    get_comm_cmdline(struct msm_gem_submit *submit, char **comm, char **cmd) (argument)
    recover_worker(): struct msm_gem_submit *submit; (local)
    fault_worker(): struct msm_gem_submit *submit; (local)
    retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, struct msm_gem_submit *submit) (argument)
    retire_submits(): struct msm_gem_submit *submit = NULL; (local)
    msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) (argument)
    [all...] |
H A D | msm_rd.c |
    (comments) ... cmdstream for the next fence, you can narrow down which process and submit ...
    (comments) ... all (non-written) buffers in the submit, rather than just cmdstream bo's.
    snapshot_buf(struct msm_rd_state *rd, struct msm_gem_submit *submit, int idx, uint64_t iova, uint32_t size, bool full) (argument)
        struct drm_gem_object *obj = submit->bos[idx].obj;
        offset = iova - submit->bos[idx].iova;
        iova = submit->bos[idx].iova;
        if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ))
    msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, ... (argument)
        task = pid_task(submit->pid, PIDTYPE_PID);
        pid_nr(submit ...
    [all...] |
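snapshot_buf() above either snapshots a whole buffer or a caller-specified window inside it, converting an absolute IOVA into an offset relative to the buffer's own mapping. A small sketch of that range math, with a hypothetical dump_range type standing in for the driver's locals:

    /* Sketch of the range selection snapshot_buf() implies: given a
     * buffer mapped at bo_iova and a requested (iova, size) window,
     * either dump the window (iova != 0) or the whole buffer.  Types
     * and names are illustrative. */
    #include <stdint.h>

    struct dump_range { uint64_t iova; uint32_t offset; uint32_t size; };

    static struct dump_range pick_range(uint64_t bo_iova, uint32_t bo_size,
                                        uint64_t iova, uint32_t size)
    {
            struct dump_range r;

            if (iova) {                 /* caller asked for a sub-range */
                    r.offset = (uint32_t)(iova - bo_iova);
                    r.iova = iova;
                    r.size = size;
            } else {                    /* dump the whole buffer */
                    r.offset = 0;
                    r.iova = bo_iova;
                    r.size = bo_size;
            }
            return r;
    }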
/kernel/linux/linux-6.6/drivers/gpu/drm/virtio/ |
H A D | virtgpu_submit.c |
    virtio_gpu_do_fence_wait(struct virtio_gpu_submit *submit, ... (argument)
        u32 context = submit->fence_ctx + submit->ring_idx;
    virtio_gpu_dma_fence_wait(struct virtio_gpu_submit *submit, ... (argument)
        err = virtio_gpu_do_fence_wait(submit, f);
    virtio_gpu_parse_deps(struct virtio_gpu_submit *submit) (argument)
        struct drm_virtgpu_execbuffer *exbuf = submit->exbuf;
        ret = drm_syncobj_find_fence(submit->file, syncobj_desc.handle, ...
        ret = virtio_gpu_dma_fence_wait(submit, fence);
        syncobjs[i] = drm_syncobj_find(submit ...
    virtio_gpu_parse_post_deps(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_process_post_deps(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_init_submit_buflist(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_cleanup_submit(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_submit(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_complete_submit(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_init_submit(struct virtio_gpu_submit *submit, struct drm_virtgpu_execbuffer *exbuf, struct drm_device *dev, struct drm_file *file, u64 fence_ctx, u32 ring_idx) (argument)
    virtio_gpu_wait_in_fence(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_install_out_fence_fd(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_lock_buflist(struct virtio_gpu_submit *submit) (argument)
    virtio_gpu_execbuffer_ioctl(): struct virtio_gpu_submit submit; (local)
    [all...] |
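The fence-wait fragments show virtio-gpu deriving a per-ring fence context (fence_ctx + ring_idx) and then waiting only on fences outside that context, since same-context fences are already ordered by the ring itself. A hedged sketch of that filter, with a fake_fence stand-in for struct dma_fence:

    /* Sketch of the in-fence filter virtio_gpu_do_fence_wait() applies:
     * fences that belong to this submit's own context need no explicit
     * wait; only foreign fences do.  dma_fence details are elided. */
    #include <stdint.h>
    #include <stdbool.h>

    struct fake_fence { uint64_t context; };

    static bool needs_wait(uint64_t fence_ctx, uint32_t ring_idx,
                           const struct fake_fence *f)
    {
            uint64_t context = fence_ctx + ring_idx;  /* submit's own context */

            return f->context != context;             /* foreign fence: wait */
    }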
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/ |
H A D | etnaviv_gem_submit.c |
    submit_create(): struct etnaviv_gem_submit *submit; (local)
        size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
        submit = kzalloc(sz, GFP_KERNEL);
        if (!submit) ...
        submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request), ...
        if (!submit->pmrs) {
            kfree(submit);
        submit->nr_pmrs = nr_pmrs;
        submit ...
    submit_lookup_objects(struct etnaviv_gem_submit *submit, struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos, unsigned nr_bos) (argument)
    submit_unlock_object(struct etnaviv_gem_submit *submit, int i) (argument)
    submit_lock_objects(struct etnaviv_gem_submit *submit, struct ww_acquire_ctx *ticket) (argument)
    submit_fence_sync(struct etnaviv_gem_submit *submit) (argument)
    submit_attach_object_fences(struct etnaviv_gem_submit *submit) (argument)
    submit_pin_objects(struct etnaviv_gem_submit *submit) (argument)
    submit_bo(struct etnaviv_gem_submit *submit, u32 idx, struct etnaviv_gem_submit_bo **bo) (argument)
    submit_reloc(struct etnaviv_gem_submit *submit, void *stream, u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs, u32 nr_relocs) (argument)
    submit_perfmon_validate(struct etnaviv_gem_submit *submit, u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs) (argument)
    submit_cleanup(): struct etnaviv_gem_submit *submit = ... (local)
    etnaviv_submit_put(struct etnaviv_gem_submit *submit) (argument)
    etnaviv_ioctl_gem_submit(): struct etnaviv_gem_submit *submit; (local)
    [all...] |
H A D | etnaviv_sched.c |
    etnaviv_sched_dependency(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
        if (unlikely(submit->in_fence)) {
            fence = submit->in_fence;
            submit->in_fence = NULL;
        for (i = 0; i < submit->nr_bos; i++) {
            struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
    etnaviv_sched_run_job(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
        fence = etnaviv_gpu_submit(submit);
        dev_dbg(submit->gpu->dev, "skipping bad job\n");
    etnaviv_sched_timedout_job(): struct etnaviv_gem_submit *submit ... (local)
    etnaviv_sched_free_job(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
    etnaviv_sched_push_job(struct drm_sched_entity *sched_entity, struct etnaviv_gem_submit *submit) (argument)
    [all...] |
H A D | etnaviv_dump.c |
    etnaviv_core_dump(struct etnaviv_gem_submit *submit) (argument)
        struct etnaviv_gpu *gpu = submit->gpu;
        mutex_lock(&submit->mmu_context->lock);
        mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
        ... mmu_size + gpu->buffer.size + submit->cmdbuf.size;
        for (i = 0; i < submit->nr_bos; i++) {
            obj = submit->bos[i].obj;
        mutex_unlock(&submit->mmu_context->lock);
        etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
        &submit ...
    [all...] |
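etnaviv_core_dump() sizes the dump file before writing it: fixed headers plus the MMU tables, the kernel ring buffer, the command buffer, and one entry per BO. Roughly, and with illustrative names and padding (the driver's exact alignment rules may differ):

    /* Rough shape of the size pass etnaviv_core_dump() makes before
     * allocating the dump buffer.  All names and the padding policy
     * below are illustrative, not the driver's. */
    #include <stddef.h>

    #define DUMP_ALIGN 4096

    static size_t align_up(size_t v)
    {
            return (v + DUMP_ALIGN - 1) & ~(size_t)(DUMP_ALIGN - 1);
    }

    struct fake_bo { size_t size; };

    static size_t dump_size(size_t hdr_size, size_t mmu_size,
                            size_t ring_size, size_t cmdbuf_size,
                            const struct fake_bo *bos, unsigned nr_bos)
    {
            size_t file_size = hdr_size + mmu_size + ring_size + cmdbuf_size;
            unsigned i;

            for (i = 0; i < nr_bos; i++)
                    file_size += align_up(bos[i].size);  /* one entry per BO */

            return align_up(file_size);
    }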
/kernel/linux/linux-6.6/drivers/gpu/drm/etnaviv/ |
H A D | etnaviv_gem_submit.c |
    submit_create(): struct etnaviv_gem_submit *submit; (local)
        size_t sz = size_vstruct(nr_bos, sizeof(submit->bos[0]), sizeof(*submit));
        submit = kzalloc(sz, GFP_KERNEL);
        if (!submit) ...
        submit->pmrs = kcalloc(nr_pmrs, sizeof(struct etnaviv_perfmon_request), ...
        if (!submit->pmrs) {
            kfree(submit);
        submit->nr_pmrs = nr_pmrs;
        submit ...
    submit_lookup_objects(struct etnaviv_gem_submit *submit, struct drm_file *file, struct drm_etnaviv_gem_submit_bo *submit_bos, unsigned nr_bos) (argument)
    submit_unlock_object(struct etnaviv_gem_submit *submit, int i) (argument)
    submit_lock_objects(struct etnaviv_gem_submit *submit, struct ww_acquire_ctx *ticket) (argument)
    submit_fence_sync(struct etnaviv_gem_submit *submit) (argument)
    submit_attach_object_fences(struct etnaviv_gem_submit *submit) (argument)
    submit_pin_objects(struct etnaviv_gem_submit *submit) (argument)
    submit_bo(struct etnaviv_gem_submit *submit, u32 idx, struct etnaviv_gem_submit_bo **bo) (argument)
    submit_reloc(struct etnaviv_gem_submit *submit, void *stream, u32 size, const struct drm_etnaviv_gem_submit_reloc *relocs, u32 nr_relocs) (argument)
    submit_perfmon_validate(struct etnaviv_gem_submit *submit, u32 exec_state, const struct drm_etnaviv_gem_submit_pmr *pmrs) (argument)
    submit_cleanup(): struct etnaviv_gem_submit *submit = ... (local)
    etnaviv_submit_put(struct etnaviv_gem_submit *submit) (argument)
    etnaviv_ioctl_gem_submit(): struct etnaviv_gem_submit *submit; (local)
    [all...] |
H A D | etnaviv_sched.c |
    etnaviv_sched_run_job(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
        fence = etnaviv_gpu_submit(submit);
        dev_dbg(submit->gpu->dev, "skipping bad job\n");
    etnaviv_sched_timedout_job(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
        struct etnaviv_gpu *gpu = submit->gpu;
        if (dma_fence_is_signaled(submit->out_fence))
        etnaviv_core_dump(submit);
        etnaviv_gpu_recover_hang(submit);
    etnaviv_sched_free_job(): struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job); (local)
        etnaviv_submit_put(submit);
    etnaviv_sched_push_job(struct etnaviv_gem_submit *submit) (argument)
    [all...] |
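The 6.6 timeout handler above treats a signaled out_fence as a spurious timeout rather than a hang; the driver also extends the deadline while the front-end is still making forward progress, and only dumps and recovers otherwise. A condensed sketch of that decision, with both inputs reduced to booleans:

    /* Sketch of the decision etnaviv_sched_timedout_job() makes.  The
     * inputs are simplified: the driver actually reads the FE DMA
     * address to detect progress. */
    #include <stdbool.h>

    enum timeout_action { RESUME_JOB, RECOVER_GPU };

    static enum timeout_action on_timeout(bool out_fence_signaled,
                                          bool gpu_made_progress)
    {
            /* fence already signaled: nothing to recover */
            if (out_fence_signaled)
                    return RESUME_JOB;
            /* front-end still moving: give the job more time */
            if (gpu_made_progress)
                    return RESUME_JOB;
            /* real hang: dump state and reset the GPU */
            return RECOVER_GPU;
    }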
H A D | etnaviv_dump.c |
    etnaviv_core_dump(struct etnaviv_gem_submit *submit) (argument)
        struct etnaviv_gpu *gpu = submit->gpu;
        mutex_lock(&submit->mmu_context->lock);
        mmu_size = etnaviv_iommu_dump_size(submit->mmu_context);
        ... mmu_size + gpu->buffer.size + submit->cmdbuf.size;
        for (i = 0; i < submit->nr_bos; i++) {
            obj = submit->bos[i].obj;
        mutex_unlock(&submit->mmu_context->lock);
        etnaviv_core_dump_mmu(&iter, submit->mmu_context, mmu_size);
        &submit ...
    [all...] |
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/ |
H A D | msm_gem_submit.c |
    submit_create(): struct msm_gem_submit *submit; (local)
        uint64_t sz = struct_size(submit, bos, nr_bos) +
                      ((u64)nr_cmds * sizeof(submit->cmd[0]));
        submit = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
        if (!submit) ...
        submit->dev = dev;
        submit->aspace = queue->ctx->aspace;
        submit->gpu = gpu;
        submit->fence = NULL;
        submit ...
    msm_gem_submit_free(struct msm_gem_submit *submit) (argument)
    submit_lookup_objects(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) (argument)
    submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i, bool backoff) (argument)
    submit_lock_objects(struct msm_gem_submit *submit) (argument)
    submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) (argument)
    submit_pin_objects(struct msm_gem_submit *submit) (argument)
    submit_bo(struct msm_gem_submit *submit, uint32_t idx, struct msm_gem_object **obj, uint64_t *iova, bool *valid) (argument)
    submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj, uint32_t offset, uint32_t nr_relocs, uint64_t relocs) (argument)
    submit_cleanup(struct msm_gem_submit *submit) (argument)
    msm_ioctl_gem_submit(): struct msm_gem_submit *submit; (local)
    [all...] |
H A D | msm_rd.c |
    (comments) ... cmdstream for the next fence, you can narrow down which process and submit ...
    (comments) ... all (non-written) buffers in the submit, rather than just cmdstream bo's.
    /* current submit to read out: */ struct msm_gem_submit *submit; (member)
        /* ... struct_mutex held by submit code (otherwise we could ... */
    snapshot_buf(struct msm_rd_state *rd, struct msm_gem_submit *submit, int idx, uint64_t iova, uint32_t size, bool full) (argument)
        struct msm_gem_object *obj = submit->bos[idx].obj;
        offset = iova - submit->bos[idx].iova;
        iova = submit->bos[idx].iova;
        if (!(submit ...
    msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) (argument)
    [all...] |
H A D | msm_gpu.c |
    msm_gpu_crashstate_capture(struct msm_gpu *gpu, struct msm_gem_submit *submit, char *comm, char *cmd) (argument; two definitions in this file)
        if (submit) {
            for (i = 0; i < submit->nr_bos; i++)
                if (should_dump(submit, i))
            for (i = 0; i < submit->nr_cmds; i++)
                if (!should_dump(submit, submit->cmd[i].idx))
            for (i = 0; state->bos && i < submit->nr_bos; i++) {
                if (should_dump(submit, i)) {
                    msm_gpu_crashstate_get_bo(state, submit ...
    update_fences(): struct msm_gem_submit *submit; (local)
    find_submit(): struct msm_gem_submit *submit; (local)
    recover_worker(): struct msm_gem_submit *submit; (local)
    retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, struct msm_gem_submit *submit) (argument)
    retire_submits(): struct msm_gem_submit *submit, *tmp; (local)
    msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit) (argument)
    [all...] |
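The crash-capture fragments count, in two passes, how many BOs will be captured: every BO flagged dumpable, plus any cmdstream BO not already flagged, so the state array can then be allocated at exactly the right size. A sketch of the counting logic with stand-in types (should_dump() becomes a plain flag):

    /* Sketch of the counting msm_gpu_crashstate_capture() performs
     * before allocating its BO array.  Types are illustrative. */
    #include <stdbool.h>

    struct fake_bo  { bool dump; };
    struct fake_cmd { unsigned idx; };   /* index into the bos[] array */

    static unsigned count_capture_bos(const struct fake_bo *bos,
                                      unsigned nr_bos,
                                      const struct fake_cmd *cmds,
                                      unsigned nr_cmds)
    {
            unsigned i, nr = 0;

            for (i = 0; i < nr_bos; i++)
                    if (bos[i].dump)             /* should_dump(submit, i) */
                            nr++;
            for (i = 0; i < nr_cmds; i++)
                    if (!bos[cmds[i].idx].dump)  /* cmdstream BO not counted yet */
                            nr++;
            return nr;                           /* allocate exactly nr entries */
    }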
/kernel/linux/linux-5.10/crypto/async_tx/ |
H A D | async_xor.c |
    do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, struct async_submit_ctl *submit) (argument)
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        enum async_tx_flags flags_orig = submit->flags;
        submit->flags = flags_orig;
        submit->flags &= ~ASYNC_TX_ACK;
        submit->flags |= ASYNC_TX_FENCE;
        submit->cb_fn = NULL;
        submit->cb_param = NULL;
        submit ...
    do_sync_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    async_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    async_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    xor_val_chan(struct async_submit_ctl *submit, struct page *dest, struct page **src_list, int src_cnt, size_t len) (argument)
    async_xor_val_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) (argument)
    async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) (argument)
    [all...] |
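do_async_xor() processes sources in hardware-sized batches, and the flag juggling above is the heart of it: the caller's flags and callback are saved once, intermediate chunks run with ASYNC_TX_ACK cleared, ASYNC_TX_FENCE set, and the callback suppressed, and only the final chunk carries the caller's completion. A self-contained sketch of that discipline, with simplified flag values and a stubbed issue_chunk():

    /* Sketch of the save/restore chunking pattern do_async_xor() uses.
     * The flag values, struct, and issue_chunk() hook are simplified
     * stand-ins for the async_tx machinery. */
    #include <stddef.h>

    enum { ASYNC_TX_ACK = 1 << 0, ASYNC_TX_FENCE = 1 << 1 };

    struct fake_submit_ctl {
            unsigned flags;
            void (*cb_fn)(void *);
            void *cb_param;
    };

    static void xor_in_chunks(struct fake_submit_ctl *submit,
                              size_t total, size_t max_chunk,
                              void (*issue_chunk)(size_t off, size_t len,
                                                  struct fake_submit_ctl *s))
    {
            unsigned flags_orig = submit->flags;
            void (*cb_fn_orig)(void *) = submit->cb_fn;
            void *cb_param_orig = submit->cb_param;
            size_t off;

            for (off = 0; off < total; off += max_chunk) {
                    size_t len = total - off < max_chunk ? total - off
                                                         : max_chunk;

                    submit->flags = flags_orig;
                    if (off + len < total) {    /* not the last chunk */
                            submit->flags &= ~ASYNC_TX_ACK;
                            submit->flags |= ASYNC_TX_FENCE;
                            submit->cb_fn = NULL;
                            submit->cb_param = NULL;
                    } else {                    /* last chunk completes */
                            submit->cb_fn = cb_fn_orig;
                            submit->cb_param = cb_param_orig;
                    }
                    issue_chunk(off, len, submit);
            }
    }

Fencing the intermediate chunks keeps them ordered on the channel, and suppressing ACK keeps them alive until the final descriptor depends on them.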
H A D | async_raid6_recov.c |
    async_sum_product(struct page *dest, unsigned int d_off, struct page **srcs, unsigned int *src_offs, unsigned char *coef, size_t len, struct async_submit_ctl *submit) (argument)
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, ...
        if (submit->flags & ASYNC_TX_FENCE)
        async_tx_submit(chan, tx, submit);
        async_tx_quiesce(&submit->depend_tx);
    async_mult(struct page *dest, unsigned int d_off, struct page *src, unsigned int s_off, u8 coef, size_t len, struct async_submit_ctl *submit) (argument)
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, ...
        if (submit->flags & ASYNC_TX_FENCE)
        async_tx_submit(chan, tx, submit);
        async_tx_quiesce(&submit ...
    __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    [all...] |
H A D | async_pq.c |
    do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) (argument)
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        dma_async_tx_callback cb_param_orig = submit->cb_param;
        submit->flags = flags_orig;
        submit->flags &= ~ASYNC_TX_ACK;
        submit->flags |= ASYNC_TX_FENCE;
        submit->cb_fn = NULL;
        submit->cb_param = NULL;
        submit ...
    do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) (argument)
    async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) (argument)
    pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) (argument)
    async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit) (argument)
    [all...] |
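do_sync_gen_syndrome() is the CPU fallback for computing the RAID6 P/Q pair. Per byte position, P is the XOR of all data blocks and Q is the sum of each block multiplied by successive powers of the generator g = 2 in GF(2^8) with the 0x11d reduction polynomial. A worked standalone demo of that math (not the kernel's optimized implementation, which operates on whole pages):

    /* Standalone P/Q syndrome demo: P = d0 ^ d1 ^ ... and
     * Q = g^0*d0 + g^1*d1 + ... in GF(2^8)/0x11d, evaluated with
     * Horner's rule from the highest data disk down. */
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t gf_mul2(uint8_t v)   /* multiply by x in GF(2^8)/0x11d */
    {
            return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
    }

    static void gen_syndrome(const uint8_t *data, int ndata,
                             uint8_t *p, uint8_t *q)
    {
            int d;

            *p = *q = 0;
            for (d = ndata - 1; d >= 0; d--) {
                    *p ^= data[d];
                    *q = (uint8_t)(gf_mul2(*q) ^ data[d]);  /* Horner step */
            }
    }

    int main(void)
    {
            uint8_t data[3] = { 0x11, 0x22, 0x33 }, p, q;

            gen_syndrome(data, 3, &p, &q);
            printf("P=%02x Q=%02x\n", p, q);  /* P = 11^22^33 = 00 */
            return 0;
    }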
/kernel/linux/linux-6.6/crypto/async_tx/ |
H A D | async_xor.c |
    do_async_xor(struct dma_chan *chan, struct dmaengine_unmap_data *unmap, struct async_submit_ctl *submit) (argument)
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        void *cb_param_orig = submit->cb_param;
        enum async_tx_flags flags_orig = submit->flags;
        submit->flags = flags_orig;
        submit->flags &= ~ASYNC_TX_ACK;
        submit->flags |= ASYNC_TX_FENCE;
        submit->cb_fn = NULL;
        submit->cb_param = NULL;
        submit ...
    do_sync_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    async_xor_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    async_xor(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, struct async_submit_ctl *submit) (argument)
    xor_val_chan(struct async_submit_ctl *submit, struct page *dest, struct page **src_list, int src_cnt, size_t len) (argument)
    async_xor_val_offs(struct page *dest, unsigned int offset, struct page **src_list, unsigned int *src_offs, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) (argument)
    async_xor_val(struct page *dest, struct page **src_list, unsigned int offset, int src_cnt, size_t len, enum sum_check_flags *result, struct async_submit_ctl *submit) (argument)
    [all...] |
H A D | async_raid6_recov.c |
    async_sum_product(struct page *dest, unsigned int d_off, struct page **srcs, unsigned int *src_offs, unsigned char *coef, size_t len, struct async_submit_ctl *submit) (argument)
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, ...
        if (submit->flags & ASYNC_TX_FENCE)
        async_tx_submit(chan, tx, submit);
        async_tx_quiesce(&submit->depend_tx);
    async_mult(struct page *dest, unsigned int d_off, struct page *src, unsigned int s_off, u8 coef, size_t len, struct async_submit_ctl *submit) (argument)
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, ...
        if (submit->flags & ASYNC_TX_FENCE)
        async_tx_submit(chan, tx, submit);
        async_tx_quiesce(&submit ...
    __2data_recov_4(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    __2data_recov_5(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    __2data_recov_n(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    async_raid6_2data_recov(int disks, size_t bytes, int faila, int failb, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    async_raid6_datap_recov(int disks, size_t bytes, int faila, struct page **blocks, unsigned int *offs, struct async_submit_ctl *submit) (argument)
    [all...] |
H A D | async_pq.c |
    do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) (argument)
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        dma_async_tx_callback cb_param_orig = submit->cb_param;
        submit->flags = flags_orig;
        submit->flags &= ~ASYNC_TX_ACK;
        submit->flags |= ASYNC_TX_FENCE;
        submit->cb_fn = NULL;
        submit->cb_param = NULL;
        submit ...
    do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) (argument)
    async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) (argument)
    pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) (argument)
    async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit) (argument)
    [all...] |
/kernel/linux/linux-5.10/include/linux/ |
H A D | async_tx.h |
    __async_tx_find_channel(struct async_submit_ctl *submit, ...
    async_tx_find_channel(struct async_submit_ctl *submit, ... (argument)
    async_tx_sync_epilog(struct async_submit_ctl *submit) (argument)
        if (submit->cb_fn)
            submit->cb_fn(submit->cb_param);
    ... struct async_submit_ctl *submit);
    ... int src_cnt, size_t len, struct async_submit_ctl *submit);
    ... int src_cnt, size_t len, struct async_submit_ctl *submit);
    ... struct async_submit_ctl *submit);
    [all...] |
/kernel/linux/linux-6.6/include/linux/ |
H A D | async_tx.h |
    __async_tx_find_channel(struct async_submit_ctl *submit, ...
    async_tx_find_channel(struct async_submit_ctl *submit, ... (argument)
    async_tx_sync_epilog(struct async_submit_ctl *submit) (argument)
        if (submit->cb_fn)
            submit->cb_fn(submit->cb_param);
    ... struct async_submit_ctl *submit);
    ... int src_cnt, size_t len, struct async_submit_ctl *submit);
    ... int src_cnt, size_t len, struct async_submit_ctl *submit);
    ... struct async_submit_ctl *submit);
    [all...] |
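async_tx_sync_epilog() is the synchronous completion path: when an operation ran on the CPU instead of a DMA channel, the caller's callback still fires exactly once, just inline. The pattern in isolation, with a stand-in struct:

    /* Minimal reproduction of the async_tx_sync_epilog() idiom: a CPU
     * fallback still honors the completion callback the caller set up
     * for the DMA path.  The struct is a stand-in. */
    struct fake_submit_ctl {
            void (*cb_fn)(void *);
            void *cb_param;
    };

    static void sync_epilog(struct fake_submit_ctl *submit)
    {
            if (submit->cb_fn)                    /* callback is optional */
                    submit->cb_fn(submit->cb_param);
    }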
/kernel/linux/linux-5.10/drivers/dma/ti/ |
H A D | cppi41.c |
    u16 submit; (member)
    [ 0] = { .submit = 32, .complete =  93 },
    [ 1] = { .submit = 34, .complete =  94 },
    [ 2] = { .submit = 36, .complete =  95 },
    [ 3] = { .submit = 38, .complete =  96 },
    [ 4] = { .submit = 40, .complete =  97 },
    [ 5] = { .submit = 42, .complete =  98 },
    [ 6] = { .submit = 44, .complete =  99 },
    [ 7] = { .submit = 46, .complete = 100 },
    [ 8] = { .submit ...
    [all...] |
/kernel/linux/linux-6.6/drivers/dma/ti/ |
H A D | cppi41.c |
    u16 submit; (member)
    [ 0] = { .submit = 32, .complete =  93 },
    [ 1] = { .submit = 34, .complete =  94 },
    [ 2] = { .submit = 36, .complete =  95 },
    [ 3] = { .submit = 38, .complete =  96 },
    [ 4] = { .submit = 40, .complete =  97 },
    [ 5] = { .submit = 42, .complete =  98 },
    [ 6] = { .submit = 44, .complete =  99 },
    [ 7] = { .submit = 46, .complete = 100 },
    [ 8] = { .submit ...
    [all...] |
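The cppi41 table above pairs each DMA channel with two fixed hardware queue numbers: one queue to push descriptors into and one to poll for completions. A sketch of the same designated-initializer idiom, reusing the first few values from the listing (the struct and helper names here are illustrative, not the driver's):

    /* Static channel-to-queue map in the style of the cppi41 driver:
     * the array index is the channel, the pair is its submit and
     * completion queue.  Names are illustrative. */
    #include <stdint.h>

    struct chan_queue_pair { uint16_t submit; uint16_t complete; };

    static const struct chan_queue_pair tx_queue_map[] = {
            [0] = { .submit = 32, .complete = 93 },
            [1] = { .submit = 34, .complete = 94 },
            [2] = { .submit = 36, .complete = 95 },
            /* ... one entry per channel, as in the driver's full table */
    };

    static uint16_t submit_queue_for(unsigned chan)
    {
            return tx_queue_map[chan].submit;   /* push descriptors here */
    }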
/kernel/linux/linux-5.10/drivers/gpu/drm/lima/ |
H A D | lima_gem.c |
    lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) (argument)
        for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
            if (!submit->in_sync[i])
            err = drm_syncobj_find_fence(file, submit->in_sync[i], ...
            err = drm_gem_fence_array_add(&submit->task->deps, fence);
    lima_gem_submit(struct drm_file *file, struct lima_submit *submit) (argument)
        struct lima_bo **bos = submit->lbos;
        if (submit->out_sync) {
            out_sync = drm_syncobj_find(file, submit->out_sync);
        for (i = 0; i < submit ...
    [all...] |
/kernel/linux/linux-6.6/drivers/gpu/drm/lima/ |
H A D | lima_gem.c |
    lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) (argument)
        for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
            if (!submit->in_sync[i])
            err = drm_sched_job_add_syncobj_dependency(&submit->task->base, file,
                                                       submit->in_sync[i], 0);
    lima_gem_submit(struct drm_file *file, struct lima_submit *submit) (argument)
        struct lima_bo **bos = submit->lbos;
        if (submit->out_sync) {
            out_sync = drm_syncobj_find(file, submit->out_sync);
        for (i = 0; i < submit ...
    [all...] |
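Both lima versions walk a fixed-size in_sync[] array where a zero handle means the slot is unused; only nonzero handles are resolved into job dependencies (via drm_gem_fence_array_add() in 5.10, drm_sched_job_add_syncobj_dependency() in 6.6). The shape of that loop, with resolution stubbed behind a callback:

    /* Sketch of the lima_gem_add_deps() loop: skip empty slots, stop on
     * the first resolution error.  The submit struct and the
     * add_syncobj_dep() hook are stand-ins. */
    #include <stdint.h>

    #define NR_IN_SYNC 2

    struct fake_submit { uint32_t in_sync[NR_IN_SYNC]; };

    static int add_deps(struct fake_submit *submit,
                        int (*add_syncobj_dep)(uint32_t handle))
    {
            int i, err;

            for (i = 0; i < NR_IN_SYNC; i++) {
                    if (!submit->in_sync[i])   /* 0 handle: nothing to wait on */
                            continue;
                    err = add_syncobj_dep(submit->in_sync[i]);
                    if (err)
                            return err;
            }
            return 0;
    }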