Searched refs: job (Results 1 - 25 of 265), sorted by relevance

/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c 37 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
47 /* Effectively the job is aborted as the device is gone */ in amdgpu_job_timedout()
55 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
61 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
63 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
78 r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); in amdgpu_job_timedout()
95 unsigned int num_ibs, struct amdgpu_job **job) in amdgpu_job_alloc()
100 *job = kzalloc(struct_size(*job, ibs, num_ibs), GFP_KERNEL); in amdgpu_job_alloc()
101 if (!*job) in amdgpu_job_alloc()
93 amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct drm_sched_entity *entity, void *owner, unsigned int num_ibs, struct amdgpu_job **job) amdgpu_job_alloc() argument
121 amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_job **job) amdgpu_job_alloc_with_ib() argument
143 amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa) amdgpu_job_set_resources() argument
160 amdgpu_job_free_resources(struct amdgpu_job *job) amdgpu_job_free_resources() argument
180 struct amdgpu_job *job = to_amdgpu_job(s_job); amdgpu_job_free_cb() local
193 amdgpu_job_set_gang_leader(struct amdgpu_job *job, struct amdgpu_job *leader) amdgpu_job_set_gang_leader() argument
209 amdgpu_job_free(struct amdgpu_job *job) amdgpu_job_free() argument
225 amdgpu_job_submit(struct amdgpu_job *job) amdgpu_job_submit() argument
237 amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence) amdgpu_job_submit_direct() argument
257 struct amdgpu_job *job = to_amdgpu_job(sched_job); amdgpu_job_prepare_job() local
289 struct amdgpu_job *job; amdgpu_job_run() local
[all...]
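The amdgpu_job_timedout() lines above show a two-stage recovery: a cheap per-ring soft recovery (amdgpu_ring_soft_recovery()) is attempted first, and only if that fails does the driver escalate to amdgpu_device_gpu_recover(). Below is a minimal stand-alone sketch of that escalation pattern; all types and helpers are stand-ins, not the real amdgpu structures.

```c
/* Sketch of the escalation in amdgpu_job_timedout(): try a cheap soft
 * recovery of the hung job first, fall back to a full device reset.
 * Stub types and helpers only. */
#include <stdbool.h>
#include <stdio.h>

struct job { unsigned int vmid; const char *sched_name; };

/* Stub: e.g. kill just the offending work on the ring. */
static bool soft_recover(struct job *j) { (void)j; return false; }

static int full_device_reset(void) { puts("full GPU reset"); return 0; }

static int job_timedout(struct job *j)
{
    if (soft_recover(j)) {
        printf("soft recovered job on %s\n", j->sched_name);
        return 0;                 /* device untouched, other jobs keep running */
    }
    return full_device_reset();   /* heavy path: reset and resubmit survivors */
}

int main(void)
{
    struct job j = { .vmid = 1, .sched_name = "gfx" };
    return job_timedout(&j);
}
```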
amdgpu_ib.c 110 * @job: job to schedule
127 struct amdgpu_ib *ibs, struct amdgpu_job *job, in amdgpu_ib_schedule()
141 int vmid = AMDGPU_JOB_GET_VMID(job); in amdgpu_ib_schedule()
150 /* ring tests don't use a job */ in amdgpu_ib_schedule()
151 if (job) { in amdgpu_ib_schedule()
152 vm = job->vm; in amdgpu_ib_schedule()
153 fence_ctx = job->base.s_fence ? in amdgpu_ib_schedule()
154 job->base.s_fence->scheduled.context : 0; in amdgpu_ib_schedule()
155 shadow_va = job in amdgpu_ib_schedule()
126 amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, struct amdgpu_ib *ibs, struct amdgpu_job *job, struct dma_fence **f) amdgpu_ib_schedule() argument
[all...]
amdgpu_job.h 42 #define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
76 /* job_run_counter >= 1 means a resubmit job */
83 static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) in amdgpu_job_ring() argument
85 return to_amdgpu_ring(job->base.entity->rq->sched); in amdgpu_job_ring()
90 unsigned int num_ibs, struct amdgpu_job **job);
94 struct amdgpu_job **job);
95 void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds,
97 void amdgpu_job_free_resources(struct amdgpu_job *job);
[all...]
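The AMDGPU_JOB_GET_VMID() macro quoted above is deliberately NULL-tolerant because, as the amdgpu_ib.c snippet notes, ring tests schedule IBs without a job. A tiny self-contained illustration of the same accessor pattern; the names here are stand-ins, not the driver's types.

```c
#include <assert.h>
#include <stddef.h>

struct job { unsigned int vmid; };

/* Same shape as AMDGPU_JOB_GET_VMID(): degrade to VMID 0 on NULL. */
#define JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

int main(void)
{
    struct job j = { .vmid = 3 };
    struct job *none = NULL;          /* the "ring test" case: no job */

    assert(JOB_GET_VMID(&j) == 3);
    assert(JOB_GET_VMID(none) == 0);  /* no NULL dereference */
    return 0;
}
```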
/kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/
nouveau_sched.c 18 * other jobs competing for the hardware. Otherwise we might end up with job
20 * want jobs to time out because of system load, but because of the job being
30 nouveau_job_init(struct nouveau_job *job, in nouveau_job_init() argument
36 job->file_priv = args->file_priv; in nouveau_job_init()
37 job->cli = nouveau_cli(args->file_priv); in nouveau_job_init()
38 job->entity = entity; in nouveau_job_init()
40 job->sync = args->sync; in nouveau_job_init()
41 job->resv_usage = args->resv_usage; in nouveau_job_init()
43 job->ops = args->ops; in nouveau_job_init()
45 job in nouveau_job_init()
112 nouveau_job_free(struct nouveau_job *job) nouveau_job_free() argument
120 nouveau_job_fini(struct nouveau_job *job) nouveau_job_fini() argument
128 sync_find_fence(struct nouveau_job *job, struct drm_nouveau_sync *sync, struct dma_fence **fence) sync_find_fence() argument
153 nouveau_job_add_deps(struct nouveau_job *job) nouveau_job_add_deps() argument
178 nouveau_job_fence_attach_cleanup(struct nouveau_job *job) nouveau_job_fence_attach_cleanup() argument
195 nouveau_job_fence_attach_prepare(struct nouveau_job *job) nouveau_job_fence_attach_prepare() argument
237 nouveau_job_fence_attach(struct nouveau_job *job) nouveau_job_fence_attach() argument
262 nouveau_job_submit(struct nouveau_job *job) nouveau_job_submit() argument
354 nouveau_job_run(struct nouveau_job *job) nouveau_job_run() argument
370 struct nouveau_job *job = to_nouveau_job(sched_job); nouveau_sched_run_job() local
379 struct nouveau_job *job = to_nouveau_job(sched_job); nouveau_sched_timedout_job() local
397 struct nouveau_job *job = to_nouveau_job(sched_job); nouveau_sched_free_job() local
[all...]
nouveau_exec.c 67 * DRM_NOUVEAU_EXEC ioctl is called to submit an exec job.
69 * A VM_BIND job can be executed either synchronously or asynchronously. If
70 * executed asynchronously, userspace may provide a list of syncobjs this job in nouveau_exec_job_submit()
72 * VM_BIND job finished execution. If executed synchronously the ioctl will
73 * block until the bind job is finished. For synchronous jobs the kernel will
82 * Since VM_BIND jobs update the GPU's VA space on job submit, EXEC jobs do have
89 nouveau_exec_job_submit(struct nouveau_job *job) in nouveau_exec_job_submit() argument
91 struct nouveau_exec_job *exec_job = to_nouveau_exec_job(job); in nouveau_exec_job_submit()
92 struct nouveau_cli *cli = job->cli; in nouveau_exec_job_submit()
94 struct drm_exec *exec = &job in nouveau_exec_job_submit()
141 nouveau_exec_job_armed_submit(struct nouveau_job *job) nouveau_exec_job_armed_submit() argument
154 nouveau_exec_job_run(struct nouveau_job *job) nouveau_exec_job_run() argument
191 nouveau_exec_job_free(struct nouveau_job *job) nouveau_exec_job_free() argument
203 nouveau_exec_job_timeout(struct nouveau_job *job) nouveau_exec_job_timeout() argument
231 struct nouveau_exec_job *job; nouveau_exec_job_init() local
294 struct nouveau_exec_job *job; nouveau_exec() local
[all...]
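The nouveau_exec.c comment above describes the synchronous/asynchronous split for job submission: a synchronous job blocks the ioctl until the bind job finishes, an asynchronous one returns immediately and lets userspace wait on the syncobjs it attached. Below is a loose model of that control flow; fence_wait() and the field names are stand-ins for the real dma_fence and nouveau_job machinery, not the driver's API.

```c
#include <stdbool.h>

struct fence { volatile bool signaled; };
struct job   { bool sync; struct fence done; };

/* Stand-in for dma_fence_wait(): block until the fence signals. */
static void fence_wait(struct fence *f) { while (!f->signaled) { /* spin */ } }

static int job_submit(struct job *job)
{
    /* ... push the job to the scheduler; completion signals job->done ... */
    if (job->sync)
        fence_wait(&job->done);   /* ioctl blocks until the job finished */
    return 0;                     /* async: userspace waits on its syncobjs */
}
```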
/kernel/linux/linux-6.6/drivers/gpu/host1x/
job.c 21 #include "job.h"
30 struct host1x_job *job = NULL; in host1x_job_alloc() local
51 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
52 if (!job) in host1x_job_alloc()
55 job->enable_firewall = enable_firewall; in host1x_job_alloc()
57 kref_init(&job->ref); in host1x_job_alloc()
58 job->channel = ch; in host1x_job_alloc()
62 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
64 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
66 job in host1x_job_alloc()
77 host1x_job_get(struct host1x_job *job) host1x_job_get() argument
86 struct host1x_job *job = container_of(ref, struct host1x_job, ref); job_free() local
107 host1x_job_put(struct host1x_job *job) host1x_job_put() argument
113 host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset) host1x_job_add_gather() argument
126 host1x_job_add_wait(struct host1x_job *job, u32 id, u32 thresh, bool relative, u32 next_class) host1x_job_add_wait() argument
141 pin_job(struct host1x *host, struct host1x_job *job) pin_job() argument
279 do_relocs(struct host1x_job *job, struct host1x_job_gather *g) do_relocs() argument
339 struct host1x_job *job; global() member
524 copy_gathers(struct device *host, struct host1x_job *job, struct device *dev) copy_gathers() argument
598 host1x_job_pin(struct host1x_job *job, struct device *dev) host1x_job_pin() argument
653 host1x_job_unpin(struct host1x_job *job) host1x_job_unpin() argument
682 host1x_job_dump(struct device *dev, struct host1x_job *job) host1x_job_dump() argument
[all...]
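host1x_job_alloc() above ("mem = job = kzalloc(total, GFP_KERNEL)") sizes one allocation for the job struct plus its variable-length reloc/unpin arrays and then carves pointers out of the tail. A userspace sketch of that layout trick, with calloc() standing in for kzalloc() and illustrative field names; real kernel code should guard the size arithmetic against overflow, e.g. with struct_size() as the amdgpu snippet does.

```c
#include <stdlib.h>

struct reloc { int target_offset; };

struct job {
    struct reloc *relocs;
    unsigned int num_relocs;
};

static struct job *job_alloc(unsigned int num_relocs)
{
    size_t total = sizeof(struct job) + num_relocs * sizeof(struct reloc);
    char *mem;
    struct job *job;

    mem = calloc(1, total);              /* kzalloc(total, GFP_KERNEL) */
    if (!mem)
        return NULL;
    job = (struct job *)mem;

    /* Carve the trailing array out of the same allocation. */
    mem += sizeof(*job);
    job->relocs = num_relocs ? (struct reloc *)mem : NULL;
    job->num_relocs = num_relocs;

    return job;                          /* one free(job) releases it all */
}
```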
cdma.c 23 #include "job.h"
270 * Start timer that tracks the time spent by the job.
274 struct host1x_job *job) in cdma_start_timer_locked()
281 cdma->timeout.client = job->client; in cdma_start_timer_locked()
282 cdma->timeout.syncpt = job->syncpt; in cdma_start_timer_locked()
283 cdma->timeout.syncpt_val = job->syncpt_end; in cdma_start_timer_locked()
287 msecs_to_jiffies(job->timeout)); in cdma_start_timer_locked()
313 struct host1x_job *job, *n; in update_cdma_locked() local
319 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { in update_cdma_locked()
320 struct host1x_syncpt *sp = job in update_cdma_locked()
273 cdma_start_timer_locked(struct host1x_cdma *cdma, struct host1x_job *job) cdma_start_timer_locked() argument
368 struct host1x_job *job, *next_job = NULL; host1x_cdma_update_sync_queue() local
548 host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job) host1x_cdma_begin() argument
666 host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job) host1x_cdma_end() argument
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/amdgpu/
amdgpu_job.c 34 struct amdgpu_job *job = to_amdgpu_job(s_job); in amdgpu_job_timedout() local
41 amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) { in amdgpu_job_timedout()
47 amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti); in amdgpu_job_timedout()
49 job->base.sched->name, atomic_read(&ring->fence_drv.last_seq), in amdgpu_job_timedout()
55 amdgpu_device_gpu_recover(ring->adev, job); in amdgpu_job_timedout()
64 struct amdgpu_job **job, struct amdgpu_vm *vm) in amdgpu_job_alloc()
73 *job = kzalloc(size, GFP_KERNEL); in amdgpu_job_alloc()
74 if (!*job) in amdgpu_job_alloc()
81 (*job)->base.sched = &adev->rings[0]->sched; in amdgpu_job_alloc()
82 (*job) in amdgpu_job_alloc()
63 amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, struct amdgpu_job **job, struct amdgpu_vm *vm) amdgpu_job_alloc() argument
94 amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, enum amdgpu_ib_pool_type pool_type, struct amdgpu_job **job) amdgpu_job_alloc_with_ib() argument
111 amdgpu_job_free_resources(struct amdgpu_job *job) amdgpu_job_free_resources() argument
126 struct amdgpu_job *job = to_amdgpu_job(s_job); amdgpu_job_free_cb() local
136 amdgpu_job_free(struct amdgpu_job *job) amdgpu_job_free() argument
146 amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f) amdgpu_job_submit() argument
165 amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence) amdgpu_job_submit_direct() argument
184 struct amdgpu_job *job = to_amdgpu_job(sched_job); amdgpu_job_dependency() local
213 struct amdgpu_job *job; amdgpu_job_run() local
[all...]
/kernel/linux/linux-5.10/drivers/gpu/host1x/
job.c 21 #include "job.h"
29 struct host1x_job *job = NULL; in host1x_job_alloc() local
47 mem = job = kzalloc(total, GFP_KERNEL); in host1x_job_alloc()
48 if (!job) in host1x_job_alloc()
51 kref_init(&job->ref); in host1x_job_alloc()
52 job->channel = ch; in host1x_job_alloc()
56 job->relocs = num_relocs ? mem : NULL; in host1x_job_alloc()
58 job->unpins = num_unpins ? mem : NULL; in host1x_job_alloc()
60 job->gathers = num_cmdbufs ? mem : NULL; in host1x_job_alloc()
62 job in host1x_job_alloc()
71 host1x_job_get(struct host1x_job *job) host1x_job_get() argument
80 struct host1x_job *job = container_of(ref, struct host1x_job, ref); job_free() local
85 host1x_job_put(struct host1x_job *job) host1x_job_put() argument
91 host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo, unsigned int words, unsigned int offset) host1x_job_add_gather() argument
104 pin_job(struct host1x *host, struct host1x_job *job) pin_job() argument
279 do_relocs(struct host1x_job *job, struct host1x_job_gather *g) do_relocs() argument
339 struct host1x_job *job; global() member
524 copy_gathers(struct device *host, struct host1x_job *job, struct device *dev) copy_gathers() argument
589 host1x_job_pin(struct host1x_job *job, struct device *dev) host1x_job_pin() argument
639 host1x_job_unpin(struct host1x_job *job) host1x_job_unpin() argument
675 host1x_job_dump(struct device *dev, struct host1x_job *job) host1x_job_dump() argument
[all...]
cdma.c 23 #include "job.h"
270 * Start timer that tracks the time spent by the job.
274 struct host1x_job *job) in cdma_start_timer_locked()
283 cdma->timeout.client = job->client; in cdma_start_timer_locked()
284 cdma->timeout.syncpt = host1x_syncpt_get(host, job->syncpt_id); in cdma_start_timer_locked()
285 cdma->timeout.syncpt_val = job->syncpt_end; in cdma_start_timer_locked()
289 msecs_to_jiffies(job->timeout)); in cdma_start_timer_locked()
316 struct host1x_job *job, *n; in update_cdma_locked() local
326 list_for_each_entry_safe(job, n, &cdma->sync_queue, list) { in update_cdma_locked()
328 host1x_syncpt_get(host1x, job in update_cdma_locked()
273 cdma_start_timer_locked(struct host1x_cdma *cdma, struct host1x_job *job) cdma_start_timer_locked() argument
375 struct host1x_job *job, *next_job = NULL; host1x_cdma_update_sync_queue() local
491 host1x_cdma_begin(struct host1x_cdma *cdma, struct host1x_job *job) host1x_cdma_begin() argument
606 host1x_cdma_end(struct host1x_cdma *cdma, struct host1x_job *job) host1x_cdma_end() argument
[all...]
/kernel/linux/linux-6.6/drivers/gpu/host1x/hw/
channel_hw.c 17 #include "../job.h"
50 static void submit_wait(struct host1x_job *job, u32 id, u32 threshold, in submit_wait() argument
53 struct host1x_cdma *cdma = &job->channel->cdma; in submit_wait()
63 if (job->memory_context) in submit_wait()
64 stream_id = job->memory_context->stream_id; in submit_wait()
66 stream_id = job->engine_fallback_streamid; in submit_wait()
79 host1x_cdma_push_wide(&job->channel->cdma, in submit_wait()
80 host1x_opcode_setclass(job->class, 0, 0), in submit_wait()
82 host1x_opcode_setstreamid(job->engine_streamid_offset / 4), in submit_wait()
113 static void submit_gathers(struct host1x_job *job, u3 argument
163 synchronize_syncpt_base(struct host1x_job *job) synchronize_syncpt_base() argument
212 channel_program_cdma(struct host1x_job *job) channel_program_cdma() argument
281 struct host1x_job *job = container_of(cb, struct host1x_job, fence_cb); job_complete_callback() local
287 channel_submit(struct host1x_job *job) channel_submit() argument
[all...]
/kernel/linux/linux-5.10/drivers/md/
dm-kcopyd.c 40 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
347 * Error state of the job.
367 * Set this to ensure you are notified when the job has
374 * These fields are only used if the job has been split
408 * Functions to push and pop a job onto the head of a given job
414 struct kcopyd_job *job; in pop_io_job() local
420 list_for_each_entry(job, jobs, list) { in pop_io_job()
421 if (job->rw == READ || !test_bit(DM_KCOPYD_WRITE_SEQ, &job in pop_io_job()
439 struct kcopyd_job *job = NULL; pop() local
457 push(struct list_head *jobs, struct kcopyd_job *job) push() argument
468 push_head(struct list_head *jobs, struct kcopyd_job *job) push_head() argument
487 run_complete_job(struct kcopyd_job *job) run_complete_job() argument
517 struct kcopyd_job *job = (struct kcopyd_job *) context; complete_io() local
550 run_io_job(struct kcopyd_job *job) run_io_job() argument
584 run_pages_job(struct kcopyd_job *job) run_pages_job() argument
610 struct kcopyd_job *job; process_jobs() local
676 dispatch_job(struct kcopyd_job *job) dispatch_job() argument
696 struct kcopyd_job *job = sub_job->master_job; segment_complete() local
778 struct kcopyd_job *job; dm_kcopyd_copy() local
865 struct kcopyd_job *job; dm_kcopyd_prepare_callback() local
883 struct kcopyd_job *job = j; dm_kcopyd_do_callback() local
[all...]
/kernel/linux/linux-6.6/drivers/md/
dm-kcopyd.c 41 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
353 * Error state of the job.
373 * Set this to ensure you are notified when the job has
380 * These fields are only used if the job has been split
414 * Functions to push and pop a job onto the head of a given job
420 struct kcopyd_job *job; in pop_io_job() local
426 list_for_each_entry(job, jobs, list) { in pop_io_job()
427 if (job->op == REQ_OP_READ || in pop_io_job()
428 !(job in pop_io_job()
446 struct kcopyd_job *job = NULL; pop() local
463 push(struct list_head *jobs, struct kcopyd_job *job) push() argument
474 push_head(struct list_head *jobs, struct kcopyd_job *job) push_head() argument
492 run_complete_job(struct kcopyd_job *job) run_complete_job() argument
522 struct kcopyd_job *job = context; complete_io() local
555 run_io_job(struct kcopyd_job *job) run_io_job() argument
588 run_pages_job(struct kcopyd_job *job) run_pages_job() argument
614 struct kcopyd_job *job; process_jobs() local
679 dispatch_job(struct kcopyd_job *job) dispatch_job() argument
700 struct kcopyd_job *job = sub_job->master_job; segment_complete() local
782 struct kcopyd_job *job; dm_kcopyd_copy() local
869 struct kcopyd_job *job; dm_kcopyd_prepare_callback() local
887 struct kcopyd_job *job = j; dm_kcopyd_do_callback() local
[all...]
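pop_io_job() in the dm-kcopyd snippets selects the next job to issue: reads always qualify, while writes are held back when the client requires sequential write ordering (the DM_KCOPYD_WRITE_SEQ flag in the 5.10 snippet). A toy singly-linked-list model of that selection rule follows; the kernel itself uses list_head plus a spinlock, and tracks the flag per client, both of which this sketch simplifies away.

```c
#include <stdbool.h>
#include <stddef.h>

enum op { OP_READ, OP_WRITE };

struct kjob {
    enum op op;
    struct kjob *next;
};

/* Pop the first job eligible for I/O: reads always qualify; writes only
 * when sequential write ordering is not requested. */
static struct kjob *pop_io_job(struct kjob **head, bool write_seq)
{
    struct kjob **pp, *j;

    for (pp = head; (j = *pp) != NULL; pp = &j->next) {
        if (j->op == OP_READ || !write_seq) {
            *pp = j->next;    /* unlink from the queue */
            return j;
        }
    }
    return NULL;              /* nothing runnable right now */
}
```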
/kernel/linux/linux-6.6/drivers/gpu/drm/v3d/
v3d_sched.c 10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_sched_job_free() local
62 v3d_job_cleanup(job); in v3d_sched_job_free()
66 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job) in v3d_switch_perfmon() argument
68 if (job->perfmon != v3d->active_perfmon) in v3d_switch_perfmon()
71 if (job->perfmon && v3d->active_perfmon != job->perfmon) in v3d_switch_perfmon()
72 v3d_perfmon_start(v3d, job->perfmon); in v3d_switch_perfmon()
77 struct v3d_bin_job *job in v3d_bin_job_run() local
132 struct v3d_render_job *job = to_render_job(sched_job); v3d_render_job_run() local
177 struct v3d_tfu_job *job = to_tfu_job(sched_job); v3d_tfu_job_run() local
214 struct v3d_csd_job *job = to_csd_job(sched_job); v3d_csd_job_run() local
247 struct v3d_job *job = to_v3d_job(sched_job); v3d_cache_clean_job_run() local
294 struct v3d_job *job = to_v3d_job(sched_job); v3d_cl_job_timedout() local
311 struct v3d_bin_job *job = to_bin_job(sched_job); v3d_bin_job_timedout() local
320 struct v3d_render_job *job = to_render_job(sched_job); v3d_render_job_timedout() local
329 struct v3d_job *job = to_v3d_job(sched_job); v3d_generic_job_timedout() local
337 struct v3d_csd_job *job = to_csd_job(sched_job); v3d_csd_job_timedout() local
[all...]
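v3d_switch_perfmon() above swaps the active performance monitor only when the incoming job carries a different one. A small stand-alone model of that handover, with stub types rather than the v3d API:

```c
#include <stdio.h>

struct perfmon { const char *name; };

static void perfmon_stop(struct perfmon *p)  { printf("stop %s\n", p->name); }
static void perfmon_start(struct perfmon *p) { printf("start %s\n", p->name); }

static void switch_perfmon(struct perfmon **active, struct perfmon *wanted)
{
    if (wanted == *active)
        return;               /* same monitor, nothing to do */
    if (*active)
        perfmon_stop(*active);
    if (wanted)
        perfmon_start(wanted);
    *active = wanted;
}
```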
v3d_gem.c 169 * need to wait for completion before dispatching the job -- in v3d_flush_l2t()
173 * synchronously clean after a job. in v3d_flush_l2t()
186 * signaling job completion. So, we synchronously wait before
252 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
257 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
261 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
262 ret = dma_resv_reserve_fences(job->bo[i]->resv, 1); in v3d_lock_bo_reservations()
266 ret = drm_sched_job_add_implicit_dependencies(&job->base, in v3d_lock_bo_reservations()
267 job in v3d_lock_bo_reservations()
296 v3d_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct v3d_job *job, u64 bo_handles, u32 bo_count) v3d_lookup_bos() argument
320 struct v3d_job *job = container_of(ref, struct v3d_job, refcount); v3d_job_free() local
341 struct v3d_render_job *job = container_of(ref, struct v3d_render_job, v3d_render_job_free() local
352 v3d_job_cleanup(struct v3d_job *job) v3d_job_cleanup() argument
361 v3d_job_put(struct v3d_job *job) v3d_job_put() argument
405 struct v3d_job *job; v3d_job_init() local
465 v3d_push_job(struct v3d_job *job) v3d_push_job() argument
478 v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, struct v3d_submit_ext *se, struct dma_fence *done_fence) v3d_attach_fences_and_unlock_reservation() argument
821 struct v3d_tfu_job *job = NULL; v3d_submit_tfu_ioctl() local
915 struct v3d_csd_job *job = NULL; v3d_submit_csd_ioctl() local
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/v3d/
v3d_sched.c 10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
60 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_free() local
63 v3d_job_put(job); in v3d_job_free()
67 * Returns the fences that the job depends on, one by one.
76 struct v3d_job *job = to_v3d_job(sched_job); in v3d_job_dependency() local
82 if (!xa_empty(&job->deps)) in v3d_job_dependency()
83 return xa_erase(&job->deps, job->last_dep++); in v3d_job_dependency()
90 struct v3d_bin_job *job in v3d_bin_job_run() local
143 struct v3d_render_job *job = to_render_job(sched_job); v3d_render_job_run() local
186 struct v3d_tfu_job *job = to_tfu_job(sched_job); v3d_tfu_job_run() local
223 struct v3d_csd_job *job = to_csd_job(sched_job); v3d_csd_job_run() local
254 struct v3d_job *job = to_v3d_job(sched_job); v3d_cache_clean_job_run() local
299 struct v3d_job *job = to_v3d_job(sched_job); v3d_cl_job_timedout() local
316 struct v3d_bin_job *job = to_bin_job(sched_job); v3d_bin_job_timedout() local
325 struct v3d_render_job *job = to_render_job(sched_job); v3d_render_job_timedout() local
334 struct v3d_job *job = to_v3d_job(sched_job); v3d_generic_job_timedout() local
342 struct v3d_csd_job *job = to_csd_job(sched_job); v3d_csd_job_timedout() local
[all...]
v3d_gem.c 167 * need to wait for completion before dispatching the job -- in v3d_flush_l2t()
171 * synchronously clean after a job. in v3d_flush_l2t()
184 * signaling job completion. So, we synchronously wait before
250 v3d_lock_bo_reservations(struct v3d_job *job, in v3d_lock_bo_reservations() argument
255 ret = drm_gem_lock_reservations(job->bo, job->bo_count, acquire_ctx); in v3d_lock_bo_reservations()
259 for (i = 0; i < job->bo_count; i++) { in v3d_lock_bo_reservations()
260 ret = drm_gem_fence_array_add_implicit(&job->deps, in v3d_lock_bo_reservations()
261 job->bo[i], true); in v3d_lock_bo_reservations()
263 drm_gem_unlock_reservations(job in v3d_lock_bo_reservations()
287 v3d_lookup_bos(struct drm_device *dev, struct drm_file *file_priv, struct v3d_job *job, u64 bo_handles, u32 bo_count) v3d_lookup_bos() argument
354 struct v3d_job *job = container_of(ref, struct v3d_job, refcount); v3d_job_free() local
382 struct v3d_render_job *job = container_of(ref, struct v3d_render_job, v3d_render_job_free() local
393 v3d_job_put(struct v3d_job *job) v3d_job_put() argument
432 v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, struct v3d_job *job, void (*free)(struct kref *ref), u32 in_sync) v3d_job_init() argument
466 v3d_push_job(struct v3d_file_priv *v3d_priv, struct v3d_job *job, enum v3d_queue queue) v3d_push_job() argument
487 v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv, struct v3d_job *job, struct ww_acquire_ctx *acquire_ctx, u32 out_sync, struct dma_fence *done_fence) v3d_attach_fences_and_unlock_reservation() argument
685 struct v3d_tfu_job *job; v3d_submit_tfu_ioctl() local
780 struct v3d_csd_job *job; v3d_submit_csd_ioctl() local
[all...]
/kernel/linux/linux-5.10/block/
bsg-lib.c 39 struct bsg_job *job = blk_mq_rq_to_pdu(rq); in bsg_transport_fill_hdr() local
42 job->request_len = hdr->request_len; in bsg_transport_fill_hdr()
43 job->request = memdup_user(uptr64(hdr->request), hdr->request_len); in bsg_transport_fill_hdr()
44 if (IS_ERR(job->request)) in bsg_transport_fill_hdr()
45 return PTR_ERR(job->request); in bsg_transport_fill_hdr()
48 job->bidi_rq = blk_get_request(rq->q, REQ_OP_SCSI_IN, 0); in bsg_transport_fill_hdr()
49 if (IS_ERR(job->bidi_rq)) { in bsg_transport_fill_hdr()
50 ret = PTR_ERR(job->bidi_rq); in bsg_transport_fill_hdr()
54 ret = blk_rq_map_user(rq->q, job->bidi_rq, NULL, in bsg_transport_fill_hdr()
60 job in bsg_transport_fill_hdr()
78 struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_transport_complete_rq() local
127 struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_transport_free_rq() local
150 struct bsg_job *job = container_of(kref, struct bsg_job, kref); bsg_teardown_job() local
161 bsg_job_put(struct bsg_job *job) bsg_job_put() argument
167 bsg_job_get(struct bsg_job *job) bsg_job_get() argument
181 bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) bsg_job_done() argument
199 struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_complete() local
226 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_prepare_job() local
296 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_init_rq() local
307 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_initialize_rq() local
319 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_exit_rq() local
[all...]
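bsg_transport_fill_hdr() above copies the userspace request with memdup_user() and checks the result with IS_ERR()/PTR_ERR(): one pointer return carries either a valid buffer or a negated errno encoded in the pointer value. A compact userspace re-creation of that error-pointer convention, simplified for demonstration (the kernel's real definitions live in linux/err.h):

```c
#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ERRNO 4095
#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

/* memdup_user() analogue: duplicate a caller-supplied buffer,
 * returning an error pointer on allocation failure. */
static void *memdup(const void *src, size_t len)
{
    void *p = malloc(len);
    if (!p)
        return ERR_PTR(-ENOMEM);
    memcpy(p, src, len);
    return p;
}

int main(void)
{
    char src[4] = "abc";
    void *p = memdup(src, sizeof(src));

    if (IS_ERR(p))
        return (int)-PTR_ERR(p);   /* e.g. ENOMEM */
    free(p);
    return 0;
}
```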
/kernel/linux/linux-6.6/drivers/accel/ivpu/
ivpu_job.c 133 * Mark the doorbell as unregistered and reset job queue pointers.
135 * and FW loses job queue state. The next time job queue is used it
180 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job) in ivpu_cmdq_push_job() argument
182 struct ivpu_device *vdev = job->vdev; in ivpu_cmdq_push_job()
188 /* Check if there is space left in job queue */ in ivpu_cmdq_push_job()
191 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail); in ivpu_cmdq_push_job()
195 entry = &cmdq->jobq->job[tail]; in ivpu_cmdq_push_job()
196 entry->batch_buf_addr = job in ivpu_cmdq_push_job()
249 job_get(struct ivpu_job *job, struct ivpu_job **link) job_get() argument
261 struct ivpu_job *job = container_of(ref, struct ivpu_job, ref); job_release() local
279 job_put(struct ivpu_job *job) job_put() argument
291 struct ivpu_job *job; ivpu_create_job() local
328 struct ivpu_job *job; ivpu_job_done() local
362 struct ivpu_job *job; ivpu_jobs_abort_all() local
369 ivpu_direct_job_submission(struct ivpu_job *job) ivpu_direct_job_submission() argument
426 ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles, u32 buf_count, u32 commands_offset) ivpu_job_prepare_bos_for_submit() argument
496 struct ivpu_job *job; ivpu_submit_ioctl() local
[all...]
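ivpu_cmdq_push_job() above checks whether "there is space left in job queue" before writing the entry at the tail index. A minimal head/tail ring-buffer sketch of that producer-side check; the capacity, wrap arithmetic, and field names are assumptions for illustration, not the ivpu queue layout.

```c
#include <stdbool.h>

#define JOBQ_SIZE 64   /* assumed capacity, not the ivpu value */

struct jobq {
    unsigned int head, tail;
    unsigned long long addr[JOBQ_SIZE];
};

static bool jobq_push(struct jobq *q, unsigned long long batch_addr)
{
    unsigned int next = (q->tail + 1) % JOBQ_SIZE;

    if (next == q->head)
        return false;           /* queue full, caller must back off */
    q->addr[q->tail] = batch_addr;
    q->tail = next;             /* publish the new entry */
    return true;
}
```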
/kernel/linux/linux-6.6/block/
bsg-lib.c 31 struct bsg_job *job; in bsg_transport_sg_io_fn() local
49 job = blk_mq_rq_to_pdu(rq); in bsg_transport_sg_io_fn()
50 reply = job->reply; in bsg_transport_sg_io_fn()
51 memset(job, 0, sizeof(*job)); in bsg_transport_sg_io_fn()
52 job->reply = reply; in bsg_transport_sg_io_fn()
53 job->reply_len = SCSI_SENSE_BUFFERSIZE; in bsg_transport_sg_io_fn()
54 job->dd_data = job + 1; in bsg_transport_sg_io_fn()
56 job in bsg_transport_sg_io_fn()
158 struct bsg_job *job = container_of(kref, struct bsg_job, kref); bsg_teardown_job() local
169 bsg_job_put(struct bsg_job *job) bsg_job_put() argument
175 bsg_job_get(struct bsg_job *job) bsg_job_get() argument
189 bsg_job_done(struct bsg_job *job, int result, unsigned int reply_payload_rcv_len) bsg_job_done() argument
207 struct bsg_job *job = blk_mq_rq_to_pdu(rq); bsg_complete() local
234 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_prepare_job() local
304 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_init_rq() local
315 struct bsg_job *job = blk_mq_rq_to_pdu(req); bsg_exit_rq() local
[all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/
panfrost_job.c 112 static int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
118 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
123 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
124 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
125 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
127 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
151 static void panfrost_job_hw_submit(struct panfrost_job *job, int js) in panfrost_job_hw_submit() argument
153 struct panfrost_device *pfdev = job->pfdev; in panfrost_job_hw_submit()
155 u64 jc_head = job->jc; in panfrost_job_hw_submit()
168 cfg = panfrost_mmu_as_get(pfdev, job in panfrost_job_hw_submit()
219 panfrost_job_push(struct panfrost_job *job) panfrost_job_push() argument
264 struct panfrost_job *job = container_of(ref, struct panfrost_job, panfrost_job_cleanup() local
302 panfrost_job_put(struct panfrost_job *job) panfrost_job_put() argument
309 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_free() local
319 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_dependency() local
346 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_run() local
437 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_timedout() local
519 struct panfrost_job *job; panfrost_job_irq_handler() local
[all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/panfrost/
panfrost_job.c 106 int panfrost_job_get_slot(struct panfrost_job *job) in panfrost_job_get_slot() argument
112 if (job->requirements & PANFROST_JD_REQ_FS) in panfrost_job_get_slot()
117 if (job->requirements & PANFROST_JD_REQ_ONLY_COMPUTE) { in panfrost_job_get_slot()
118 if ((job->requirements & PANFROST_JD_REQ_CORE_GRP_MASK) && in panfrost_job_get_slot()
119 (job->pfdev->features.nr_core_groups == 2)) in panfrost_job_get_slot()
121 if (panfrost_has_hw_issue(job->pfdev, HW_ISSUE_8987)) in panfrost_job_get_slot()
146 panfrost_get_job_chain_flag(const struct panfrost_job *job) in panfrost_get_job_chain_flag() argument
148 struct panfrost_fence *f = to_panfrost_fence(job->done_fence); in panfrost_get_job_chain_flag()
150 if (!panfrost_has_hw_feature(job->pfdev, HW_FEATURE_JOBCHAIN_DISAMBIGUATION)) in panfrost_get_job_chain_flag()
159 struct panfrost_job *job in panfrost_dequeue_job() local
169 panfrost_enqueue_job(struct panfrost_device *pfdev, int slot, struct panfrost_job *job) panfrost_enqueue_job() argument
187 panfrost_job_hw_submit(struct panfrost_job *job, int js) panfrost_job_hw_submit() argument
244 panfrost_acquire_object_fences(struct drm_gem_object **bos, int bo_count, struct drm_sched_job *job) panfrost_acquire_object_fences() argument
275 panfrost_job_push(struct panfrost_job *job) panfrost_job_push() argument
315 struct panfrost_job *job = container_of(ref, struct panfrost_job, panfrost_job_cleanup() local
343 panfrost_job_put(struct panfrost_job *job) panfrost_job_put() argument
350 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_free() local
359 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_run() local
399 panfrost_job_handle_err(struct panfrost_device *pfdev, struct panfrost_job *job, unsigned int js) panfrost_job_handle_err() argument
453 panfrost_job_handle_done(struct panfrost_device *pfdev, struct panfrost_job *job) panfrost_job_handle_done() argument
712 struct panfrost_job *job = to_panfrost_job(sched_job); panfrost_job_timedout() local
905 struct panfrost_job *job = pfdev->jobs[i][j]; panfrost_job_close() local
[all...]
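panfrost_job_get_slot(), visible in both kernel versions above, maps requirement flags to a hardware job slot: fragment jobs go to slot 0, compute-only jobs can be diverted to slot 2 on parts with two core groups, and everything else lands in slot 1. A self-contained model of that decoding; the flag constants are stand-ins for the PANFROST_JD_REQ_* uapi values, and the HW_ISSUE_8987 special case the snippets show is elided here.

```c
#define REQ_FS            (1u << 0)
#define REQ_ONLY_COMPUTE  (1u << 1)
#define REQ_CORE_GRP_MASK (1u << 2)

static int job_get_slot(unsigned int requirements, int nr_core_groups)
{
    if (requirements & REQ_FS)
        return 0;                       /* fragment shader slot */

    if (requirements & REQ_ONLY_COMPUTE) {
        if ((requirements & REQ_CORE_GRP_MASK) && nr_core_groups == 2)
            return 2;                   /* second core group */
        /* real driver also special-cases HW_ISSUE_8987 here */
    }
    return 1;                           /* default vertex/compute slot */
}
```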
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_writeback.c 262 int drm_writeback_prepare_job(struct drm_writeback_job *job) in drm_writeback_prepare_job() argument
264 struct drm_writeback_connector *connector = job->connector; in drm_writeback_prepare_job()
270 ret = funcs->prepare_writeback_job(connector, job); in drm_writeback_prepare_job()
275 job->prepared = true; in drm_writeback_prepare_job()
281 * drm_writeback_queue_job - Queue a writeback job for later signalling
282 * @wb_connector: The writeback connector to queue a job on
283 * @conn_state: The connector state containing the job to queue
285 * This function adds the job contained in @conn_state to the job_queue for a
286 * writeback connector. It takes ownership of the writeback job and sets the
287 * @conn_state->writeback_job to NULL, and so no access to the job ma
302 struct drm_writeback_job *job; drm_writeback_queue_job() local
314 drm_writeback_cleanup_job(struct drm_writeback_job *job) drm_writeback_cleanup_job() argument
342 struct drm_writeback_job *job = container_of(work, cleanup_work() local
370 struct drm_writeback_job *job; drm_writeback_signal_completion() local
[all...]
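The drm_writeback.c comment above states that drm_writeback_queue_job() takes ownership of the writeback job and sets conn_state->writeback_job to NULL so the state can no longer reach it. A minimal sketch of that move-out-of-state ownership handoff, with stub types rather than the real DRM structures:

```c
#include <stddef.h>

struct wb_job       { int prepared; };
struct conn_state   { struct wb_job *writeback_job; };
struct wb_connector { struct wb_job *queued; };

static void queue_job(struct wb_connector *wb, struct conn_state *state)
{
    /* Take ownership: after this, the state no longer references the job,
     * so only the connector's queue may free or signal it. */
    wb->queued = state->writeback_job;
    state->writeback_job = NULL;
}
```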
/kernel/linux/linux-5.10/drivers/gpu/host1x/hw/
channel_hw.c 17 #include "../job.h"
50 static void submit_gathers(struct host1x_job *job) in submit_gathers() argument
52 struct host1x_cdma *cdma = &job->channel->cdma; in submit_gathers()
54 struct device *dev = job->channel->dev; in submit_gathers()
58 for (i = 0; i < job->num_gathers; i++) { in submit_gathers()
59 struct host1x_job_gather *g = &job->gathers[i]; in submit_gathers()
87 static inline void synchronize_syncpt_base(struct host1x_job *job) in synchronize_syncpt_base() argument
89 struct host1x *host = dev_get_drvdata(job->channel->dev->parent); in synchronize_syncpt_base()
90 struct host1x_syncpt *sp = host->syncpt + job->syncpt_id; in synchronize_syncpt_base()
97 host1x_cdma_push(&job in synchronize_syncpt_base()
118 channel_submit(struct host1x_job *job) channel_submit() argument
[all...]
/kernel/linux/linux-5.10/drivers/misc/habanalabs/common/
command_submission.c 123 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job) in is_cb_patched() argument
129 return (job->queue_type == QUEUE_TYPE_EXT || in is_cb_patched()
130 (job->queue_type == QUEUE_TYPE_HW && in is_cb_patched()
131 job->is_kernel_allocated_cb && in is_cb_patched()
139 * @job : pointer to the job that holds the command submission info
146 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job) in cs_parser() argument
152 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
153 parser.cs_sequence = job->cs->sequence; in cs_parser()
154 parser.job_id = job in cs_parser()
195 free_job(struct hl_device *hdev, struct hl_cs_job *job) free_job() argument
264 struct hl_cs_job *job, *tmp; cs_do_release() local
480 struct hl_cs_job *job, *tmp; cs_rollback() local
509 struct hl_cs_job *job = container_of(work, struct hl_cs_job, job_wq_completion() local
590 struct hl_cs_job *job; hl_cs_allocate_job() local
613 struct hl_cs_job *job; cs_ioctl_default() local
776 struct hl_cs_job *job; cs_ioctl_signal_wait() local
[all...]
