Lines Matching defs:job

263  * @job: pointer to the job that needs to be submitted to the queue
268 static void ext_queue_schedule_job(struct hl_cs_job *job)
270 struct hl_device *hdev = job->cs->ctx->hdev;
271 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
286 cb = job->patched_cb;
287 len = job->job_cb_size;
310 job->contains_dma_pkt);
312 q->shadow_queue[hl_pi_2_offset(q->pi)] = job;
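The matches above show the external-queue path: the patched CB and its size are picked up, then the job pointer is parked in a shadow queue slot derived from the producer index (hl_pi_2_offset(q->pi)) so the completion path can find it later. A minimal userspace sketch of that shadow-ring bookkeeping, assuming a power-of-two queue length and skipping the real bus descriptor and doorbell writes; every toy_* name here is hypothetical and is not the driver's API:

/* Minimal model of the shadow-queue bookkeeping: the job pointer is
 * stored at the slot derived from the free-running producer index,
 * so a later completion can look it up. Hypothetical names only.
 */
#include <stdint.h>
#include <stdio.h>

#define TOY_QUEUE_LEN 8                        /* must be a power of two */

struct toy_job { int id; };

struct toy_queue {
	uint32_t pi;                           /* producer index, free-running */
	struct toy_job *shadow[TOY_QUEUE_LEN]; /* in-flight jobs by slot */
};

/* analogous to hl_pi_2_offset(): wrap the free-running index to a slot */
static uint32_t toy_pi_2_offset(uint32_t pi)
{
	return pi & (TOY_QUEUE_LEN - 1);
}

static void toy_ext_queue_schedule_job(struct toy_queue *q, struct toy_job *job)
{
	/* the real code also builds a bus descriptor and rings a doorbell */
	q->shadow[toy_pi_2_offset(q->pi)] = job;
	q->pi++;
}

int main(void)
{
	struct toy_queue q = { 0 };
	struct toy_job j = { .id = 42 };

	toy_ext_queue_schedule_job(&q, &j);
	printf("slot 0 holds job %d\n", q.shadow[0]->id);
	return 0;
}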
322 * @job: pointer to the job that needs to be submitted to the queue
327 static void int_queue_schedule_job(struct hl_cs_job *job)
329 struct hl_device *hdev = job->cs->ctx->hdev;
330 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
335 bd.len = cpu_to_le32(job->job_cb_size);
336 bd.ptr = cpu_to_le64((u64) (uintptr_t) job->user_cb);
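For internal queues the matched lines fill a bus descriptor with explicitly little-endian fields (cpu_to_le32()/cpu_to_le64()). A sketch of the same idea in host code, assuming a glibc-style <endian.h> where htole32()/htole64() stand in for the kernel helpers; struct toy_bd and its layout are hypothetical:

/* Sketch of filling a little-endian hardware descriptor, mirroring the
 * cpu_to_le32()/cpu_to_le64() calls above. Hypothetical layout.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct toy_bd {
	uint32_t ctl;
	uint32_t len;   /* little-endian on the wire */
	uint64_t ptr;   /* little-endian on the wire */
};

static void toy_fill_bd(struct toy_bd *bd, uint32_t cb_size, const void *cb)
{
	bd->ctl = 0;
	bd->len = htole32(cb_size);
	/* for internal queues the "pointer" is the CB address as seen by
	 * the device; here we simply reuse the host pointer value */
	bd->ptr = htole64((uint64_t)(uintptr_t)cb);
}

int main(void)
{
	static const char cb[64] = { 0 };
	struct toy_bd bd;

	toy_fill_bd(&bd, sizeof(cb), cb);
	printf("len (LE) = 0x%08x\n", (unsigned int)bd.len);
	return 0;
}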
351 * @job: pointer to the job that needs to be submitted to the queue
356 static void hw_queue_schedule_job(struct hl_cs_job *job)
358 struct hl_device *hdev = job->cs->ctx->hdev;
359 struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id];
369 offset = job->cs->sequence & (hdev->asic_prop.max_pending_cs - 1);
373 len = job->job_cb_size;
381 if (job->patched_cb)
382 ptr = job->patched_cb->bus_address;
383 else if (job->is_kernel_allocated_cb)
384 ptr = job->user_cb->bus_address;
386 ptr = (u64) (uintptr_t) job->user_cb;
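The HW-queue matches show two patterns: the pending-CS slot is picked by masking the sequence number with max_pending_cs - 1 (which only works because that value is a power of two), and the CB address is chosen three ways: patched CB first, then the kernel-allocated CB's bus address, otherwise the user-supplied value is treated as the address itself. A sketch of that address choice, with hypothetical toy_* types; unlike the driver, it keeps the raw user address in a separate field for type clarity:

/* Sketch of the three-way CB address choice: prefer the patched CB,
 * fall back to the kernel-allocated CB's bus address, otherwise treat
 * the user-provided value itself as the address. Hypothetical types.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_cb { uint64_t bus_address; };

struct toy_job {
	struct toy_cb *patched_cb;
	struct toy_cb *user_cb;     /* only a real CB if kernel-allocated */
	uint64_t user_addr;         /* raw address handed in by userspace */
	int is_kernel_allocated_cb;
};

static uint64_t toy_job_cb_addr(const struct toy_job *job)
{
	if (job->patched_cb)
		return job->patched_cb->bus_address;
	else if (job->is_kernel_allocated_cb)
		return job->user_cb->bus_address;
	else
		return job->user_addr;  /* the value already is an address */
}

int main(void)
{
	struct toy_cb patched = { .bus_address = 0x1000 };
	struct toy_job job = { .patched_cb = &patched };

	printf("CB address: 0x%llx\n",
	       (unsigned long long)toy_job_cb_addr(&job));
	return 0;
}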
406 struct hl_cs_job *job;
409 /* There is only one job in a signal/wait CS */
410 job = list_first_entry(&cs->job_list, struct hl_cs_job,
412 q_idx = job->hw_queue_id;
425 hdev->asic_funcs->gen_signal_cb(hdev, job->patched_cb,
463 hdev->asic_funcs->gen_wait_cb(hdev, job->patched_cb,
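In the signal/wait path there is exactly one job per CS, so the code takes the first entry of the job list and asks the per-ASIC ops table (asic_funcs) to generate either a signal CB or a wait CB into the job's patched CB. A sketch of that ops-table indirection under the same single-job assumption; the toy_asic_funcs table and all other names are hypothetical stand-ins, not the driver's interface:

/* Sketch of asic_funcs-style indirection: a per-device ops table provides
 * gen_signal_cb()/gen_wait_cb(), and the scheduler calls whichever matches
 * the CS type. Hypothetical names throughout.
 */
#include <stdio.h>

enum toy_cs_type { TOY_CS_SIGNAL, TOY_CS_WAIT };

struct toy_cb { char data[64]; };

struct toy_asic_funcs {
	void (*gen_signal_cb)(struct toy_cb *cb, int queue_id);
	void (*gen_wait_cb)(struct toy_cb *cb, int queue_id);
};

static void toy_gen_signal_cb(struct toy_cb *cb, int queue_id)
{
	(void)cb;
	printf("signal CB for queue %d\n", queue_id);
}

static void toy_gen_wait_cb(struct toy_cb *cb, int queue_id)
{
	(void)cb;
	printf("wait CB for queue %d\n", queue_id);
}

/* a signal/wait CS carries exactly one job, so only one CB is generated */
static void toy_init_signal_wait_cs(const struct toy_asic_funcs *funcs,
				    enum toy_cs_type type,
				    struct toy_cb *cb, int queue_id)
{
	if (type == TOY_CS_SIGNAL)
		funcs->gen_signal_cb(cb, queue_id);
	else
		funcs->gen_wait_cb(cb, queue_id);
}

int main(void)
{
	const struct toy_asic_funcs funcs = {
		.gen_signal_cb = toy_gen_signal_cb,
		.gen_wait_cb = toy_gen_wait_cb,
	};
	struct toy_cb cb;

	toy_init_signal_wait_cs(&funcs, TOY_CS_SIGNAL, &cb, 3);
	return 0;
}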
489 struct hl_cs_job *job, *tmp;
564 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
565 switch (job->queue_type) {
567 ext_queue_schedule_job(job);
570 int_queue_schedule_job(job);
573 hw_queue_schedule_job(job);
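The last group of matches is the per-job dispatch loop: the CS's job list is walked and each job is routed to the scheduler that matches its queue type (external, internal, or HW). A compact sketch of that dispatch, with an array standing in for the kernel linked list and hypothetical toy_* names:

/* Sketch of the per-job dispatch in the switch above: iterate the CS's
 * jobs and route each one by queue type. Hypothetical names; an array
 * stands in for the kernel list.
 */
#include <stdio.h>

enum toy_queue_type { TOY_QUEUE_EXT, TOY_QUEUE_INT, TOY_QUEUE_HW };

struct toy_job { enum toy_queue_type queue_type; int id; };

static void toy_ext_schedule(struct toy_job *job) { printf("EXT job %d\n", job->id); }
static void toy_int_schedule(struct toy_job *job) { printf("INT job %d\n", job->id); }
static void toy_hw_schedule(struct toy_job *job)  { printf("HW job %d\n", job->id); }

static void toy_schedule_cs(struct toy_job *jobs, int nr_jobs)
{
	for (int i = 0; i < nr_jobs; i++) {
		switch (jobs[i].queue_type) {
		case TOY_QUEUE_EXT:
			toy_ext_schedule(&jobs[i]);
			break;
		case TOY_QUEUE_INT:
			toy_int_schedule(&jobs[i]);
			break;
		case TOY_QUEUE_HW:
			toy_hw_schedule(&jobs[i]);
			break;
		}
	}
}

int main(void)
{
	struct toy_job jobs[] = {
		{ TOY_QUEUE_EXT, 0 }, { TOY_QUEUE_INT, 1 }, { TOY_QUEUE_HW, 2 },
	};

	toy_schedule_cs(jobs, 3);
	return 0;
}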
827 * user context. It also means that if a job was submitted by
828 * the kernel driver (e.g. context creation), the job itself was