Lines Matching refs:job (references to "job" in the habanalabs command-submission code)

123 static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
129 return (job->queue_type == QUEUE_TYPE_EXT ||
130 (job->queue_type == QUEUE_TYPE_HW &&
131 job->is_kernel_allocated_cb &&
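The predicate at line 123 is cut off by the match; reading it together with the complementary check at line 220 (QUEUE_TYPE_HW && is_kernel_allocated_cb && hdev->mmu_enable), it presumably closes with a negated MMU check. A minimal sketch, with the !hdev->mmu_enable condition taken as an assumption:

        static bool is_cb_patched(struct hl_device *hdev, struct hl_cs_job *job)
        {
                /* A patched CB exists for external-queue jobs, and for H/W-queue
                 * jobs whose CB was allocated by the driver while the MMU is
                 * disabled (assumed; only the first three conditions are matched).
                 */
                return (job->queue_type == QUEUE_TYPE_EXT ||
                        (job->queue_type == QUEUE_TYPE_HW &&
                         job->is_kernel_allocated_cb &&
                         !hdev->mmu_enable));
        }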
139 * @job : pointer to the job that holds the command submission info
146 static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
152 parser.ctx_id = job->cs->ctx->asid;
153 parser.cs_sequence = job->cs->sequence;
154 parser.job_id = job->id;
156 parser.hw_queue_id = job->hw_queue_id;
157 parser.job_userptr_list = &job->userptr_list;
159 parser.user_cb = job->user_cb;
160 parser.user_cb_size = job->user_cb_size;
161 parser.queue_type = job->queue_type;
162 parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
163 job->patched_cb = NULL;
167 if (is_cb_patched(hdev, job)) {
169 job->patched_cb = parser.patched_cb;
170 job->job_cb_size = parser.patched_cb_size;
171 job->contains_dma_pkt = parser.contains_dma_pkt;
173 spin_lock(&job->patched_cb->lock);
174 job->patched_cb->cs_cnt++;
175 spin_unlock(&job->patched_cb->lock);
183 spin_lock(&job->user_cb->lock);
184 job->user_cb->cs_cnt--;
185 spin_unlock(&job->user_cb->lock);
186 hl_cb_put(job->user_cb);
187 job->user_cb = NULL;
189 job->job_cb_size = job->user_cb_size;
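Put in order, the cs_parser() hits show the parser descriptor being filled from the job, and the result either adopted as a patched CB (its cs_cnt is bumped and the user CB reference dropped) or, for non-patched queues, the user CB size simply recorded. A condensed sketch of the whole function; the hdev local, the hdev->asic_funcs->cs_parser() call and the rc-based branching are assumptions, since those lines are not part of the match:

        static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
        {
                struct hl_device *hdev = hpriv->hdev;   /* assumed */
                struct hl_cs_parser parser;
                int rc;

                parser.ctx_id = job->cs->ctx->asid;
                parser.cs_sequence = job->cs->sequence;
                parser.job_id = job->id;
                parser.hw_queue_id = job->hw_queue_id;
                parser.job_userptr_list = &job->userptr_list;
                parser.user_cb = job->user_cb;
                parser.user_cb_size = job->user_cb_size;
                parser.queue_type = job->queue_type;
                parser.is_kernel_allocated_cb = job->is_kernel_allocated_cb;
                job->patched_cb = NULL;

                /* assumed: hand the descriptor to the ASIC-specific parser */
                rc = hdev->asic_funcs->cs_parser(hdev, &parser);

                if (is_cb_patched(hdev, job)) {
                        if (!rc) {
                                job->patched_cb = parser.patched_cb;
                                job->job_cb_size = parser.patched_cb_size;
                                job->contains_dma_pkt = parser.contains_dma_pkt;

                                spin_lock(&job->patched_cb->lock);
                                job->patched_cb->cs_cnt++;
                                spin_unlock(&job->patched_cb->lock);
                        }

                        /* the original user CB is not needed once it was parsed */
                        spin_lock(&job->user_cb->lock);
                        job->user_cb->cs_cnt--;
                        spin_unlock(&job->user_cb->lock);
                        hl_cb_put(job->user_cb);
                        job->user_cb = NULL;
                } else if (!rc) {
                        job->job_cb_size = job->user_cb_size;
                }

                return rc;
        }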
195 static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
197 struct hl_cs *cs = job->cs;
199 if (is_cb_patched(hdev, job)) {
200 hl_userptr_delete_list(hdev, &job->userptr_list);
206 if (job->patched_cb) {
207 spin_lock(&job->patched_cb->lock);
208 job->patched_cb->cs_cnt--;
209 spin_unlock(&job->patched_cb->lock);
211 hl_cb_put(job->patched_cb);
219 if (job->queue_type == QUEUE_TYPE_HW &&
220 job->is_kernel_allocated_cb && hdev->mmu_enable) {
221 spin_lock(&job->user_cb->lock);
222 job->user_cb->cs_cnt--;
223 spin_unlock(&job->user_cb->lock);
225 hl_cb_put(job->user_cb);
233 list_del(&job->cs_node);
236 hl_debugfs_remove_job(hdev, job);
238 if (job->queue_type == QUEUE_TYPE_EXT ||
239 job->queue_type == QUEUE_TYPE_HW)
242 kfree(job);
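Read together, the free_job() hits show the teardown path: the userptr list and patched CB are released for patched jobs, a driver-allocated user CB is released for H/W-queue jobs running with the MMU enabled, and the job is unlinked from its CS, removed from debugfs and freed. A consolidated sketch; the cs->job_lock locking around list_del() and the cs_put() completion-reference drop are filled in as assumptions:

        static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
        {
                struct hl_cs *cs = job->cs;

                if (is_cb_patched(hdev, job)) {
                        hl_userptr_delete_list(hdev, &job->userptr_list);

                        /* the patched CB may be absent if we arrive from rollback */
                        if (job->patched_cb) {
                                spin_lock(&job->patched_cb->lock);
                                job->patched_cb->cs_cnt--;
                                spin_unlock(&job->patched_cb->lock);

                                hl_cb_put(job->patched_cb);
                        }
                }

                /* H/W-queue jobs with a driver-allocated CB and the MMU enabled
                 * keep their user CB past cs_parser(), so release it here.
                 */
                if (job->queue_type == QUEUE_TYPE_HW &&
                                job->is_kernel_allocated_cb && hdev->mmu_enable) {
                        spin_lock(&job->user_cb->lock);
                        job->user_cb->cs_cnt--;
                        spin_unlock(&job->user_cb->lock);

                        hl_cb_put(job->user_cb);
                }

                spin_lock(&cs->job_lock);               /* assumed lock */
                list_del(&job->cs_node);
                spin_unlock(&cs->job_lock);

                hl_debugfs_remove_job(hdev, job);

                /* drop the completion reference taken for EXT/HW queue jobs */
                if (job->queue_type == QUEUE_TYPE_EXT ||
                                job->queue_type == QUEUE_TYPE_HW)
                        cs_put(cs);                     /* assumed helper */

                kfree(job);
        }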
264 struct hl_cs_job *job, *tmp;
276 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
277 free_job(hdev, job);
480 struct hl_cs_job *job, *tmp;
482 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
483 free_job(hdev, job);
509 struct hl_cs_job *job = container_of(work, struct hl_cs_job,
511 struct hl_cs *cs = job->cs;
514 /* job is no longer needed */
515 free_job(hdev, job);
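The hits at 509-515 come from the work-queue completion handler for external-queue jobs, wired up by the INIT_WORK() at line 603. A minimal sketch of the whole handler; the hdev lookup through cs->ctx->hdev is an assumption:

        static void job_wq_completion(struct work_struct *work)
        {
                struct hl_cs_job *job = container_of(work, struct hl_cs_job,
                                                        finish_work);
                struct hl_cs *cs = job->cs;
                struct hl_device *hdev = cs->ctx->hdev; /* assumed */

                /* job is no longer needed */
                free_job(hdev, job);
        }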
590 struct hl_cs_job *job;
592 job = kzalloc(sizeof(*job), GFP_ATOMIC);
593 if (!job)
596 job->queue_type = queue_type;
597 job->is_kernel_allocated_cb = is_kernel_allocated_cb;
599 if (is_cb_patched(hdev, job))
600 INIT_LIST_HEAD(&job->userptr_list);
602 if (job->queue_type == QUEUE_TYPE_EXT)
603 INIT_WORK(&job->finish_work, job_wq_completion);
605 return job;
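Lines 590-605 cover essentially the whole allocator: a zeroed job is allocated with GFP_ATOMIC, tagged with its queue type and CB ownership, and only gets a userptr list or completion work where those will actually be used. A sketch; the function signature is inferred from the call sites at lines 684 and 920, and the NULL return on allocation failure is an assumption:

        struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev,
                        enum hl_queue_type queue_type, bool is_kernel_allocated_cb)
        {
                struct hl_cs_job *job;

                job = kzalloc(sizeof(*job), GFP_ATOMIC);
                if (!job)
                        return NULL;                    /* assumed */

                job->queue_type = queue_type;
                job->is_kernel_allocated_cb = is_kernel_allocated_cb;

                /* only patched-CB jobs track pinned user pages */
                if (is_cb_patched(hdev, job))
                        INIT_LIST_HEAD(&job->userptr_list);

                /* only external-queue jobs complete through the work queue */
                if (job->queue_type == QUEUE_TYPE_EXT)
                        INIT_WORK(&job->finish_work, job_wq_completion);

                return job;
        }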
613 struct hl_cs_job *job;
684 job = hl_cs_allocate_job(hdev, queue_type,
686 if (!job) {
688 dev_err(hdev->dev, "Failed to allocate a new job\n");
696 job->id = i + 1;
697 job->cs = cs;
698 job->user_cb = cb;
699 job->user_cb_size = chunk->cb_size;
700 job->hw_queue_id = chunk->queue_index;
702 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
704 list_add_tail(&job->cs_node, &cs->job_list);
712 if (job->queue_type == QUEUE_TYPE_EXT ||
713 job->queue_type == QUEUE_TYPE_HW)
716 hl_debugfs_add_job(hdev, job);
718 rc = cs_parser(hpriv, job);
723 cs->ctx->asid, cs->sequence, job->id, rc);
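In the default submission path (lines 684-723) each user chunk becomes one job: the job is bound to its CS, CB and queue, counted in jobs_in_queue_cnt, queued on cs->job_list, registered in debugfs and then parsed. A hedged sketch of the per-chunk body; the completion-reference grab (cs_get()), the error labels and the dev_err() format string are assumptions, since those lines are not matched:

        /* inside the loop over the user's chunks */
        job = hl_cs_allocate_job(hdev, queue_type, is_kernel_allocated_cb);
        if (!job) {
                dev_err(hdev->dev, "Failed to allocate a new job\n");
                rc = -ENOMEM;
                goto free_cs_object;                    /* assumed label */
        }

        job->id = i + 1;
        job->cs = cs;
        job->user_cb = cb;
        job->user_cb_size = chunk->cb_size;
        job->hw_queue_id = chunk->queue_index;

        cs->jobs_in_queue_cnt[job->hw_queue_id]++;

        list_add_tail(&job->cs_node, &cs->job_list);

        /* only EXT/HW queue jobs generate a completion, so only they
         * hold a reference on the CS (assumed helper)
         */
        if (job->queue_type == QUEUE_TYPE_EXT ||
                        job->queue_type == QUEUE_TYPE_HW)
                cs_get(cs);

        hl_debugfs_add_job(hdev, job);

        rc = cs_parser(hpriv, job);
        if (rc) {
                dev_err(hdev->dev, "Failed to parse JOB %d.%llu.%d, err %d\n",
                        cs->ctx->asid, cs->sequence, job->id, rc);
                goto free_cs_object;                    /* assumed label */
        }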
776 struct hl_cs_job *job;
920 job = hl_cs_allocate_job(hdev, q_type, true);
921 if (!job) {
923 dev_err(hdev->dev, "Failed to allocate a new job\n");
937 kfree(job);
942 job->id = 0;
943 job->cs = cs;
944 job->user_cb = cb;
945 job->user_cb->cs_cnt++;
946 job->user_cb_size = cb_size;
947 job->hw_queue_id = q_idx;
955 job->patched_cb = job->user_cb;
956 job->job_cb_size = job->user_cb_size;
959 cs->jobs_in_queue_cnt[job->hw_queue_id]++;
961 list_add_tail(&job->cs_node, &cs->job_list);
966 hl_debugfs_add_job(hdev, job);
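The signal/wait path (lines 920-966) builds a single driver-owned job around a kernel-allocated CB: on an intermediate failure the job is simply kfree'd (line 937); otherwise it is bound to the CS with id 0, the CB's cs_cnt is bumped directly instead of going through cs_parser(), patched_cb aliases user_cb, and the job is queued and registered in debugfs. A short sketch of the setup; the error label and the cs_get() call between lines 961 and 966 are assumptions:

        job = hl_cs_allocate_job(hdev, q_type, true);
        if (!job) {
                dev_err(hdev->dev, "Failed to allocate a new job\n");
                rc = -ENOMEM;
                goto put_cs;                            /* assumed label */
        }

        job->id = 0;
        job->cs = cs;
        job->user_cb = cb;
        job->user_cb->cs_cnt++;         /* keep the CB alive for the job's lifetime */
        job->user_cb_size = cb_size;
        job->hw_queue_id = q_idx;

        /* the kernel-built CB needs no parsing, so it doubles as the patched CB */
        job->patched_cb = job->user_cb;
        job->job_cb_size = job->user_cb_size;

        cs->jobs_in_queue_cnt[job->hw_queue_id]++;

        list_add_tail(&job->cs_node, &cs->job_list);

        cs_get(cs);                     /* assumed: completion reference */

        hl_debugfs_add_job(hdev, job);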