Lines Matching refs:job
18 * other jobs competing for the hardware. Otherwise we might end up with job
20 * time out because of system load, but because of the job being too long.
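
The FIXME comment at file lines 18-20 argues for keeping the number of in-flight hardware submissions small and the per-job timeout generous, so that queue depth alone can never time a job out. In the surrounding file that intent is expressed as a pair of constants along the lines of the sketch below; treat the exact names and values as assumptions:

    #define NOUVEAU_SCHED_HW_SUBMISSIONS	1	/* one job on the hardware at a time */
    #define NOUVEAU_SCHED_JOB_TIMEOUT_MS	10000	/* generous enough that load alone won't trigger a timeout */
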
30 nouveau_job_init(struct nouveau_job *job,
36 job->file_priv = args->file_priv;
37 job->cli = nouveau_cli(args->file_priv);
38 job->entity = entity;
40 job->sync = args->sync;
41 job->resv_usage = args->resv_usage;
43 job->ops = args->ops;
45 job->in_sync.count = args->in_sync.count;
46 if (job->in_sync.count) {
47 if (job->sync)
50 job->in_sync.data = kmemdup(args->in_sync.s,
54 if (!job->in_sync.data)
58 job->out_sync.count = args->out_sync.count;
59 if (job->out_sync.count) {
60 if (job->sync) {
65 job->out_sync.data = kmemdup(args->out_sync.s,
69 if (!job->out_sync.data) {
74 job->out_sync.objs = kcalloc(job->out_sync.count,
75 sizeof(*job->out_sync.objs),
77 if (!job->out_sync.objs) {
82 job->out_sync.chains = kcalloc(job->out_sync.count,
83 sizeof(*job->out_sync.chains),
85 if (!job->out_sync.chains) {
92 ret = drm_sched_job_init(&job->base, &entity->base, NULL);
96 job->state = NOUVEAU_JOB_INITIALIZED;
101 kfree(job->out_sync.chains);
103 kfree(job->out_sync.objs);
105 kfree(job->out_sync.data);
107 kfree(job->in_sync.data);
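
The kfree() calls at file lines 101-107 are the error-unwind tail of nouveau_job_init(), reached from the allocation failures above. A sketch of how they are presumably chained with goto labels (label names are illustrative, not quoted from the file):

    err_free_chains:
    	kfree(job->out_sync.chains);
    err_free_objs:
    	kfree(job->out_sync.objs);
    err_free_out_sync:
    	kfree(job->out_sync.data);
    err_free_in_sync:
    	kfree(job->in_sync.data);
    	return ret;
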
112 nouveau_job_free(struct nouveau_job *job)
114 kfree(job->in_sync.data);
115 kfree(job->out_sync.data);
116 kfree(job->out_sync.objs);
117 kfree(job->out_sync.chains);
120 void nouveau_job_fini(struct nouveau_job *job)
122 dma_fence_put(job->done_fence);
123 drm_sched_job_cleanup(&job->base);
124 job->ops->free(job);
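
nouveau_job_fini() dispatches into job->ops->free(), and later lines reference ops->submit(), ops->armed_submit(), ops->run() and ops->timeout(). The listing therefore implies a per-job-type ops table roughly like the sketch below; member order and exact signatures are inferred from the lines above, not quoted:

    struct nouveau_job_ops {
    	int (*submit)(struct nouveau_job *);		/* optional, called before the job is armed */
    	void (*armed_submit)(struct nouveau_job *);	/* optional, called once the job is armed */
    	struct dma_fence *(*run)(struct nouveau_job *);	/* backs the scheduler's run_job callback */
    	void (*free)(struct nouveau_job *);		/* final teardown, called from nouveau_job_fini() */
    	enum drm_gpu_sched_stat (*timeout)(struct nouveau_job *);
    };
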
128 sync_find_fence(struct nouveau_job *job,
143 ret = drm_syncobj_find_fence(job->file_priv,
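
sync_find_fence() (file line 128) turns a struct drm_nouveau_sync entry into a dma_fence via the drm_syncobj_find_fence() call at file line 143, where fence is the caller-provided struct dma_fence ** out parameter. A condensed sketch of the lookup; the timeline-point handling and the DRM_NOUVEAU_SYNC_* flag usage are inferred from the uAPI rather than quoted:

    u64 point = 0;

    if ((sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK) == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
    	point = sync->timeline_value;	/* binary syncobjs stay at point 0 */

    ret = drm_syncobj_find_fence(job->file_priv, sync->handle, point,
    			     0 /* flags */, fence);
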
153 nouveau_job_add_deps(struct nouveau_job *job)
158 for (i = 0; i < job->in_sync.count; i++) {
159 struct drm_nouveau_sync *sync = &job->in_sync.data[i];
161 ret = sync_find_fence(job, sync, &in_fence);
163 NV_PRINTK(warn, job->cli,
169 ret = drm_sched_job_add_dependency(&job->base, in_fence);
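
nouveau_job_add_deps() (file lines 153-169) feeds each resolved in-fence to the scheduler. drm_sched_job_add_dependency() takes over the fence reference in both the success and error cases, so the loop needs no dma_fence_put() of its own. A minimal sketch of the loop body, with the warning print omitted:

    ret = sync_find_fence(job, sync, &in_fence);
    if (ret)
    	return ret;

    /* The scheduler consumes the fence reference, even on error. */
    ret = drm_sched_job_add_dependency(&job->base, in_fence);
    if (ret)
    	return ret;
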
178 nouveau_job_fence_attach_cleanup(struct nouveau_job *job)
182 for (i = 0; i < job->out_sync.count; i++) {
183 struct drm_syncobj *obj = job->out_sync.objs[i];
184 struct dma_fence_chain *chain = job->out_sync.chains[i];
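
The cleanup helper drops whatever the prepare step acquired for each out-sync slot: presumably it puts the syncobj reference and frees any pre-allocated fence chain, roughly:

    if (obj)
    	drm_syncobj_put(obj);
    if (chain)
    	dma_fence_chain_free(chain);
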
195 nouveau_job_fence_attach_prepare(struct nouveau_job *job)
199 for (i = 0; i < job->out_sync.count; i++) {
200 struct drm_nouveau_sync *sync = &job->out_sync.data[i];
201 struct drm_syncobj **pobj = &job->out_sync.objs[i];
202 struct dma_fence_chain **pchain = &job->out_sync.chains[i];
211 *pobj = drm_syncobj_find(job->file_priv, sync->handle);
213 NV_PRINTK(warn, job->cli,
232 nouveau_job_fence_attach_cleanup(job);
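
For each out-sync entry, nouveau_job_fence_attach_prepare() looks up the syncobj by handle (file line 211) and, for timeline syncobjs, pre-allocates a dma_fence_chain so the later attach step cannot fail, unwinding through nouveau_job_fence_attach_cleanup() at file line 232 on error. A hedged sketch of that per-entry logic (label name assumed, error handling trimmed):

    if (!*pobj) {
    	ret = -ENOENT;
    	goto err_sync_cleanup;
    }

    if ((sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK) == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ) {
    	*pchain = dma_fence_chain_alloc();
    	if (!*pchain) {
    		ret = -ENOMEM;
    		goto err_sync_cleanup;
    	}
    }
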
237 nouveau_job_fence_attach(struct nouveau_job *job)
239 struct dma_fence *fence = job->done_fence;
242 for (i = 0; i < job->out_sync.count; i++) {
243 struct drm_nouveau_sync *sync = &job->out_sync.data[i];
244 struct drm_syncobj **pobj = &job->out_sync.objs[i];
245 struct dma_fence_chain **pchain = &job->out_sync.chains[i];
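
nouveau_job_fence_attach() then publishes job->done_fence through each syncobj: a timeline syncobj gets the fence added as a new chain point at sync->timeline_value, a binary syncobj simply has its fence replaced. A sketch of the per-entry body; the reference drop and pointer reset at the end are assumed:

    if ((sync->flags & DRM_NOUVEAU_SYNC_TYPE_MASK) == DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ)
    	drm_syncobj_add_point(*pobj, *pchain, fence, sync->timeline_value);
    else
    	drm_syncobj_replace_fence(*pobj, fence);

    drm_syncobj_put(*pobj);
    *pobj = NULL;
    *pchain = NULL;
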
262 nouveau_job_submit(struct nouveau_job *job)
264 struct nouveau_sched_entity *entity = to_nouveau_sched_entity(job->base.entity);
268 ret = nouveau_job_add_deps(job);
272 ret = nouveau_job_fence_attach_prepare(job);
276 /* Make sure the job appears on the sched_entity's queue in the same
284 if (job->ops->submit) {
285 ret = job->ops->submit(job);
290 drm_sched_job_arm(&job->base);
291 job->done_fence = dma_fence_get(&job->base.s_fence->finished);
292 if (job->sync)
293 done_fence = dma_fence_get(job->done_fence);
295 /* If a sched job depends on a dma-fence from a job from the same GPU
297 * scheduler does only wait for the particular job to be scheduled,
298 * rather than for the job to fully complete. This is due to the GPU
305 * out-fences in order to force the scheduler to wait for full job
315 set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &job->done_fence->flags);
317 if (job->ops->armed_submit)
318 job->ops->armed_submit(job);
320 nouveau_job_fence_attach(job);
322 /* Set job state before pushing the job to the scheduler,
323 * such that we do not overwrite the job state set in run().
325 job->state = NOUVEAU_JOB_SUBMIT_SUCCESS;
327 drm_sched_entity_push_job(&job->base);
340 nouveau_job_fence_attach_cleanup(job);
342 job->state = NOUVEAU_JOB_SUBMIT_FAILED;
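
On the success path, submit pushes the armed job and then, if job->sync was requested, blocks on the extra done_fence reference taken at file lines 292-293. A sketch of that tail, assuming the per-entity ordering lock hinted at by the comment at line 276 is a mutex named entity->mutex:

    drm_sched_entity_push_job(&job->base);
    mutex_unlock(&entity->mutex);	/* pairs with the ordering lock taken before ops->submit() */

    if (done_fence) {
    	dma_fence_wait(done_fence, true);	/* interruptible wait for full job completion */
    	dma_fence_put(done_fence);
    }

    return 0;
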
354 nouveau_job_run(struct nouveau_job *job)
358 fence = job->ops->run(job);
360 job->state = NOUVEAU_JOB_RUN_FAILED;
362 job->state = NOUVEAU_JOB_RUN_SUCCESS;
370 struct nouveau_job *job = to_nouveau_job(sched_job);
372 return nouveau_job_run(job);
379 struct nouveau_job *job = to_nouveau_job(sched_job);
384 if (job->ops->timeout)
385 stat = job->ops->timeout(job);
387 NV_PRINTK(warn, job->cli, "Generic job timeout.\n");
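
The timeout handler at file lines 379-387 defers to the job type when it provides a timeout callback and otherwise only warns. A sketch of that dispatch; the default status value is an assumption:

    enum drm_gpu_sched_stat stat = DRM_GPU_SCHED_STAT_ENODEV;	/* default assumed */

    if (job->ops->timeout)
    	stat = job->ops->timeout(job);
    else
    	NV_PRINTK(warn, job->cli, "Generic job timeout.\n");

    return stat;
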
397 struct nouveau_job *job = to_nouveau_job(sched_job);
399 nouveau_job_fini(job);
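
The static functions around file lines 370-399 are thin adapters from drm_sched_job back to nouveau_job; they are presumably wired up through a drm_sched_backend_ops table along these lines (variable name assumed):

    static const struct drm_sched_backend_ops nouveau_sched_ops = {
    	.run_job = nouveau_sched_run_job,		/* -> nouveau_job_run() -> job->ops->run() */
    	.timedout_job = nouveau_sched_timedout_job,	/* -> job->ops->timeout(), if provided */
    	.free_job = nouveau_sched_free_job,		/* -> nouveau_job_fini() */
    };
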
407 spin_lock_init(&entity->job.list.lock);
408 INIT_LIST_HEAD(&entity->job.list.head);
409 init_waitqueue_head(&entity->job.wq);
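
The entity init lines at 407-409 imply that nouveau_sched_entity embeds a small job-tracking block: a spinlock-protected list plus a waitqueue for draining it. A sketch of the assumed layout, with member names following the accessors above:

    struct nouveau_sched_entity {
    	struct drm_sched_entity base;
    	/* ... */
    	struct {
    		struct {
    			struct list_head head;
    			spinlock_t lock;
    		} list;
    		struct wait_queue_head wq;
    	} job;
    };
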