Lines matching refs: job
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
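
These header-comment fragments describe the submission model: each client (DRM fd) gets its own scheduler entity, the shared DRM GPU scheduler round-robins between those entities, and the driver keeps latency low for interactive clients even when bulk background jobs are queued. A minimal sketch of the per-client entity setup this implies, assuming the listing comes from drivers/gpu/drm/v3d/v3d_sched.c and using the drm_sched_entity_init() signature of recent kernels (names and arguments are illustrative and version-dependent):

        /* Sketch: one scheduler entity per client per hardware queue, so the
         * shared scheduler can round-robin between clients.  v3d_file_priv,
         * V3D_MAX_QUEUES and the queue array are assumed from the v3d driver.
         */
        static int v3d_open_sketch(struct drm_device *dev, struct drm_file *file)
        {
                struct v3d_dev *v3d = to_v3d_dev(dev);
                struct v3d_file_priv *v3d_priv;
                struct drm_gpu_scheduler *sched;
                int i;

                v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
                if (!v3d_priv)
                        return -ENOMEM;

                v3d_priv->v3d = v3d;
                for (i = 0; i < V3D_MAX_QUEUES; i++) {
                        sched = &v3d->queue[i].sched;
                        drm_sched_entity_init(&v3d_priv->sched_entity[i],
                                              DRM_SCHED_PRIORITY_NORMAL,
                                              &sched, 1, NULL);
                }
                file->driver_priv = v3d_priv;
                return 0;
        }
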
60 struct v3d_job *job = to_v3d_job(sched_job);
63 v3d_job_put(job);
67 * Returns the fences that the job depends on, one by one.
76 struct v3d_job *job = to_v3d_job(sched_job);
82 if (!xa_empty(&job->deps))
83 return xa_erase(&job->deps, job->last_dep++);
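
The lines above are the scheduler's dependency hook: the job's prerequisite fences are kept in an xarray (job->deps), and the hook hands them back to drm_sched one at a time until the array is empty, at which point returning NULL tells the scheduler the job is ready to run. Reassembled from the matched lines, with the hook's signature assumed from the drm_sched API of that era, the callback presumably reads:

        static struct dma_fence *
        v3d_job_dependency(struct drm_sched_job *sched_job,
                           struct drm_sched_entity *s_entity)
        {
                struct v3d_job *job = to_v3d_job(sched_job);

                /* Hand back one outstanding dependency fence per call;
                 * drm_sched keeps calling until we return NULL, which
                 * means the job is runnable.
                 */
                if (!xa_empty(&job->deps))
                        return xa_erase(&job->deps, job->last_dep++);

                return NULL;
        }
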
90 struct v3d_bin_job *job = to_bin_job(sched_job);
91 struct v3d_dev *v3d = job->base.v3d;
96 if (unlikely(job->base.base.s_fence->finished.error))
103 v3d->bin_job = job;
105 * reuse the overflow attached to a previous job.
116 if (job->base.irq_fence)
117 dma_fence_put(job->base.irq_fence);
118 job->base.irq_fence = dma_fence_get(fence);
121 job->start, job->end);
124 * Writing the end register is what starts the job.
126 if (job->qma) {
127 V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
128 V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
130 if (job->qts) {
133 job->qts);
135 V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
136 V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
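
Taken together, the binner's run callback bails out if a dependency already failed (the s_fence error check), records the job as v3d->bin_job, makes sure the overflow allocation bound to a previous job is not reused, swaps in a fresh completion fence, and then programs the CLE thread-0 registers; as the comment says, writing the end-address register is what starts the job, so CT0QEA goes last. A condensed sketch assembled from the matched lines (locking around the bin_job/overflow update, cache maintenance and the tracepoint are elided; the overflow register name and the QTS enable bit are assumptions):

        static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
        {
                struct v3d_bin_job *job = to_bin_job(sched_job);
                struct v3d_dev *v3d = job->base.v3d;
                struct dma_fence *fence;

                /* A failed dependency has already poisoned the scheduler
                 * fence; don't touch the hardware in that case.
                 */
                if (unlikely(job->base.base.s_fence->finished.error))
                        return NULL;

                v3d->bin_job = job;
                /* Zero the overflow binding so we don't reuse the overflow
                 * memory attached to a previous job (register name assumed).
                 */
                V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0);

                fence = v3d_fence_create(v3d, V3D_BIN);
                if (IS_ERR(fence))
                        return NULL;
                if (job->base.irq_fence)
                        dma_fence_put(job->base.irq_fence);
                job->base.irq_fence = dma_fence_get(fence);

                /* Optional tile-alloc memory and tile-state buffers. */
                if (job->qma) {
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
                }
                if (job->qts)
                        V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
                                       V3D_CLE_CT0QTS_ENABLE | job->qts);

                /* Start address, then end address: the CT0QEA write is what
                 * kicks off the control list.
                 */
                V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
                V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);

                return fence;
        }
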
143 struct v3d_render_job *job = to_render_job(sched_job);
144 struct v3d_dev *v3d = job->base.v3d;
148 if (unlikely(job->base.base.s_fence->finished.error))
151 v3d->render_job = job;
165 if (job->base.irq_fence)
166 dma_fence_put(job->base.irq_fence);
167 job->base.irq_fence = dma_fence_get(fence);
170 job->start, job->end);
175 * Writing the end register is what starts the job.
177 V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
178 V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
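
The render path mirrors the binner: the fence/irq_fence bookkeeping is identical (create a fence for this submission, drop any fence left from an earlier run of the same job, take a reference so the IRQ handler can signal completion), but the control list runs on CLE thread 1 and there is no tile-alloc/tile-state setup, so the kick is just the two address writes. In isolation:

        /* Thread 1 of the CLE executes render control lists (thread 0 is
         * the binner).  As with the binner, writing the end address is
         * what starts the job.
         */
        V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
        V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
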
186 struct v3d_tfu_job *job = to_tfu_job(sched_job);
187 struct v3d_dev *v3d = job->base.v3d;
195 v3d->tfu_job = job;
196 if (job->base.irq_fence)
197 dma_fence_put(job->base.irq_fence);
198 job->base.irq_fence = dma_fence_get(fence);
202 V3D_WRITE(V3D_TFU_IIA, job->args.iia);
203 V3D_WRITE(V3D_TFU_IIS, job->args.iis);
204 V3D_WRITE(V3D_TFU_ICA, job->args.ica);
205 V3D_WRITE(V3D_TFU_IUA, job->args.iua);
206 V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
207 V3D_WRITE(V3D_TFU_IOS, job->args.ios);
208 V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
209 if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
210 V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
211 V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
212 V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
214 /* ICFG kicks off the job. */
215 V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
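
For the TFU (texture formatting unit) there is no control list; the job is described entirely by a handful of registers. The run callback stores the job in v3d->tfu_job, does the same irq_fence swap as the other queues, then programs the input/output addresses and strides, optionally the conversion coefficients (only when COEF0 carries the USECOEF flag), and finally ICFG; that last write, with the interrupt-on-complete bit ORed in, is what kicks off the job. The register-programming portion, reassembled from the matched lines:

        V3D_WRITE(V3D_TFU_IIA, job->args.iia);
        V3D_WRITE(V3D_TFU_IIS, job->args.iis);
        V3D_WRITE(V3D_TFU_ICA, job->args.ica);
        V3D_WRITE(V3D_TFU_IUA, job->args.iua);
        V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
        V3D_WRITE(V3D_TFU_IOS, job->args.ios);
        V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
        /* The remaining coefficients only matter when COEF0 says so. */
        if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
                V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
                V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
                V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
        }
        /* ICFG goes last: this write, with the interrupt-on-complete bit,
         * kicks off the job.
         */
        V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
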
223 struct v3d_csd_job *job = to_csd_job(sched_job);
224 struct v3d_dev *v3d = job->base.v3d;
229 v3d->csd_job = job;
237 if (job->base.irq_fence)
238 dma_fence_put(job->base.irq_fence);
239 job->base.irq_fence = dma_fence_get(fence);
244 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
245 /* CFG0 write kicks off the job. */
246 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
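
The compute (CSD) dispatch writes the queued-config registers in reverse of their numbering: CFG1 and up go in first via the 4-byte-stride loop, and CFG0 is written last because, as the comment notes, the CFG0 write is what queues and kicks off the dispatch. A sketch of that tail, assuming the loop covers CFG1..CFG6 (seven config registers in total), as the register-stride arithmetic suggests:

        int i;

        /* Queue the dispatch parameters.  CFG0 must go last because
         * writing it is what actually kicks off the job.
         */
        for (i = 1; i <= 6; i++)
                V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i,
                               job->args.cfg[i]);
        V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
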
254 struct v3d_job *job = to_v3d_job(sched_job);
255 struct v3d_dev *v3d = job->v3d;
299 struct v3d_job *job = to_v3d_job(sched_job);
300 struct v3d_dev *v3d = job->v3d;
316 struct v3d_bin_job *job = to_bin_job(sched_job);
319 &job->timedout_ctca, &job->timedout_ctra);
325 struct v3d_render_job *job = to_render_job(sched_job);
328 &job->timedout_ctca, &job->timedout_ctra);
334 struct v3d_job *job = to_v3d_job(sched_job);
336 v3d_gpu_reset_for_timeout(job->v3d, sched_job);
342 struct v3d_csd_job *job = to_csd_job(sched_job);
343 struct v3d_dev *v3d = job->base.v3d;
349 if (job->timedout_batches != batches) {
350 job->timedout_batches = batches;
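
The timeout handlers share one pattern: before resetting the GPU they check whether the hardware has advanced since the last timer expiry (current/return address registers for bin and render, a completed-batch count for CSD); if it has, the saved value is updated and the reset is skipped so the scheduler's timer simply re-arms. A sketch of the CSD variant, assuming the older void-returning timedout hook and that the batch count comes from the CSD current-config registers:

        static void v3d_csd_job_timedout(struct drm_sched_job *sched_job)
        {
                struct v3d_csd_job *job = to_csd_job(sched_job);
                struct v3d_dev *v3d = job->base.v3d;
                u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4);

                /* If the job has made progress since the last timeout,
                 * remember the new batch count and let the timer re-arm
                 * instead of resetting the GPU.
                 */
                if (job->timedout_batches != batches) {
                        job->timedout_batches = batches;
                        return;
                }

                v3d_gpu_reset_for_timeout(v3d, sched_job);
        }
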