Lines matching refs:job (references to "job" in the Broadcom V3D scheduler, drivers/gpu/drm/v3d/v3d_sched.c; each match is prefixed with its source line number)
10 * scheduler will round-robin between clients to submit the next job.
13 * jobs when bulk background jobs are queued up, we submit a new job
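The two comment fragments above come from the DOC comment at the top of the file: each DRM fd gets its own scheduler entity per hardware queue, and the shared DRM GPU scheduler round-robins between those entities when picking the next job. As a reminder of what that per-client setup looks like, here is a minimal sketch of an open hook; nothing in it is quoted from this listing, and the function name, the v3d_file_priv fields and the error handling follow my reading of the upstream driver and should be checked against v3d_drv.c.

/* Sketch: one scheduler entity per client (DRM fd) per hardware queue,
 * which is what lets the scheduler round-robin between clients.
 */
static int v3d_open_sketch(struct drm_device *dev, struct drm_file *file)
{
        struct v3d_dev *v3d = to_v3d_dev(dev);
        struct v3d_file_priv *v3d_priv;
        struct drm_gpu_scheduler *sched;
        int i;

        v3d_priv = kzalloc(sizeof(*v3d_priv), GFP_KERNEL);
        if (!v3d_priv)
                return -ENOMEM;

        v3d_priv->v3d = v3d;

        for (i = 0; i < V3D_MAX_QUEUES; i++) {
                sched = &v3d->queue[i].sched;
                /* One entity per queue; the scheduler round-robins
                 * between the clients' entities when submitting.
                 */
                drm_sched_entity_init(&v3d_priv->sched_entity[i],
                                      DRM_SCHED_PRIORITY_NORMAL,
                                      &sched, 1, NULL);
        }

        file->driver_priv = v3d_priv;
        return 0;
}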
60 struct v3d_job *job = to_v3d_job(sched_job);
62 v3d_job_cleanup(job);
66 v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
68 if (job->perfmon != v3d->active_perfmon)
71 if (job->perfmon && v3d->active_perfmon != job->perfmon)
72 v3d_perfmon_start(v3d, job->perfmon);
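The two v3d_switch_perfmon() matches above only show the lines that contain "job"; the call that stops the previously active perfmon does not match and is therefore missing from the listing. A minimal sketch of the whole helper, reconstructed around the matched lines (the v3d_perfmon_stop() call and its third argument are recalled from the upstream perfmon code, not quoted here):

static void
v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
{
        /* The incoming job uses a different perfmon (possibly none):
         * stop the one currently counting.  This line has no "job"
         * reference, so it does not appear in the listing above.
         */
        if (job->perfmon != v3d->active_perfmon)
                v3d_perfmon_stop(v3d, v3d->active_perfmon, true);

        /* Start the job's own perfmon unless it is already active. */
        if (job->perfmon && v3d->active_perfmon != job->perfmon)
                v3d_perfmon_start(v3d, job->perfmon);
}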
77 struct v3d_bin_job *job = to_bin_job(sched_job);
78 struct v3d_dev *v3d = job->base.v3d;
83 if (unlikely(job->base.base.s_fence->finished.error))
90 v3d->bin_job = job;
92 * reuse the overflow attached to a previous job.
103 if (job->base.irq_fence)
104 dma_fence_put(job->base.irq_fence);
105 job->base.irq_fence = dma_fence_get(fence);
108 job->start, job->end);
110 v3d_switch_perfmon(v3d, &job->base);
113 * Writing the end register is what starts the job.
115 if (job->qma) {
116 V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
117 V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
119 if (job->qts) {
122 job->qts);
124 V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
125 V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
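Read together, the v3d_bin_job_run() matches above give the submission sequence for the binner: publish the job as v3d->bin_job, drop the reference to the previous hardware fence and cache the new one in irq_fence, switch performance monitors, then program the CLE CT0 queue registers, where writing the end address (CT0QEA) is what actually starts the hardware. A condensed sketch of that register tail; the V3D_CLE_CT0QTS_ENABLE flag is recalled from the driver rather than shown in the matches, and locking, cache invalidation, fence creation and tracing are deliberately omitted:

/* Sketch of the register kick at the end of v3d_bin_job_run(). */
static void v3d_bin_kick_sketch(struct v3d_dev *v3d, struct v3d_bin_job *job)
{
        if (job->qma) {
                /* Submitted tile allocation memory address and size. */
                V3D_CORE_WRITE(0, V3D_CLE_CT0QMA, job->qma);
                V3D_CORE_WRITE(0, V3D_CLE_CT0QMS, job->qms);
        }
        if (job->qts) {
                /* Tile state base address. */
                V3D_CORE_WRITE(0, V3D_CLE_CT0QTS,
                               V3D_CLE_CT0QTS_ENABLE | job->qts);
        }
        /* Control list start, then end; the end write kicks the job. */
        V3D_CORE_WRITE(0, V3D_CLE_CT0QBA, job->start);
        V3D_CORE_WRITE(0, V3D_CLE_CT0QEA, job->end);
}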
132 struct v3d_render_job *job = to_render_job(sched_job);
133 struct v3d_dev *v3d = job->base.v3d;
137 if (unlikely(job->base.base.s_fence->finished.error))
140 v3d->render_job = job;
154 if (job->base.irq_fence)
155 dma_fence_put(job->base.irq_fence);
156 job->base.irq_fence = dma_fence_get(fence);
159 job->start, job->end);
161 v3d_switch_perfmon(v3d, &job->base);
166 * Writing the end register is what starts the job.
168 V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
169 V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
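The render path mirrors the binner but is simpler: v3d_render_job_run() has no QMA/QMS/QTS setup and kicks control list queue 1 instead of queue 0. The repeated irq_fence put/get pair (lines 154-156 here, and the equivalent lines in every other run callback) is the same pattern throughout: release the reference to the previous job's hardware fence before caching a reference to the new one. A minimal sketch of the render kick, restricted to what the matches show:

/* Sketch of the end of v3d_render_job_run(): as with the binner,
 * writing the end address register (CT1QEA) starts the job.
 */
static void v3d_render_kick_sketch(struct v3d_dev *v3d,
                                   struct v3d_render_job *job)
{
        V3D_CORE_WRITE(0, V3D_CLE_CT1QBA, job->start);
        V3D_CORE_WRITE(0, V3D_CLE_CT1QEA, job->end);
}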
177 struct v3d_tfu_job *job = to_tfu_job(sched_job);
178 struct v3d_dev *v3d = job->base.v3d;
186 v3d->tfu_job = job;
187 if (job->base.irq_fence)
188 dma_fence_put(job->base.irq_fence);
189 job->base.irq_fence = dma_fence_get(fence);
193 V3D_WRITE(V3D_TFU_IIA, job->args.iia);
194 V3D_WRITE(V3D_TFU_IIS, job->args.iis);
195 V3D_WRITE(V3D_TFU_ICA, job->args.ica);
196 V3D_WRITE(V3D_TFU_IUA, job->args.iua);
197 V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
198 V3D_WRITE(V3D_TFU_IOS, job->args.ios);
199 V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
200 if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
201 V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
202 V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
203 V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
205 /* ICFG kicks off the job. */
206 V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
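For the TFU (texture formatting unit) the matches show the whole programming sequence: cache the job as v3d->tfu_job, swap the irq_fence reference, write the input and output descriptor registers, optionally load the extra coefficient registers, and finally write ICFG, which kicks off the job (the OR'ed-in V3D_TFU_ICFG_IOC additionally requests a completion interrupt, going by the driver's IRQ handling). A sketch of the register programming alone, assuming fence creation and tracing happen beforehand as in the other queues; the grouping comments are informal readings of the register mnemonics:

/* Sketch of the TFU register programming from v3d_tfu_job_run(). */
static void v3d_tfu_kick_sketch(struct v3d_dev *v3d, struct v3d_tfu_job *job)
{
        /* Input image descriptor registers. */
        V3D_WRITE(V3D_TFU_IIA, job->args.iia);
        V3D_WRITE(V3D_TFU_IIS, job->args.iis);
        V3D_WRITE(V3D_TFU_ICA, job->args.ica);
        V3D_WRITE(V3D_TFU_IUA, job->args.iua);
        /* Output image descriptor registers. */
        V3D_WRITE(V3D_TFU_IOA, job->args.ioa);
        V3D_WRITE(V3D_TFU_IOS, job->args.ios);

        /* Conversion coefficients; COEF1..3 only matter when COEF0
         * sets the USECOEF bit.
         */
        V3D_WRITE(V3D_TFU_COEF0, job->args.coef[0]);
        if (job->args.coef[0] & V3D_TFU_COEF0_USECOEF) {
                V3D_WRITE(V3D_TFU_COEF1, job->args.coef[1]);
                V3D_WRITE(V3D_TFU_COEF2, job->args.coef[2]);
                V3D_WRITE(V3D_TFU_COEF3, job->args.coef[3]);
        }

        /* ICFG kicks off the job. */
        V3D_WRITE(V3D_TFU_ICFG, job->args.icfg | V3D_TFU_ICFG_IOC);
}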
214 struct v3d_csd_job *job = to_csd_job(sched_job);
215 struct v3d_dev *v3d = job->base.v3d;
220 v3d->csd_job = job;
228 if (job->base.irq_fence)
229 dma_fence_put(job->base.irq_fence);
230 job->base.irq_fence = dma_fence_get(fence);
234 v3d_switch_perfmon(v3d, &job->base);
237 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i, job->args.cfg[i]);
238 /* CFG0 write kicks off the job. */
239 V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
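The CSD (compute shader dispatch) path has the same shape. Only the body of the configuration loop (line 237) references "job", so the loop header itself is not visible in this listing; the sketch below states the loop bounds as an assumption based on the seven-entry cfg[] array in the CSD submit UAPI. The registers are written back to front so that the final CFG0 write is the one that starts the dispatch:

/* Sketch of the dispatch at the end of v3d_csd_job_run(). */
static void v3d_csd_kick_sketch(struct v3d_dev *v3d, struct v3d_csd_job *job)
{
        int i;

        /* Assumed bounds: queue CFG1..CFG6 first... */
        for (i = 1; i <= 6; i++)
                V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0 + 4 * i,
                               job->args.cfg[i]);

        /* ...because the CFG0 write kicks off the job. */
        V3D_CORE_WRITE(0, V3D_CSD_QUEUED_CFG0, job->args.cfg[0]);
}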
247 struct v3d_job *job = to_v3d_job(sched_job);
248 struct v3d_dev *v3d = job->v3d;
294 struct v3d_job *job = to_v3d_job(sched_job);
295 struct v3d_dev *v3d = job->v3d;
311 struct v3d_bin_job *job = to_bin_job(sched_job);
314 &job->timedout_ctca, &job->timedout_ctra);
320 struct v3d_render_job *job = to_render_job(sched_job);
323 &job->timedout_ctca, &job->timedout_ctra);
329 struct v3d_job *job = to_v3d_job(sched_job);
331 return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
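Lines 329-331 are the generic timeout handler: every queue eventually falls back to v3d_gpu_reset_for_timeout() once it decides the hardware is actually stuck. The listing does not show that helper's body, so the following is a sketch recalled from the upstream driver rather than taken from the matches: stop all schedulers, mark the offending job, reset the GPU, then resubmit and restart. Treat the exact calls and arguments as assumptions to verify against v3d_sched.c.

/* Sketch of v3d_gpu_reset_for_timeout(): full GPU reset shared by all
 * queues' timeout handlers.
 */
static enum drm_gpu_sched_stat
v3d_gpu_reset_for_timeout_sketch(struct v3d_dev *v3d,
                                 struct drm_sched_job *sched_job)
{
        enum v3d_queue q;

        mutex_lock(&v3d->reset_lock);

        /* Block all queues' schedulers while we reset. */
        for (q = 0; q < V3D_MAX_QUEUES; q++)
                drm_sched_stop(&v3d->queue[q].sched, sched_job);

        if (sched_job)
                drm_sched_increase_karma(sched_job);

        /* Get the GPU back into its initial state. */
        v3d_reset(v3d);

        for (q = 0; q < V3D_MAX_QUEUES; q++)
                drm_sched_resubmit_jobs(&v3d->queue[q].sched);

        /* Unblock the schedulers and restart their jobs. */
        for (q = 0; q < V3D_MAX_QUEUES; q++)
                drm_sched_start(&v3d->queue[q].sched, true);

        mutex_unlock(&v3d->reset_lock);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}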
337 struct v3d_csd_job *job = to_csd_job(sched_job);
338 struct v3d_dev *v3d = job->base.v3d;
344 if (job->timedout_batches != batches) {
345 job->timedout_batches = batches;
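The remaining timeout matches are the "is the hardware still making progress?" checks. The bin and render handlers (lines 311-323) sample the CLE current and return addresses into timedout_ctca/timedout_ctra and only reset when neither has moved; the CSD handler (lines 337-345) does the same with the number of remaining batches. A sketch of the CSD variant built around the two matched lines; the progress register name and the return values come from my reading of the driver and the DRM scheduler API, not from this listing:

/* Sketch of v3d_csd_job_timedout(): if the batch counter moved since
 * the last timeout, assume forward progress, remember the new value
 * and skip the reset so the scheduler re-arms its timer.
 */
static enum drm_gpu_sched_stat
v3d_csd_job_timedout_sketch(struct drm_sched_job *sched_job)
{
        struct v3d_csd_job *job = to_csd_job(sched_job);
        struct v3d_dev *v3d = job->base.v3d;
        u32 batches = V3D_CORE_READ(0, V3D_CSD_CURRENT_CFG4); /* assumed reg */

        if (job->timedout_batches != batches) {
                job->timedout_batches = batches;
                return DRM_GPU_SCHED_STAT_NOMINAL;
        }

        /* No progress: fall back to the full GPU reset above. */
        return v3d_gpu_reset_for_timeout(v3d, sched_job);
}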