Lines matching refs: pipe, Lima DRM scheduler (drivers/gpu/drm/lima/lima_sched.c); each match is prefixed with its source line number.

21 struct lima_sched_pipe *pipe;
63 return f->pipe->base.name;
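
Lines 21 and 63 come from the fence side: each fence keeps a back-pointer to the pipe that issued it, and the fence's timeline name is simply the name of the drm_gpu_scheduler embedded in that pipe. A minimal sketch, assuming the struct is called lima_fence, that to_lima_fence() is the usual container_of() helper, and that line 63 sits in the dma_fence_ops get_timeline_name callback (none of those names appear in the matches):

struct lima_fence {
	struct dma_fence base;
	struct lima_sched_pipe *pipe;	/* pipe that issued this fence */
};

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
	return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	struct lima_fence *f = to_lima_fence(fence);

	/* The timeline is named after the embedded drm_gpu_scheduler. */
	return f->pipe->base.name;
}
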
87 static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
95 fence->pipe = pipe;
96 dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
97 pipe->fence_context, ++pipe->fence_seqno);
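
Lines 87-97 are lima_fence_create(): every fence on a pipe shares the pipe's spinlock and fence context, and sequence numbers come from bumping pipe->fence_seqno. A sketch filling in the parts the matches omit; the allocation from a dedicated slab cache (lima_fence_slab) and the NULL check are assumptions:

static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	/* Assumed allocation path; only the initialization below is
	 * confirmed by the matched lines. */
	fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}
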
155 int lima_sched_context_init(struct lima_sched_pipe *pipe,
159 struct drm_gpu_scheduler *sched = &pipe->base;
165 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
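
Lines 155-165 set up and tear down a per-context scheduler entity whose only run queue is this pipe's embedded scheduler (line 159). A sketch under stated assumptions: the context wraps a drm_sched_entity in a member called base, the init call follows the current drm_sched_entity_init() signature, and fini uses drm_sched_entity_fini(); none of that is confirmed by the matches.

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty)
{
	struct drm_gpu_scheduler *sched = &pipe->base;

	/* The entity can only be scheduled onto this one pipe. */
	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, guilty);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
			     struct lima_sched_context *context)
{
	drm_sched_entity_fini(&context->base);
}
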
205 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
206 struct lima_device *ldev = pipe->ldev;
214 fence = lima_fence_create(pipe);
231 pipe->current_task = task;
248 for (i = 0; i < pipe->num_l2_cache; i++)
249 lima_l2_cache_flush(pipe->l2_cache[i]);
251 lima_vm_put(pipe->current_vm);
252 pipe->current_vm = lima_vm_get(task->vm);
254 if (pipe->bcast_mmu)
255 lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
257 for (i = 0; i < pipe->num_mmu; i++)
258 lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
263 pipe->error = false;
264 pipe->task_run(pipe, task);
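
Lines 205-264 are the run_job callback. The matches show it creating a fence on the pipe (line 214), recording the task as pipe->current_task (line 231), and then preparing the hardware. The hardware-facing tail reads as the sketch below; the wrapper name lima_run_task_on_pipe() is invented, and the else between the broadcast-MMU and per-MMU branches is inferred from the gap between lines 255 and 257:

static void lima_run_task_on_pipe(struct lima_sched_pipe *pipe,
				  struct lima_sched_task *task)
{
	int i;

	/* Flush every L2 cache feeding this pipe's processors. */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	/* Point the MMUs at the task's VM, releasing the previous one. */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);

	/* Clear the error flag and hand the frame to the backend (GP/PP). */
	pipe->error = false;
	pipe->task_run(pipe, task);
}
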
272 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
273 struct lima_ip *ip = pipe->processor[0];
299 size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
332 chunk->size = pipe->frame_size;
333 memcpy(chunk + 1, task->frame, pipe->frame_size);
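
Lines 272-333 belong to the error-dump path: a dump chunk is sized as its header plus the pipe's fixed frame-descriptor size, and the task's frame is copied right after the header. A hypothetical helper showing just that layout; the name lima_dump_frame_chunk() and the kvzalloc() allocation are not from the matches:

static struct lima_dump_chunk *
lima_dump_frame_chunk(struct lima_sched_pipe *pipe, struct lima_sched_task *task)
{
	size_t size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
	struct lima_dump_chunk *chunk = kvzalloc(size, GFP_KERNEL);

	if (!chunk)
		return NULL;

	/* The frame descriptor sits immediately after the chunk header. */
	chunk->size = pipe->frame_size;
	memcpy(chunk + 1, task->frame, pipe->frame_size);

	return chunk;
}
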
401 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
403 struct lima_device *ldev = pipe->ldev;
405 if (!pipe->error)
408 drm_sched_stop(&pipe->base, &task->base);
415 pipe->task_error(pipe);
417 if (pipe->bcast_mmu)
418 lima_mmu_page_fault_resume(pipe->bcast_mmu);
422 for (i = 0; i < pipe->num_mmu; i++)
423 lima_mmu_page_fault_resume(pipe->mmu[i]);
426 lima_vm_put(pipe->current_vm);
427 pipe->current_vm = NULL;
428 pipe->current_task = NULL;
432 drm_sched_resubmit_jobs(&pipe->base);
433 drm_sched_start(&pipe->base, true);
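
Lines 401-433 are the timeout handler. Together they describe the usual drm_sched recovery dance: stop the scheduler around the bad job, let the backend report its error (pipe->task_error), resume whichever MMU may be sitting on a page fault, drop the VM and task the hardware was using, then resubmit and restart. A condensed sketch with an invented wrapper name; locking, guilty-job handling, and the !pipe->error check at line 405 are omitted:

static void lima_pipe_recover_from_timeout(struct lima_sched_pipe *pipe,
					   struct lima_sched_task *task)
{
	int i;

	/* Park the scheduler around the offending job. */
	drm_sched_stop(&pipe->base, &task->base);

	/* Let the GP/PP backend reset its state. */
	pipe->task_error(pipe);

	/* A page fault may be what stalled us: resume the MMU(s). */
	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);

	/* Drop the VM and task the hardware was running. */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	/* Requeue the surviving jobs and let the scheduler run again. */
	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);
}
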
441 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
452 kmem_cache_free(pipe->task_slab, task);
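
Lines 441-452 are the free_job callback; line 452 shows the task memory going back to a per-pipe slab cache. A minimal sketch; the to_lima_task() helper and the drm_sched_job_cleanup() call are assumptions, and dropping the task's BO/VM references is left out:

static void lima_sched_free_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);

	/* Release scheduler bookkeeping, then return the task to the
	 * pipe's dedicated slab cache. */
	drm_sched_job_cleanup(job);
	kmem_cache_free(pipe->task_slab, task);
}
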
463 struct lima_sched_pipe *pipe =
467 for (i = 0; i < pipe->num_l2_cache; i++)
468 lima_l2_cache_flush(pipe->l2_cache[i]);
470 if (pipe->bcast_mmu) {
471 lima_mmu_flush_tlb(pipe->bcast_mmu);
473 for (i = 0; i < pipe->num_mmu; i++)
474 lima_mmu_flush_tlb(pipe->mmu[i]);
477 if (pipe->task_recover(pipe))
478 drm_sched_fault(&pipe->base);
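
Lines 463-478 are the recovery worker queued via pipe->recover_work: flush the L2 caches and MMU TLBs, then ask the backend to recover the interrupted task; if that fails, raise a scheduler fault so the normal timeout path takes over. Reconstructed below almost verbatim from the matches, with the else around the per-MMU loop inferred:

static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	/* If the backend cannot restart the task, fall back to the
	 * scheduler's fault/timeout handling. */
	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}
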
481 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
486 pipe->fence_context = dma_fence_context_alloc(1);
487 spin_lock_init(&pipe->fence_lock);
489 INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
491 return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
494 NULL, name, pipe->ldev->dev);
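
Lines 481-494 are pipe setup: one fence context per pipe, a recovery worker, and a drm_gpu_scheduler embedded as pipe->base. The sketch below keeps the drm_sched_init() arguments that the matches elide (lines 492-493) marked as elided rather than guessing them, so it is a shape, not a compilable body:

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
	/* One fence timeline per pipe, protected by the pipe's own lock. */
	pipe->fence_context = dma_fence_context_alloc(1);
	spin_lock_init(&pipe->fence_lock);

	/* Worker used to recover a faulted but restartable task. */
	INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

	/* Register the embedded scheduler with the DRM GPU scheduler core. */
	return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
			      /* ...arguments elided in the matches... */
			      NULL, name, pipe->ldev->dev);
}
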
497 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
499 drm_sched_fini(&pipe->base);
502 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
504 struct lima_sched_task *task = pipe->current_task;
505 struct lima_device *ldev = pipe->ldev;
507 if (pipe->error) {
509 schedule_work(&pipe->recover_work);
511 drm_sched_fault(&pipe->base);
513 pipe->task_fini(pipe);
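
Lines 502-513 are the completion hook called from the interrupt path: on error, hand off to the recovery worker or raise a scheduler fault; on success, let the backend finish the task. The recoverable test, the else structure, and the fence signalling below are assumptions filled in around the matched lines, and power-management handling via ldev is omitted:

void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;

	if (pipe->error) {
		/* Recoverable faults go to the worker; anything else
		 * becomes a scheduler fault handled by timedout_job(). */
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);
	}
}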