Lines matching refs:pipe

20 struct lima_sched_pipe *pipe;
62 return f->pipe->base.name;
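The return at line 62 comes from the fence's timeline-name callback. A minimal sketch of that callback, assuming the usual container_of-style cast from dma_fence back to lima_fence (the driver likely wraps this in a helper):

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
	/* cast helper is an assumption about the surrounding code */
	struct lima_fence *f = container_of(fence, struct lima_fence, base);

	/* each pipe owns one drm_gpu_scheduler, so its name doubles as the timeline name */
	return f->pipe->base.name;
}
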
86 static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
94 fence->pipe = pipe;
95 dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
96 pipe->fence_context, ++pipe->fence_seqno);
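Lines 86-96 are the fence constructor. A sketch of how they fit together; the allocation and error handling are assumptions (the driver may use a dedicated slab cache rather than kzalloc):

static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
	struct lima_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);	/* allocation strategy assumed */
	if (!fence)
		return NULL;

	fence->pipe = pipe;
	/* every pipe carries its own fence context, lock and seqno counter,
	 * so fences from different pipes never share a timeline */
	dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
		       pipe->fence_context, ++pipe->fence_seqno);

	return fence;
}
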
161 int lima_sched_context_init(struct lima_sched_pipe *pipe,
165 struct drm_gpu_scheduler *sched = &pipe->base;
171 void lima_sched_context_fini(struct lima_sched_pipe *pipe,
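Lines 161-171 bind a userspace context to the pipe's scheduler. A hedged sketch; the exact drm_sched_entity_init() parameter list (priority enum, guilty pointer) differs between kernel versions:

int lima_sched_context_init(struct lima_sched_pipe *pipe,
			    struct lima_sched_context *context,
			    atomic_t *guilty)
{
	/* the entity is scheduled on this pipe's single drm_gpu_scheduler */
	struct drm_gpu_scheduler *sched = &pipe->base;

	return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
				     &sched, 1, guilty);
}
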
223 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
224 struct lima_device *ldev = pipe->ldev;
233 fence = lima_fence_create(pipe);
250 pipe->current_task = task;
267 for (i = 0; i < pipe->num_l2_cache; i++)
268 lima_l2_cache_flush(pipe->l2_cache[i]);
270 lima_vm_put(pipe->current_vm);
271 pipe->current_vm = lima_vm_get(task->vm);
273 if (pipe->bcast_mmu)
274 lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
276 for (i = 0; i < pipe->num_mmu; i++)
277 lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
282 pipe->error = false;
283 pipe->task_run(pipe, task);
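Lines 223-283 are the scheduler's run_job callback. The sketch below shows how the matched lines chain together; the to_lima_task() cast, the task->fence bookkeeping and the else branch guarding the per-core MMU loop are assumptions about the surrounding code:

static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	struct lima_fence *fence;
	int i;

	fence = lima_fence_create(pipe);
	if (!fence)
		return NULL;
	task->fence = &fence->base;

	/* extra reference so the IRQ handler cannot drop the fence under the caller */
	dma_fence_get(task->fence);

	pipe->current_task = task;

	/* flush the L2 caches before starting the GP/PP cores */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	/* switch the MMUs to the submitting task's VM */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = lima_vm_get(task->vm);

	if (pipe->bcast_mmu)
		lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
	else
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);

	/* clear the error flag, then hand off to the GP/PP specific start routine */
	pipe->error = false;
	pipe->task_run(pipe, task);

	return task->fence;
}
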
291 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
292 struct lima_ip *ip = pipe->processor[0];
316 size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
349 chunk->size = pipe->frame_size;
350 memcpy(chunk + 1, task->frame, pipe->frame_size);
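Lines 291-350 belong to the error-dump builder: the captured frame is stored as a chunk whose payload sits directly after its header, which is why the size is sizeof(struct lima_dump_chunk) + pipe->frame_size and the copy target is chunk + 1. A standalone illustration of that idiom; the names here are hypothetical, not the driver's:

#include <stdlib.h>
#include <string.h>

struct example_chunk {
	unsigned int id;	/* hypothetical header fields */
	unsigned int size;
};

static struct example_chunk *pack_chunk(const void *frame, unsigned int frame_size)
{
	/* one allocation: fixed header followed immediately by the variable payload */
	struct example_chunk *chunk = malloc(sizeof(*chunk) + frame_size);

	if (!chunk)
		return NULL;

	chunk->id = 0;
	chunk->size = frame_size;
	memcpy(chunk + 1, frame, frame_size);	/* payload starts right after the header */

	return chunk;
}
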
418 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
420 struct lima_device *ldev = pipe->ldev;
422 if (!pipe->error)
425 drm_sched_stop(&pipe->base, &task->base);
431 pipe->task_error(pipe);
433 if (pipe->bcast_mmu)
434 lima_mmu_page_fault_resume(pipe->bcast_mmu);
438 for (i = 0; i < pipe->num_mmu; i++)
439 lima_mmu_page_fault_resume(pipe->mmu[i]);
442 lima_vm_put(pipe->current_vm);
443 pipe->current_vm = NULL;
444 pipe->current_task = NULL;
448 drm_sched_resubmit_jobs(&pipe->base);
449 drm_sched_start(&pipe->base, true);
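Lines 418-449 implement the timeout/reset path. A hedged sketch of the sequence the matched lines describe; the callback's return type, the log message text and the else branch around the broadcast MMU are assumptions that vary with kernel version:

static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
	struct lima_sched_task *task = to_lima_task(job);	/* cast helper assumed */
	struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
	int i;

	/* only log if the hardware did not already flag an error */
	if (!pipe->error)
		DRM_ERROR("lima job timeout\n");	/* message text is illustrative */

	/* park the scheduler so no new job races with the reset */
	drm_sched_stop(&pipe->base, &task->base);

	/* reset the GP/PP core, then clear any pending MMU page fault */
	pipe->task_error(pipe);

	if (pipe->bcast_mmu)
		lima_mmu_page_fault_resume(pipe->bcast_mmu);
	else
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_page_fault_resume(pipe->mmu[i]);

	/* drop the VM and task the pipe was working on */
	lima_vm_put(pipe->current_vm);
	pipe->current_vm = NULL;
	pipe->current_task = NULL;

	/* requeue the unfinished jobs and let the scheduler run again */
	drm_sched_resubmit_jobs(&pipe->base);
	drm_sched_start(&pipe->base, true);

	return DRM_GPU_SCHED_STAT_NOMINAL;
}
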
455 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
466 kmem_cache_free(pipe->task_slab, task);
478 struct lima_sched_pipe *pipe =
482 for (i = 0; i < pipe->num_l2_cache; i++)
483 lima_l2_cache_flush(pipe->l2_cache[i]);
485 if (pipe->bcast_mmu) {
486 lima_mmu_flush_tlb(pipe->bcast_mmu);
488 for (i = 0; i < pipe->num_mmu; i++)
489 lima_mmu_flush_tlb(pipe->mmu[i]);
492 if (pipe->task_recover(pipe))
493 drm_sched_fault(&pipe->base);
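Lines 478-493 are the deferred recovery handler queued from the task-done path. A sketch, with the else branch around the broadcast MMU assumed:

static void lima_sched_recover_work(struct work_struct *work)
{
	struct lima_sched_pipe *pipe =
		container_of(work, struct lima_sched_pipe, recover_work);
	int i;

	/* same cache/TLB hygiene as a normal job start */
	for (i = 0; i < pipe->num_l2_cache; i++)
		lima_l2_cache_flush(pipe->l2_cache[i]);

	if (pipe->bcast_mmu) {
		lima_mmu_flush_tlb(pipe->bcast_mmu);
	} else {
		for (i = 0; i < pipe->num_mmu; i++)
			lima_mmu_flush_tlb(pipe->mmu[i]);
	}

	/* if the soft recovery fails, fall back to the scheduler's timeout handling */
	if (pipe->task_recover(pipe))
		drm_sched_fault(&pipe->base);
}
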
496 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
501 pipe->fence_context = dma_fence_context_alloc(1);
502 spin_lock_init(&pipe->fence_lock);
504 INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
506 return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
511 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
513 drm_sched_fini(&pipe->base);
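Lines 496-513 set up and tear down the per-pipe scheduler state: a private fence context and lock, the recovery work item, and a drm_gpu_scheduler registered with the driver's callback table. A sketch of that table; the .dependency hook only exists on older kernels and is an assumption here, while run_job, timedout_job and free_job correspond to the functions matched above:

static const struct drm_sched_backend_ops lima_sched_ops = {
	.dependency = lima_sched_dependency,		/* older kernels only; assumption */
	.run_job = lima_sched_run_job,			/* around line 223 */
	.timedout_job = lima_sched_timedout_job,	/* around line 418 */
	.free_job = lima_sched_free_job,		/* around line 455 */
};
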
516 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
518 struct lima_sched_task *task = pipe->current_task;
519 struct lima_device *ldev = pipe->ldev;
521 if (pipe->error) {
523 schedule_work(&pipe->recover_work);
525 drm_sched_fault(&pipe->base);
527 pipe->task_fini(pipe);
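Lines 516-527 run when the hardware reports the current task as finished. A hedged sketch of that flow; the recoverable check and the fence signalling are assumptions about the surrounding code:

void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
	struct lima_sched_task *task = pipe->current_task;

	if (pipe->error) {
		/* try a lightweight recovery first, otherwise trigger the
		 * scheduler's timeout/reset handling */
		if (task && task->recoverable)
			schedule_work(&pipe->recover_work);
		else
			drm_sched_fault(&pipe->base);
	} else {
		pipe->task_fini(pipe);
		dma_fence_signal(task->fence);
	}
}
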