Lines matching refs: vc4

42 struct vc4_dev *vc4 = to_vc4_dev(dev);
44 mod_timer(&vc4->hangcheck.timer,
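
Lines 42-44 are the tail of the hangcheck re-arm helper. A minimal sketch of the surrounding function, assuming the driver's usual 100 ms hangcheck interval (the constant itself is not part of the matched lines):

/* Sketch, not verbatim source: re-arm the GPU hangcheck timer.
 * The 100 ms period is an assumption. */
static void vc4_queue_hangcheck(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mod_timer(&vc4->hangcheck.timer,
		  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
}
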
74 struct vc4_dev *vc4 = to_vc4_dev(dev);
79 if (!vc4->v3d) {
84 spin_lock_irqsave(&vc4->job_lock, irqflags);
85 kernel_state = vc4->hang_state;
87 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
97 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
101 vc4->hang_state = NULL;
102 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
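
Lines 84-102 come from the GET_HANG_STATE ioctl. The saved crash dump is read and detached under job_lock so that only one caller ever owns and frees it; the early-exit paths when no dump exists or the user's array is too small account for the multiple unlock sites at lines 87, 97, and 102. A hypothetical helper (vc4_claim_hang_state is not a driver symbol) collapsing the idiom into one function:

/* Hypothetical helper illustrating the claim-under-lock idiom of
 * lines 84-102: detach the dump so exactly one caller owns it. */
static struct vc4_hang_state *vc4_claim_hang_state(struct vc4_dev *vc4)
{
	struct vc4_hang_state *kernel_state;
	unsigned long irqflags;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	kernel_state = vc4->hang_state;
	vc4->hang_state = NULL;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	return kernel_state;
}
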
151 struct vc4_dev *vc4 = to_vc4_dev(dev);
165 spin_lock_irqsave(&vc4->job_lock, irqflags);
166 exec[0] = vc4_first_bin_job(vc4);
167 exec[1] = vc4_first_render_job(vc4);
169 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
189 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
227 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
275 spin_lock_irqsave(&vc4->job_lock, irqflags);
276 if (vc4->hang_state) {
277 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
280 vc4->hang_state = kernel_state;
281 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
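
Lines 275-281 are the publish step of the hang-state snapshot: the first crash dump wins, and a later one is discarded rather than overwriting a dump userspace has not collected yet. A sketch (error handling trimmed; vc4_free_hang_state is the driver's own free routine, see line 1310):

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (vc4->hang_state) {
		/* An earlier dump was never collected; keep it. */
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_free_hang_state(dev, kernel_state);
	} else {
		vc4->hang_state = kernel_state;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
	}
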
288 struct vc4_dev *vc4 = to_vc4_dev(dev);
292 mutex_lock(&vc4->power_lock);
293 if (vc4->power_refcount) {
297 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
298 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
300 mutex_unlock(&vc4->power_lock);
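
Lines 288-300 are the GPU reset proper. If V3D holds a power reference, it is bounced through runtime PM; the suspend/resume pair reaches the driver's PM hooks, which reinitialize the hardware. A sketch built from exactly the matched lines (anything beyond them, such as interrupt re-init, is omitted):

static void vc4_reset(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount) {
		/* Power-cycle V3D so the runtime-PM resume hook
		 * brings it back up in a clean state. */
		pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	}
	mutex_unlock(&vc4->power_lock);
}
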
314 struct vc4_dev *vc4 =
317 vc4_save_hang_state(&vc4->base);
319 vc4_reset(&vc4->base);
325 struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
326 struct drm_device *dev = &vc4->base;
331 spin_lock_irqsave(&vc4->job_lock, irqflags);
333 bin_exec = vc4_first_bin_job(vc4);
334 render_exec = vc4_first_render_job(vc4);
338 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
354 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
359 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
365 schedule_work(&vc4->hangcheck.reset_work);
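
Lines 325-365 are the hangcheck timer callback. Timer context may not sleep, and a GPU reset does, so the callback only decides and then defers the reset to the work item armed at line 1279. A condensed control-flow sketch; vc4_hw_made_progress() is hypothetical shorthand for the driver's comparison of saved vs. current command-list addresses:

static void vc4_hangcheck_elapsed(struct timer_list *t)
{
	struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
	unsigned long irqflags;
	bool idle, progress;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	idle = !vc4_first_bin_job(vc4) && !vc4_first_render_job(vc4);
	progress = !idle && vc4_hw_made_progress(vc4); /* hypothetical */
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	if (idle)
		return;		/* nothing queued, stop watching */
	if (progress) {
		vc4_queue_hangcheck(&vc4->base);	/* re-arm */
		return;
	}

	/* No forward progress: reset, but from process context. */
	schedule_work(&vc4->hangcheck.reset_work);
}
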
371 struct vc4_dev *vc4 = to_vc4_dev(dev);
384 struct vc4_dev *vc4 = to_vc4_dev(dev);
389 if (vc4->finished_seqno >= seqno)
399 prepare_to_wait(&vc4->job_wait_queue, &wait,
408 if (vc4->finished_seqno >= seqno)
422 finish_wait(&vc4->job_wait_queue, &wait);
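
Lines 384-422 implement the seqno wait with the standard prepare_to_wait()/finish_wait() idiom: the fast-path test at line 389 avoids the wait queue entirely, and the re-test at line 408 happens after registration so a wakeup between the two cannot be lost. A minimal sketch (timeout bookkeeping omitted):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&vc4->job_wait_queue, &wait,
				TASK_INTERRUPTIBLE);

		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		if (vc4->finished_seqno >= seqno)
			break;

		schedule();
	}
	finish_wait(&vc4->job_wait_queue, &wait);
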
431 struct vc4_dev *vc4 = to_vc4_dev(dev);
450 struct vc4_dev *vc4 = to_vc4_dev(dev);
468 struct vc4_dev *vc4 = to_vc4_dev(dev);
472 exec = vc4_first_bin_job(vc4);
481 if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
482 vc4_perfmon_start(vc4, exec->perfmon);
493 next = vc4_first_bin_job(vc4);
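
Lines 468-493 are from the bin-queue kick. The perfmon test at lines 481-482 avoids restarting a performance monitor that is already active on the hardware, and line 493 re-reads the queue head so that compatible jobs can be chained. A sketch of that gating (control-list submission and tracing trimmed):

	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

	/* Only start the job's perf monitor if it is not the one
	 * already running on the hardware. */
	if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
		vc4_perfmon_start(vc4, exec->perfmon);
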
508 struct vc4_dev *vc4 = to_vc4_dev(dev);
509 struct vc4_exec_info *exec = vc4_first_render_job(vc4);
528 struct vc4_dev *vc4 = to_vc4_dev(dev);
529 bool was_empty = list_empty(&vc4->render_job_list);
531 list_move_tail(&exec->head, &vc4->render_job_list);
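
Lines 528-531 show nearly the whole bin-to-render hand-off, so it can be reconstructed almost verbatim: the job migrates between queues with list_move_tail(), and the render engine is kicked only when the render queue was idle, since a non-empty queue means the hardware will reach the job anyway:

static void vc4_move_job_to_render(struct drm_device *dev,
				   struct vc4_exec_info *exec)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	bool was_empty = list_empty(&vc4->render_job_list);

	list_move_tail(&exec->head, &vc4->render_job_list);
	if (was_empty)
		vc4_submit_next_render_job(dev);
}
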
582 * to vc4, so we don't attach dma-buf fences to them.
668 struct vc4_dev *vc4 = to_vc4_dev(dev);
679 spin_lock_irqsave(&vc4->job_lock, irqflags);
681 seqno = ++vc4->emit_seqno;
684 dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
685 vc4->dma_fence_context, exec->seqno);
696 list_add_tail(&exec->head, &vc4->bin_job_list);
703 renderjob = vc4_first_render_job(vc4);
704 if (vc4_first_bin_job(vc4) == exec &&
710 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
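
Lines 679-710 are the core of job submission. Bumping emit_seqno, initializing the fence, and queueing the job all happen under job_lock, which is also the lock passed to dma_fence_init(), so fence signaling and queue manipulation are serialized by one lock. A trimmed sketch:

	spin_lock_irqsave(&vc4->job_lock, irqflags);

	seqno = ++vc4->emit_seqno;
	exec->seqno = seqno;

	dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
		       vc4->dma_fence_context, exec->seqno);

	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* Kick the binner only if this job is now at the head and any
	 * in-flight render job uses a compatible perfmon (lines
	 * 703-704). */
	renderjob = vc4_first_render_job(vc4);
	if (vc4_first_bin_job(vc4) == exec &&
	    (!renderjob || renderjob->perfmon == exec->perfmon))
		vc4_submit_next_bin_job(dev);

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
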
824 struct vc4_dev *vc4 = to_vc4_dev(dev);
924 ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
943 struct vc4_dev *vc4 = to_vc4_dev(dev);
973 spin_lock_irqsave(&vc4->job_lock, irqflags);
974 vc4->bin_alloc_used &= ~exec->bin_slots;
975 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
979 vc4_v3d_bin_bo_put(vc4);
984 vc4_v3d_pm_put(vc4);
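
Lines 973-984 are the teardown half of a job's lifetime and mirror submission: the binner memory slots go back under job_lock, then the bin BO reference taken at line 924 and the power reference taken at line 1163 are dropped. A sketch:

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	vc4->bin_alloc_used &= ~exec->bin_slots;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

	if (exec->bin_bo_used)
		vc4_v3d_bin_bo_put(vc4);

	vc4_v3d_pm_put(vc4);
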
990 vc4_job_handle_completed(struct vc4_dev *vc4)
995 spin_lock_irqsave(&vc4->job_lock, irqflags);
996 while (!list_empty(&vc4->job_done_list)) {
998 list_first_entry(&vc4->job_done_list,
1002 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
1003 vc4_complete_exec(&vc4->base, exec);
1004 spin_lock_irqsave(&vc4->job_lock, irqflags);
1007 list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
1008 if (cb->seqno <= vc4->finished_seqno) {
1014 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
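
Lines 995-1014 show a classic drop-relock loop: vc4_complete_exec() can sleep (it releases BOs and power references), so the spinlock is released around each call and the list head re-read afterwards, which is safe because each entry is detached before the unlock. A sketch of the loop:

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	while (!list_empty(&vc4->job_done_list)) {
		struct vc4_exec_info *exec =
			list_first_entry(&vc4->job_done_list,
					 struct vc4_exec_info, head);
		list_del(&exec->head);

		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_complete_exec(&vc4->base, exec);
		spin_lock_irqsave(&vc4->job_lock, irqflags);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
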
1028 struct vc4_dev *vc4 = to_vc4_dev(dev);
1035 spin_lock_irqsave(&vc4->job_lock, irqflags);
1036 if (seqno > vc4->finished_seqno) {
1038 list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1042 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
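
Lines 1035-1042 register a seqno callback without racing completion: under job_lock the seqno either has not finished yet, in which case the callback is queued on seqno_cb_list for the sweep at line 1007, or it already has, in which case the work is scheduled immediately. A sketch (the immediate-schedule branch contains no "vc4" and so does not appear in the matches):

	spin_lock_irqsave(&vc4->job_lock, irqflags);
	if (seqno > vc4->finished_seqno) {
		cb->seqno = seqno;
		list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
	} else {
		schedule_work(&cb->work);
	}
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);
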
1054 struct vc4_dev *vc4 =
1057 vc4_job_handle_completed(vc4);
1130 struct vc4_dev *vc4 = to_vc4_dev(dev);
1139 if (!vc4->v3d) {
1163 ret = vc4_v3d_pm_get(vc4);
1197 vc4->dma_fence_context)) {
1256 args->seqno = vc4->emit_seqno;
1261 vc4_complete_exec(&vc4->base, exec);
1269 struct vc4_dev *vc4 = to_vc4_dev(dev);
1271 vc4->dma_fence_context = dma_fence_context_alloc(1);
1273 INIT_LIST_HEAD(&vc4->bin_job_list);
1274 INIT_LIST_HEAD(&vc4->render_job_list);
1275 INIT_LIST_HEAD(&vc4->job_done_list);
1276 INIT_LIST_HEAD(&vc4->seqno_cb_list);
1277 spin_lock_init(&vc4->job_lock);
1279 INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1280 timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
1282 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1284 mutex_init(&vc4->power_lock);
1286 INIT_LIST_HEAD(&vc4->purgeable.list);
1287 mutex_init(&vc4->purgeable.lock);
1294 struct vc4_dev *vc4 = to_vc4_dev(dev);
1299 WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1304 if (vc4->bin_bo) {
1305 drm_gem_object_put(&vc4->bin_bo->base.base);
1306 vc4->bin_bo = NULL;
1309 if (vc4->hang_state)
1310 vc4_free_hang_state(dev, vc4->hang_state);