Lines matching refs:vc4

Each entry below pairs a line number in the source file with the line of code that references vc4.

42 struct vc4_dev *vc4 = to_vc4_dev(dev);
44 mod_timer(&vc4->hangcheck.timer,
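
Lines 42-44 are the hangcheck re-arm path. A minimal sketch of the pattern, assuming the enclosing function is the driver's hangcheck-queue helper and assuming a 100 ms polling period (neither is visible in this listing):

    /* Sketch: re-arm the hangcheck timer.  The 100 ms period is an
     * assumed value, not taken from the listing. */
    static void vc4_queue_hangcheck(struct drm_device *dev)
    {
        struct vc4_dev *vc4 = to_vc4_dev(dev);

        mod_timer(&vc4->hangcheck.timer,
                  round_jiffies_up(jiffies + msecs_to_jiffies(100)));
    }
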
74 struct vc4_dev *vc4 = to_vc4_dev(dev);
79 if (WARN_ON_ONCE(vc4->is_vc5))
82 if (!vc4->v3d) {
87 spin_lock_irqsave(&vc4->job_lock, irqflags);
88 kernel_state = vc4->hang_state;
90 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
100 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
104 vc4->hang_state = NULL;
105 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
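
Lines 74-105 belong to the hang-state read-out ioctl. A sketch of the locking pattern they imply (the buffer-size check and the copy to userspace are elided; the -ENOENT return is an assumption):

    /* Sketch: detach the saved hang state under job_lock so the
     * potentially large copy-out can run without the spinlock held. */
    spin_lock_irqsave(&vc4->job_lock, irqflags);
    kernel_state = vc4->hang_state;
    if (!kernel_state) {
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        return -ENOENT;
    }
    /* ... userspace buffer-size check elided ... */
    vc4->hang_state = NULL;    /* take ownership of the capture */
    spin_unlock_irqrestore(&vc4->job_lock, irqflags);
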
154 struct vc4_dev *vc4 = to_vc4_dev(dev);
168 spin_lock_irqsave(&vc4->job_lock, irqflags);
169 exec[0] = vc4_first_bin_job(vc4);
170 exec[1] = vc4_first_render_job(vc4);
172 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
192 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
230 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
278 spin_lock_irqsave(&vc4->job_lock, irqflags);
279 if (vc4->hang_state) {
280 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
283 vc4->hang_state = kernel_state;
284 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
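
Lines 278-284 close out the state capture. A sketch of the publish step, assuming a vc4_free_hang_state() cleanup helper for the already-pending case (that helper does appear later in this listing, at line 1318):

    /* Sketch: publish the captured state, unless userspace has not yet
     * read a previous hang; in that case drop the new capture. */
    spin_lock_irqsave(&vc4->job_lock, irqflags);
    if (vc4->hang_state) {
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        vc4_free_hang_state(dev, kernel_state);
    } else {
        vc4->hang_state = kernel_state;
        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
    }
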
291 struct vc4_dev *vc4 = to_vc4_dev(dev);
295 mutex_lock(&vc4->power_lock);
296 if (vc4->power_refcount) {
300 pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
301 pm_runtime_get_sync(&vc4->v3d->pdev->dev);
303 mutex_unlock(&vc4->power_lock);
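
Lines 291-303 are the reset path. The put/get pair is the whole trick: pm_runtime_put_sync_suspend() suspends the V3D device synchronously and pm_runtime_get_sync() resumes it, so the block comes back in a clean power-on state. Sketch with the implied rationale as comments:

    /* Sketch: power-cycle V3D through runtime PM, serialized by
     * power_lock so a concurrent power_refcount change cannot race
     * the suspend/resume pair. */
    mutex_lock(&vc4->power_lock);
    if (vc4->power_refcount) {
        pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
        pm_runtime_get_sync(&vc4->v3d->pdev->dev);
    }
    mutex_unlock(&vc4->power_lock);
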
317 struct vc4_dev *vc4 =
320 vc4_save_hang_state(&vc4->base);
322 vc4_reset(&vc4->base);
328 struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
329 struct drm_device *dev = &vc4->base;
334 spin_lock_irqsave(&vc4->job_lock, irqflags);
336 bin_exec = vc4_first_bin_job(vc4);
337 render_exec = vc4_first_render_job(vc4);
341 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
357 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
362 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
368 schedule_work(&vc4->hangcheck.reset_work);
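
Lines 328-368 form the hangcheck timer callback. A sketch of its shape: the register reads that detect forward progress are elided, and the reset itself is deferred to a work item because a timer callback runs in atomic context while the reset path can sleep:

    static void vc4_hangcheck_elapsed(struct timer_list *t)
    {
        struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
        struct vc4_exec_info *bin_exec, *render_exec;
        unsigned long irqflags;

        spin_lock_irqsave(&vc4->job_lock, irqflags);
        bin_exec = vc4_first_bin_job(vc4);
        render_exec = vc4_first_render_job(vc4);

        /* Idle GPU: nothing to watch for. */
        if (!bin_exec && !render_exec) {
            spin_unlock_irqrestore(&vc4->job_lock, irqflags);
            return;
        }

        /* ... progress check elided: if either engine advanced,
         * re-arm the timer and return ... */

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        schedule_work(&vc4->hangcheck.reset_work);
    }
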
374 struct vc4_dev *vc4 = to_vc4_dev(dev);
387 struct vc4_dev *vc4 = to_vc4_dev(dev);
392 if (WARN_ON_ONCE(vc4->is_vc5))
395 if (vc4->finished_seqno >= seqno)
405 prepare_to_wait(&vc4->job_wait_queue, &wait,
414 if (vc4->finished_seqno >= seqno)
428 finish_wait(&vc4->job_wait_queue, &wait);
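
Lines 387-428 are the seqno wait. A sketch of the wait-queue loop, assuming a DEFINE_WAIT(wait) entry and a caller-supplied interruptible flag, with the timeout arithmetic elided:

    if (vc4->finished_seqno >= seqno)
        return 0;

    for (;;) {
        prepare_to_wait(&vc4->job_wait_queue, &wait,
                        interruptible ? TASK_INTERRUPTIBLE :
                                        TASK_UNINTERRUPTIBLE);

        if (interruptible && signal_pending(current)) {
            ret = -ERESTARTSYS;
            break;
        }
        if (vc4->finished_seqno >= seqno)
            break;

        schedule();    /* timed variant elided */
    }
    finish_wait(&vc4->job_wait_queue, &wait);
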
437 struct vc4_dev *vc4 = to_vc4_dev(dev);
456 struct vc4_dev *vc4 = to_vc4_dev(dev);
474 struct vc4_dev *vc4 = to_vc4_dev(dev);
477 if (WARN_ON_ONCE(vc4->is_vc5))
481 exec = vc4_first_bin_job(vc4);
490 if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
491 vc4_perfmon_start(vc4, exec->perfmon);
504 next = vc4_first_bin_job(vc4);
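
Lines 474-504 kick the binner. The perfmon check at line 490 avoids restarting a performance monitor that the previous job already left counting; a sketch of the surrounding shape (control-list submission elided):

    exec = vc4_first_bin_job(vc4);
    if (!exec)
        return;

    /* Only (re)start the perfmon if this job wants a different one
     * than the monitor currently active. */
    if (exec->perfmon && vc4->active_perfmon != exec->perfmon)
        vc4_perfmon_start(vc4, exec->perfmon);

    /* ... submit the bin control list, or (line 504) fall through to
     * the next queued bin job when this one has no binner work ... */
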
519 struct vc4_dev *vc4 = to_vc4_dev(dev);
520 struct vc4_exec_info *exec = vc4_first_render_job(vc4);
525 if (WARN_ON_ONCE(vc4->is_vc5))
543 struct vc4_dev *vc4 = to_vc4_dev(dev);
544 bool was_empty = list_empty(&vc4->render_job_list);
546 if (WARN_ON_ONCE(vc4->is_vc5))
549 list_move_tail(&exec->head, &vc4->render_job_list);
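
Lines 543-549 hand a job from binning to rendering. A sketch, assuming the kick into vc4_submit_next_render_job() when the render queue was previously empty (that call is not itself visible in this listing):

    /* Sketch: binning finished, so move the job to the render queue.
     * If the render engine was idle, nothing else will kick it. */
    bool was_empty = list_empty(&vc4->render_job_list);

    list_move_tail(&exec->head, &vc4->render_job_list);
    if (was_empty)
        vc4_submit_next_render_job(dev);
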
599 * to vc4, so we don't attach dma-buf fences to them.
685 struct vc4_dev *vc4 = to_vc4_dev(dev);
696 spin_lock_irqsave(&vc4->job_lock, irqflags);
698 seqno = ++vc4->emit_seqno;
701 dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
702 vc4->dma_fence_context, exec->seqno);
713 list_add_tail(&exec->head, &vc4->bin_job_list);
720 renderjob = vc4_first_render_job(vc4);
721 if (vc4_first_bin_job(vc4) == exec &&
727 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
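
Lines 685-727 are the submit path. Note that &vc4->job_lock is handed to dma_fence_init() as the fence's lock, which is why job_lock must be a spinlock. A sketch of the emit sequence, with the perfmon-ordering condition at lines 720-721 simplified:

    spin_lock_irqsave(&vc4->job_lock, irqflags);

    seqno = ++vc4->emit_seqno;    /* monotonic per-device sequence */
    exec->seqno = seqno;

    dma_fence_init(&fence->base, &vc4_fence_ops, &vc4->job_lock,
                   vc4->dma_fence_context, exec->seqno);

    list_add_tail(&exec->head, &vc4->bin_job_list);

    /* Kick the hardware only if this job is now at the head of the
     * bin queue (line 720 additionally checks that the current render
     * job's perfmon does not conflict). */
    if (vc4_first_bin_job(vc4) == exec)
        vc4_submit_next_bin_job(dev);

    spin_unlock_irqrestore(&vc4->job_lock, irqflags);
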
802 struct vc4_dev *vc4 = to_vc4_dev(dev);
902 ret = vc4_v3d_bin_bo_get(vc4, &exec->bin_bo_used);
921 struct vc4_dev *vc4 = to_vc4_dev(dev);
951 spin_lock_irqsave(&vc4->job_lock, irqflags);
952 vc4->bin_alloc_used &= ~exec->bin_slots;
953 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
957 vc4_v3d_bin_bo_put(vc4);
962 vc4_v3d_pm_put(vc4);
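
Lines 921-962 retire a job. A sketch of the release order: the binner-memory slots go back to the allocator under job_lock, then the binner BO and power references taken at submit are dropped (the bin_bo_used condition is inferred from line 902's vc4_v3d_bin_bo_get()):

    /* Return this job's binner-memory slots to the allocator. */
    spin_lock_irqsave(&vc4->job_lock, irqflags);
    vc4->bin_alloc_used &= ~exec->bin_slots;
    spin_unlock_irqrestore(&vc4->job_lock, irqflags);

    if (exec->bin_bo_used)
        vc4_v3d_bin_bo_put(vc4);

    vc4_v3d_pm_put(vc4);    /* balances vc4_v3d_pm_get() at submit */
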
968 vc4_job_handle_completed(struct vc4_dev *vc4)
973 if (WARN_ON_ONCE(vc4->is_vc5))
976 spin_lock_irqsave(&vc4->job_lock, irqflags);
977 while (!list_empty(&vc4->job_done_list)) {
979 list_first_entry(&vc4->job_done_list,
983 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
984 vc4_complete_exec(&vc4->base, exec);
985 spin_lock_irqsave(&vc4->job_lock, irqflags);
988 list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
989 if (cb->seqno <= vc4->finished_seqno) {
995 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
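
Lines 968-995 drain completed jobs. The lock is dropped around vc4_complete_exec() because retiring a job frees buffers and can sleep; a sketch of the loop these lines trace:

    spin_lock_irqsave(&vc4->job_lock, irqflags);
    while (!list_empty(&vc4->job_done_list)) {
        struct vc4_exec_info *exec =
            list_first_entry(&vc4->job_done_list,
                             struct vc4_exec_info, head);
        list_del(&exec->head);

        spin_unlock_irqrestore(&vc4->job_lock, irqflags);
        vc4_complete_exec(&vc4->base, exec);
        spin_lock_irqsave(&vc4->job_lock, irqflags);
    }

    /* Fire any seqno callbacks that the retired jobs satisfied. */
    list_for_each_entry_safe(cb, cb_temp, &vc4->seqno_cb_list, work.entry) {
        if (cb->seqno <= vc4->finished_seqno) {
            list_del_init(&cb->work.entry);
            schedule_work(&cb->work);
        }
    }
    spin_unlock_irqrestore(&vc4->job_lock, irqflags);
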
1009 struct vc4_dev *vc4 = to_vc4_dev(dev);
1012 if (WARN_ON_ONCE(vc4->is_vc5))
1018 spin_lock_irqsave(&vc4->job_lock, irqflags);
1019 if (seqno > vc4->finished_seqno) {
1021 list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
1025 spin_unlock_irqrestore(&vc4->job_lock, irqflags);
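
Lines 1009-1025 register a seqno callback. A sketch of the already-signaled short-circuit the lines imply:

    spin_lock_irqsave(&vc4->job_lock, irqflags);
    if (seqno > vc4->finished_seqno) {
        cb->seqno = seqno;
        list_add_tail(&cb->work.entry, &vc4->seqno_cb_list);
    } else {
        /* Already retired: run the callback work immediately. */
        schedule_work(&cb->work);
    }
    spin_unlock_irqrestore(&vc4->job_lock, irqflags);
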
1037 struct vc4_dev *vc4 =
1040 vc4_job_handle_completed(vc4);
1065 struct vc4_dev *vc4 = to_vc4_dev(dev);
1068 if (WARN_ON_ONCE(vc4->is_vc5))
1079 struct vc4_dev *vc4 = to_vc4_dev(dev);
1085 if (WARN_ON_ONCE(vc4->is_vc5))
1121 struct vc4_dev *vc4 = to_vc4_dev(dev);
1134 if (WARN_ON_ONCE(vc4->is_vc5))
1137 if (!vc4->v3d) {
1160 exec->dev = vc4;
1162 ret = vc4_v3d_pm_get(vc4);
1196 vc4->dma_fence_context)) {
1255 args->seqno = vc4->emit_seqno;
1260 vc4_complete_exec(&vc4->base, exec);
1268 struct vc4_dev *vc4 = to_vc4_dev(dev);
1271 if (WARN_ON_ONCE(vc4->is_vc5))
1274 vc4->dma_fence_context = dma_fence_context_alloc(1);
1276 INIT_LIST_HEAD(&vc4->bin_job_list);
1277 INIT_LIST_HEAD(&vc4->render_job_list);
1278 INIT_LIST_HEAD(&vc4->job_done_list);
1279 INIT_LIST_HEAD(&vc4->seqno_cb_list);
1280 spin_lock_init(&vc4->job_lock);
1282 INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
1283 timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
1285 INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
1287 ret = drmm_mutex_init(dev, &vc4->power_lock);
1291 INIT_LIST_HEAD(&vc4->purgeable.list);
1293 ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
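
Lines 1274-1293 are the one-time GEM setup. A condensed sketch of the ordering, with the drmm_mutex_init() error returns the listing elides (DRM-managed mutexes are torn down automatically with the drm_device):

    vc4->dma_fence_context = dma_fence_context_alloc(1);

    INIT_LIST_HEAD(&vc4->bin_job_list);
    INIT_LIST_HEAD(&vc4->render_job_list);
    INIT_LIST_HEAD(&vc4->job_done_list);
    INIT_LIST_HEAD(&vc4->seqno_cb_list);
    spin_lock_init(&vc4->job_lock);    /* also the fence lock, see line 701 */

    INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
    timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
    INIT_WORK(&vc4->job_done_work, vc4_job_done_work);

    ret = drmm_mutex_init(dev, &vc4->power_lock);
    if (ret)
        return ret;

    INIT_LIST_HEAD(&vc4->purgeable.list);
    ret = drmm_mutex_init(dev, &vc4->purgeable.lock);
    if (ret)
        return ret;
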
1302 struct vc4_dev *vc4 = to_vc4_dev(dev);
1307 WARN_ON(vc4->emit_seqno != vc4->finished_seqno);
1312 if (vc4->bin_bo) {
1313 drm_gem_object_put(&vc4->bin_bo->base.base);
1314 vc4->bin_bo = NULL;
1317 if (vc4->hang_state)
1318 vc4_free_hang_state(dev, vc4->hang_state);
1324 struct vc4_dev *vc4 = to_vc4_dev(dev);
1330 if (WARN_ON_ONCE(vc4->is_vc5))