Lines matching references to vdev (each entry is prefixed with its source line number)
60 struct ivpu_device *vdev = file_priv->vdev;
64 ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
70 struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
74 xa_lock_irq(&vdev->context_xa);
75 file_priv = xa_load(&vdev->context_xa, id);
79 xa_unlock_irq(&vdev->context_xa);
82 ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
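
The two getters above (lines 60-82) implement a refcounted file_priv lifetime: a plain kref get for callers that already hold a reference, and a context_xa lookup that only takes a reference while the xarray lock pins the entry. A minimal sketch of the lookup, assuming file_priv embeds a struct kref named ref (the field is not visible in this listing):

struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
        struct ivpu_file_priv *file_priv;

        xa_lock_irq(&vdev->context_xa);
        file_priv = xa_load(&vdev->context_xa, id);
        /* The entry may still be present while the release path runs, so
         * only hand it out if the refcount has not already dropped to zero. */
        if (file_priv && !kref_get_unless_zero(&file_priv->ref))
                file_priv = NULL;
        xa_unlock_irq(&vdev->context_xa);

        return file_priv;
}
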
91 struct ivpu_device *vdev = file_priv->vdev;
93 ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
97 ivpu_jsm_context_release(vdev, file_priv->ctx.id);
98 ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
99 drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
107 struct ivpu_device *vdev = file_priv->vdev;
109 drm_WARN_ON(&vdev->drm, !file_priv);
111 ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
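
Lines 91-113 show the other half of that lifetime: the kref release callback notifies the firmware, tears down the MMU user context, and removes the xarray slot, while ivpu_file_priv_put drops one reference. A hedged sketch of the put side; the double-pointer signature, the ref field, and the final kfree() are assumptions beyond what the listing shows:

static void file_priv_release(struct kref *ref)
{
        struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
        struct ivpu_device *vdev = file_priv->vdev;

        ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);

        /* Per-context resources not visible in this listing (e.g. command
         * queues) would be released here first. */
        ivpu_jsm_context_release(vdev, file_priv->ctx.id);
        ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
        drm_WARN_ON(&vdev->drm,
                    xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
        kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
        struct ivpu_file_priv *file_priv = *link;
        struct ivpu_device *vdev = file_priv->vdev;

        ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
                 file_priv->ctx.id, kref_read(&file_priv->ref));

        *link = NULL;
        kref_put(&file_priv->ref, file_priv_release);
}
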
118 static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
137 struct ivpu_device *vdev = file_priv->vdev;
138 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
154 args->value = vdev->platform;
157 args->value = ivpu_hw_reg_pll_freq_get(vdev);
160 args->value = ivpu_get_context_count(vdev);
163 args->value = vdev->hw->ranges.user.start;
175 fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
182 ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
185 args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
188 args->value = vdev->hw->tile_fuse;
191 args->value = vdev->hw->sku;
194 ret = ivpu_get_capabilities(vdev, args);
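
Lines 118-194 belong to the DRM_IVPU_GET_PARAM path: a switch over the requested parameter copies device state into args->value or calls a helper such as ivpu_get_capabilities(). A condensed sketch of that dispatch, assuming the index/param/value layout of struct drm_ivpu_param and DRM_IVPU_PARAM_* case labels that match the values shown; unlisted parameters are treated as -EINVAL here:

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
        struct ivpu_file_priv *file_priv = file->driver_priv;
        struct ivpu_device *vdev = file_priv->vdev;
        struct drm_ivpu_param *args = data;
        int ret = 0;

        switch (args->param) {
        case DRM_IVPU_PARAM_PLATFORM_TYPE:
                args->value = vdev->platform;
                break;
        case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
                args->value = ivpu_hw_reg_pll_freq_get(vdev);
                break;
        case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
                ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
                break;
        case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
                args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
                break;
        case DRM_IVPU_PARAM_CAPABILITIES:
                ret = ivpu_get_capabilities(vdev, args);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
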
227 struct ivpu_device *vdev = to_ivpu_device(dev);
233 ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
235 ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
245 file_priv->vdev = vdev;
250 ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
254 old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
257 ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
261 ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
268 ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
273 xa_erase_irq(&vdev->context_xa, ctx_id);
280 struct ivpu_device *vdev = to_ivpu_device(dev);
282 ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
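
Lines 227-273 are the open path: a context ID is reserved in context_xa with a NULL placeholder, the per-file object is initialized (including its MMU user context), and only then is the pointer published under that ID; the close path at lines 280-282 logs and drops the file's reference. A sketch of that ordering and its unwind ladder; the allocation, kref_init() and error-label names are assumptions around the listed lines:

static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_file_priv *file_priv, *old;
        u32 ctx_id;
        int ret;

        /* Reserve a context ID with a NULL placeholder entry. */
        ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
        if (ret) {
                ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
                return ret;
        }

        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv) {
                ret = -ENOMEM;
                goto err_xa_erase;
        }

        file_priv->vdev = vdev;
        kref_init(&file_priv->ref);

        ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
        if (ret)
                goto err_free_file_priv;

        /* Publish the fully initialized object under the reserved ID. */
        old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
        if (xa_is_err(old)) {
                ret = xa_err(old);
                ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
                goto err_ctx_fini;
        }

        file->driver_priv = file_priv;
        return 0;

err_ctx_fini:
        ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_free_file_priv:
        kfree(file_priv);
err_xa_erase:
        xa_erase_irq(&vdev->context_xa, ctx_id);
        return ret;
}
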
297 static int ivpu_wait_for_ready(struct ivpu_device *vdev)
307 ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);
309 timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
311 ret = ivpu_ipc_irq_handler(vdev);
314 ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
321 ivpu_ipc_consumer_del(vdev, &cons);
324 ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
330 ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
332 ivpu_hw_diagnose_failure(vdev);
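
Lines 297-332 poll for the firmware's boot-ready message: an IPC consumer is registered on the boot channel, the IPC IRQ handler is driven by hand (the interrupt line is still masked at this point) until a message arrives or vdev->timeout.boot expires, and the payload is validated. A sketch of that loop; the loop structure, the ready-message field and the IVPU_IPC_BOOT_MSG_DATA_ADDR constant are assumptions filled in around the listed calls:

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
        struct ivpu_ipc_consumer cons;
        struct ivpu_ipc_hdr ipc_hdr;
        unsigned long timeout;
        int ret;

        ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG);

        timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
        while (1) {
                ret = ivpu_ipc_irq_handler(vdev);
                if (ret)
                        break;

                ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
                if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
                        break;

                cond_resched();
        }

        ivpu_ipc_consumer_del(vdev, &cons);

        if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
                ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n", ipc_hdr.data_addr);
                return -EIO;
        }

        if (!ret)
                ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
        else
                ivpu_hw_diagnose_failure(vdev);

        return ret;
}
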
339 * @vdev: VPU device
344 int ivpu_boot(struct ivpu_device *vdev)
349 ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);
351 ret = ivpu_hw_boot_fw(vdev);
353 ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
357 ret = ivpu_wait_for_ready(vdev);
359 ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
363 ivpu_hw_irq_clear(vdev);
364 enable_irq(vdev->irq);
365 ivpu_hw_irq_enable(vdev);
366 ivpu_ipc_enable(vdev);
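
ivpu_boot() (lines 344-366) is almost fully visible above; reassembled, it refreshes the boot parameters in firmware memory, starts the firmware, waits for the ready message, and only then unmasks the interrupt line and enables IPC:

int ivpu_boot(struct ivpu_device *vdev)
{
        int ret;

        /* Boot parameters live at the bottom of the firmware memory. */
        ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr);

        ret = ivpu_hw_boot_fw(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
                return ret;
        }

        ret = ivpu_wait_for_ready(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
                return ret;
        }

        /* Only unmask interrupts once the VPU has reported ready. */
        ivpu_hw_irq_clear(vdev);
        enable_irq(vdev->irq);
        ivpu_hw_irq_enable(vdev);
        ivpu_ipc_enable(vdev);

        return 0;
}
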
370 void ivpu_prepare_for_reset(struct ivpu_device *vdev)
372 ivpu_hw_irq_disable(vdev);
373 disable_irq(vdev->irq);
374 ivpu_ipc_disable(vdev);
375 ivpu_mmu_disable(vdev);
378 int ivpu_shutdown(struct ivpu_device *vdev)
382 ivpu_prepare_for_reset(vdev);
384 ret = ivpu_hw_power_down(vdev);
386 ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);
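
Lines 370-386 are the mirror image: ivpu_prepare_for_reset() masks interrupts and stops IPC and the MMU before ivpu_shutdown() powers the hardware down, i.e. the reverse of the enable order in ivpu_boot(). Reassembled from the listed lines:

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
        ivpu_hw_irq_disable(vdev);
        disable_irq(vdev->irq);
        ivpu_ipc_disable(vdev);
        ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
        int ret;

        ivpu_prepare_for_reset(vdev);

        ret = ivpu_hw_power_down(vdev);
        if (ret)
                ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

        return ret;
}
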
418 static int ivpu_irq_init(struct ivpu_device *vdev)
420 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
425 ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
429 vdev->irq = pci_irq_vector(pdev, 0);
431 ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
432 IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
434 ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
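
ivpu_irq_init() (lines 418-434) allocates a single MSI vector and requests it with IRQF_NO_AUTOEN, so the line stays masked until ivpu_boot() calls enable_irq(). A sketch; the pci_alloc_irq_vectors() flags are an assumption, the rest follows the listed lines:

static int ivpu_irq_init(struct ivpu_device *vdev)
{
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        int ret;

        ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
        if (ret < 0) {
                ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
                return ret;
        }

        vdev->irq = pci_irq_vector(pdev, 0);

        /* IRQF_NO_AUTOEN: deferred enable_irq() happens in ivpu_boot(). */
        ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
                               IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
        if (ret)
                ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

        return ret;
}
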
439 static int ivpu_pci_init(struct ivpu_device *vdev)
441 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
446 ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
447 vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
448 if (IS_ERR(vdev->regv)) {
449 ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
450 return PTR_ERR(vdev->regv);
453 ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
454 vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
455 if (IS_ERR(vdev->regb)) {
456 ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
457 return PTR_ERR(vdev->regb);
460 ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
462 ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
465 dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);
475 ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
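
ivpu_pci_init() (lines 439-475) maps the two register BARs, configures DMA addressing from hw->dma_bits, and enables the PCI device. A sketch with the BAR4 branch elided for brevity; the resource accessors and the pcim_enable_device()/pci_set_master() tail are assumptions around the listed lines:

static int ivpu_pci_init(struct ivpu_device *vdev)
{
        struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
        struct resource *bar0 = &pdev->resource[0];
        int ret;

        ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
        vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
        if (IS_ERR(vdev->regv)) {
                ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
                return PTR_ERR(vdev->regv);
        }

        /* BAR4 (RegB) is mapped into vdev->regb the same way (lines 453-457). */

        ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
        if (ret) {
                ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
                return ret;
        }
        dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

        ret = pcim_enable_device(pdev);
        if (ret) {
                ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
                return ret;
        }
        pci_set_master(pdev);

        return 0;
}
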
484 static int ivpu_dev_init(struct ivpu_device *vdev)
488 vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
489 if (!vdev->hw)
492 vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
493 if (!vdev->mmu)
496 vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
497 if (!vdev->fw)
500 vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
501 if (!vdev->ipc)
504 vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
505 if (!vdev->pm)
508 if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
509 vdev->hw->ops = &ivpu_hw_40xx_ops;
510 vdev->hw->dma_bits = 48;
512 vdev->hw->ops = &ivpu_hw_37xx_ops;
513 vdev->hw->dma_bits = 38;
516 vdev->platform = IVPU_PLATFORM_INVALID;
517 vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
518 vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
519 atomic64_set(&vdev->unique_id_counter, 0);
520 xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
521 xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
522 lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
524 ret = ivpu_pci_init(vdev);
526 ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret);
530 ret = ivpu_irq_init(vdev);
532 ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret);
537 ret = ivpu_hw_info_init(vdev);
539 ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret);
544 ret = ivpu_hw_power_up(vdev);
546 ivpu_err(vdev, "Failed to power up HW: %d\n", ret);
550 ret = ivpu_mmu_global_context_init(vdev);
552 ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret);
556 ret = ivpu_mmu_init(vdev);
558 ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret);
562 ret = ivpu_fw_init(vdev);
564 ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret);
568 ret = ivpu_ipc_init(vdev);
570 ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret);
574 ret = ivpu_pm_init(vdev);
576 ivpu_err(vdev, "Failed to initialize PM: %d\n", ret);
580 ret = ivpu_job_done_thread_init(vdev);
582 ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret);
586 ret = ivpu_fw_load(vdev);
588 ivpu_err(vdev, "Failed to load firmware: %d\n", ret);
592 ret = ivpu_boot(vdev);
594 ivpu_err(vdev, "Failed to boot: %d\n", ret);
598 ivpu_pm_enable(vdev);
603 ivpu_job_done_thread_fini(vdev);
605 ivpu_ipc_fini(vdev);
607 ivpu_fw_fini(vdev);
609 ivpu_mmu_global_context_fini(vdev);
611 ivpu_hw_power_down(vdev);
613 pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
615 xa_destroy(&vdev->submitted_jobs_xa);
616 xa_destroy(&vdev->context_xa);
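
ivpu_dev_init() (lines 484-616) allocates its sub-structures with drmm_kzalloc() so they live as long as the drm_device, selects the HW ops by generation, sets up the two xarrays, and then runs the long init chain (PCI, IRQ, HW info, power, MMU, FW, IPC, PM, job-done thread, FW load, boot) with a goto-based unwind ladder that ends in the cleanup calls at lines 603-616. A heavily abbreviated sketch of that shape with only representative steps; the label names are assumptions:

static int ivpu_dev_init(struct ivpu_device *vdev)
{
        int ret;

        /* drmm_kzalloc() memory is released together with the drm_device. */
        vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
        if (!vdev->hw)
                return -ENOMEM;

        vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
        vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
        xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
        xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);

        ret = ivpu_pci_init(vdev);
        if (ret)
                goto err_xa_destroy;

        ret = ivpu_irq_init(vdev);
        if (ret)
                goto err_xa_destroy;

        ret = ivpu_hw_power_up(vdev);
        if (ret)
                goto err_power_down;    /* powering down after a failed power-up is harmless */

        ret = ivpu_mmu_global_context_init(vdev);
        if (ret)
                goto err_power_down;

        /* ... MMU, FW, IPC, PM, job-done thread, FW load and ivpu_boot()
         * follow here; each later failure jumps to a label that also
         * unwinds everything initialized before it ... */

        ivpu_pm_enable(vdev);
        return 0;

err_power_down:
        ivpu_hw_power_down(vdev);
        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
        xa_destroy(&vdev->submitted_jobs_xa);
        xa_destroy(&vdev->context_xa);
        return ret;
}
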
620 static void ivpu_dev_fini(struct ivpu_device *vdev)
622 ivpu_pm_disable(vdev);
623 ivpu_shutdown(vdev);
625 pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
626 ivpu_job_done_thread_fini(vdev);
627 ivpu_pm_cancel_recovery(vdev);
629 ivpu_ipc_fini(vdev);
630 ivpu_fw_fini(vdev);
631 ivpu_mmu_global_context_fini(vdev);
633 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
634 xa_destroy(&vdev->submitted_jobs_xa);
635 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
636 xa_destroy(&vdev->context_xa);
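
ivpu_dev_fini() (lines 620-636) is visible almost in full: it stops power management and the hardware first, then the job-done thread and any pending recovery work, then IPC, firmware and the global MMU context, and finally asserts that no jobs or contexts leaked before destroying the xarrays. Reassembled:

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
        ivpu_pm_disable(vdev);
        ivpu_shutdown(vdev);
        pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

        ivpu_job_done_thread_fini(vdev);
        ivpu_pm_cancel_recovery(vdev);

        ivpu_ipc_fini(vdev);
        ivpu_fw_fini(vdev);
        ivpu_mmu_global_context_fini(vdev);

        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
        xa_destroy(&vdev->submitted_jobs_xa);
        drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
        xa_destroy(&vdev->context_xa);
}
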
649 struct ivpu_device *vdev;
652 vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
653 if (IS_ERR(vdev))
654 return PTR_ERR(vdev);
656 pci_set_drvdata(pdev, vdev);
658 ret = ivpu_dev_init(vdev);
664 ret = drm_dev_register(&vdev->drm, 0);
667 ivpu_dev_fini(vdev);
675 struct ivpu_device *vdev = pci_get_drvdata(pdev);
677 drm_dev_unplug(&vdev->drm);
678 ivpu_dev_fini(vdev);
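
The probe/remove pair (lines 649-678) ties it together: the ivpu_device is allocated embedded in a managed drm_device, initialized, and only then registered; remove unplugs the DRM node before tearing the device down so userspace entry points fail fast instead of racing the teardown. A sketch; the drm_driver object name and PCI callback details are assumptions beyond the listed lines:

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ivpu_device *vdev;
        int ret;

        vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
        if (IS_ERR(vdev))
                return PTR_ERR(vdev);

        pci_set_drvdata(pdev, vdev);

        ret = ivpu_dev_init(vdev);
        if (ret)
                return ret;

        ret = drm_dev_register(&vdev->drm, 0);
        if (ret) {
                ivpu_dev_fini(vdev);
                return ret;
        }

        return 0;
}

static void ivpu_remove(struct pci_dev *pdev)
{
        struct ivpu_device *vdev = pci_get_drvdata(pdev);

        /* Unplug first so new ioctls fail instead of racing the teardown. */
        drm_dev_unplug(&vdev->drm);
        ivpu_dev_fini(vdev);
}
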