Lines matching refs: drm
33 #include <drm/drm_aperture.h>
34 #include <drm/drm_drv.h>
35 #include <drm/drm_fbdev_generic.h>
36 #include <drm/drm_gem_ttm_helper.h>
37 #include <drm/drm_ioctl.h>
38 #include <drm/drm_vblank.h>
208 mutex_lock(&cli->drm->master.lock);
210 mutex_unlock(&cli->drm->master.lock);
214 nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
240 u64 device = nouveau_name(drm->dev);
244 cli->drm = drm;
252 if (cli == &drm->master) {
256 mutex_lock(&drm->master.lock);
257 ret = nvif_client_ctor(&drm->master.base, cli->name, device,
259 mutex_unlock(&drm->master.lock);
310 ret = nouveau_sched_entity_init(&cli->sched_entity, &drm->sched,
311 drm->sched_wq);
323 nouveau_accel_ce_fini(struct nouveau_drm *drm)
325 nouveau_channel_idle(drm->cechan);
326 nvif_object_dtor(&drm->ttm.copy);
327 nouveau_channel_del(&drm->cechan);
331 nouveau_accel_ce_init(struct nouveau_drm *drm)
333 struct nvif_device *device = &drm->client.device;
342 NV_DEBUG(drm, "no ce runlist\n");
346 ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
348 NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
352 nouveau_accel_gr_fini(struct nouveau_drm *drm)
354 nouveau_channel_idle(drm->channel);
355 nvif_object_dtor(&drm->ntfy);
356 nvkm_gpuobj_del(&drm->notify);
357 nouveau_channel_del(&drm->channel);
361 nouveau_accel_gr_init(struct nouveau_drm *drm)
363 struct nvif_device *device = &drm->client.device;
370 NV_DEBUG(drm, "no gr runlist\n");
374 ret = nouveau_channel_new(drm, device, false, runm, NvDmaFB, NvDmaTT, &drm->channel);
376 NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
377 nouveau_accel_gr_fini(drm);
385 if (!drm->channel->nvsw.client && device->info.family < NV_DEVICE_INFO_V0_TESLA) {
386 ret = nvif_object_ctor(&drm->channel->user, "drmNvsw",
387 NVDRM_NVSW, nouveau_abi16_swclass(drm),
388 NULL, 0, &drm->channel->nvsw);
391 ret = nvif_object_ctor(&drm->channel->user, "drmBlit",
393 NULL, 0, &drm->channel->blit);
397 struct nvif_push *push = drm->channel->chan.push;
401 PUSH_NVSQ(push, NV05F, 0x0000, drm->channel->blit.handle);
406 PUSH_NVSQ(push, NV_SW, 0x0000, drm->channel->nvsw.handle);
411 NV_ERROR(drm, "failed to allocate sw or blit class, %d\n", ret);
412 nouveau_accel_gr_fini(drm);
423 &drm->notify);
425 NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
426 nouveau_accel_gr_fini(drm);
430 ret = nvif_object_ctor(&drm->channel->user, "drmM2mfNtfy",
435 .start = drm->notify->addr,
436 .limit = drm->notify->addr + 31
438 &drm->ntfy);
440 nouveau_accel_gr_fini(drm);
447 nouveau_accel_fini(struct nouveau_drm *drm)
449 nouveau_accel_ce_fini(drm);
450 nouveau_accel_gr_fini(drm);
451 if (drm->fence)
452 nouveau_fence(drm)->dtor(drm);
453 nouveau_channels_fini(drm);
457 nouveau_accel_init(struct nouveau_drm *drm)
459 struct nvif_device *device = &drm->client.device;
467 ret = nouveau_channels_init(drm);
481 ret = nv04_fence_create(drm);
484 ret = nv10_fence_create(drm);
488 ret = nv17_fence_create(drm);
491 ret = nv50_fence_create(drm);
494 ret = nv84_fence_create(drm);
505 ret = nvc0_fence_create(drm);
514 NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
515 nouveau_accel_fini(drm);
520 if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_VOLTA) {
527 nouveau_accel_gr_init(drm);
528 nouveau_accel_ce_init(drm);
531 nouveau_bo_move_init(drm);
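
The fence setup inside nouveau_accel_init() (lines 481-505) picks one nvXX_fence_create() per hardware generation, i.e. a dispatch keyed on device->info.family, and on failure logs and tears acceleration back down (lines 514-515). The sketch below renders that generation-keyed dispatch as a constructor table; this is a generic illustration, the family names merely echo the calls in the listing, and the table form is not how the driver itself is written.

/* Hypothetical constructor-dispatch table; only the idea of "one fence
 * implementation per GPU family" is taken from the listing. */
#include <stdio.h>

enum family { FAM_NV04, FAM_NV10, FAM_NV17, FAM_NV50, FAM_NV84, FAM_NVC0 };

static int nv04_create(void) { puts("nv04-style fence"); return 0; }
static int nv10_create(void) { puts("nv10-style fence"); return 0; }
static int nvc0_create(void) { puts("nvc0-style fence"); return 0; }

static int (*const fence_ctor[])(void) = {
        [FAM_NV04] = nv04_create,
        [FAM_NV10] = nv10_create,
        [FAM_NV17] = nv10_create,       /* placeholder: families may share code */
        [FAM_NV50] = nv10_create,
        [FAM_NV84] = nvc0_create,
        [FAM_NVC0] = nvc0_create,
};

int main(void)
{
        enum family fam = FAM_NVC0;     /* would come from device->info.family */
        int ret = fence_ctor[fam]();

        if (ret)
                fprintf(stderr, "failed to initialise sync subsystem, %d\n", ret);
        return ret;
}
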
537 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
544 NV_ERROR(drm, "%pV", &vaf);
551 struct nouveau_drm *drm = container_of(object->parent, typeof(*drm), parent);
558 NV_DEBUG(drm, "%pV", &vaf);
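
The two logging callbacks at lines 537-558 recover their struct nouveau_drm from an embedded member with container_of(), then forward the message through NV_ERROR()/NV_DEBUG(). The standalone demo below shows the same pointer arithmetic; the macro is a simplified version of the kernel's <linux/container_of.h> (without its type checks), and the surrounding types are invented.

/* Self-contained container_of() demo; the struct names are illustrative. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct parent_obj { int dummy; };

struct drm_like {
        int level;
        struct parent_obj parent;       /* plays the role of nouveau_drm::parent */
};

static void report(struct parent_obj *object)
{
        /* Same trick as lines 537/551: step back from the embedded member
         * to the structure that contains it. */
        struct drm_like *drm = container_of(object, struct drm_like, parent);

        printf("debug level: %d\n", drm->level);
}

int main(void)
{
        struct drm_like d = { .level = 3 };

        report(&d.parent);
        return 0;
}
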
571 struct nouveau_drm *drm;
574 if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
576 dev->dev_private = drm;
577 drm->dev = dev;
579 nvif_parent_ctor(&nouveau_parent, &drm->parent);
580 drm->master.base.object.parent = &drm->parent;
582 ret = nouveau_sched_init(drm);
586 ret = nouveau_cli_init(drm, "DRM-master", &drm->master);
590 ret = nouveau_cli_init(drm, "DRM", &drm->client);
594 nvxx_client(&drm->client.base)->debug =
597 INIT_LIST_HEAD(&drm->clients);
598 mutex_init(&drm->clients_lock);
599 spin_lock_init(&drm->tile.lock);
605 if (drm->client.device.info.chipset == 0xc1)
606 nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
608 nouveau_vga_init(drm);
610 ret = nouveau_ttm_init(drm);
618 nouveau_accel_init(drm);
630 nouveau_debugfs_init(drm);
632 nouveau_svm_init(drm);
633 nouveau_dmem_init(drm);
649 nouveau_accel_fini(drm);
652 nouveau_ttm_fini(drm);
654 nouveau_vga_fini(drm);
655 nouveau_cli_fini(&drm->client);
657 nouveau_cli_fini(&drm->master);
659 nouveau_sched_fini(drm);
661 nvif_parent_dtor(&drm->parent);
662 kfree(drm);
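
nouveau_drm_device_init() (lines 571-662) is a classic kernel init ladder: kzalloc() the private structure, initialise one subsystem after another, and on failure unwind only the steps that already succeeded, in reverse order, before kfree() (lines 649-662). A self-contained sketch of that goto-unwind shape, with hypothetical step names, follows.

/* Generic goto-unwind ladder in the shape of lines 571-662; the step names
 * (sched, ttm) echo the listing but the bodies are invented. */
#include <stdlib.h>

struct dev_priv {
        void *sched;
        void *ttm;
};

static int  sched_init(struct dev_priv *p) { p->sched = malloc(8); return p->sched ? 0 : -1; }
static void sched_fini(struct dev_priv *p) { free(p->sched); }
static int  ttm_init(struct dev_priv *p)   { p->ttm = malloc(8); return p->ttm ? 0 : -1; }
static void ttm_fini(struct dev_priv *p)   { free(p->ttm); }

static int device_init(struct dev_priv **out)
{
        struct dev_priv *p;
        int ret;

        p = calloc(1, sizeof(*p));      /* kzalloc() in the kernel version */
        if (!p)
                return -1;

        ret = sched_init(p);
        if (ret)
                goto fail_alloc;

        ret = ttm_init(p);
        if (ret)
                goto fail_sched;        /* unwind only what already succeeded */

        *out = p;
        return 0;

fail_sched:
        sched_fini(p);
fail_alloc:
        free(p);
        return ret;
}

int main(void)
{
        struct dev_priv *p = NULL;

        if (device_init(&p) == 0) {
                ttm_fini(p);            /* teardown mirrors init, reversed */
                sched_fini(p);
                free(p);
        }
        return 0;
}
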
670 struct nouveau_drm *drm = nouveau_drm(dev);
678 nouveau_dmem_fini(drm);
679 nouveau_svm_fini(drm);
681 nouveau_debugfs_fini(drm);
687 nouveau_accel_fini(drm);
690 nouveau_ttm_fini(drm);
691 nouveau_vga_fini(drm);
699 mutex_lock(&drm->clients_lock);
700 list_for_each_entry_safe(cli, temp_cli, &drm->clients, head) {
709 mutex_unlock(&drm->clients_lock);
711 nouveau_cli_fini(&drm->client);
712 nouveau_cli_fini(&drm->master);
714 nouveau_sched_fini(drm);
716 nvif_parent_dtor(&drm->parent);
717 mutex_destroy(&drm->clients_lock);
718 kfree(drm);
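
The client teardown at lines 699-709 walks drm->clients with list_for_each_entry_safe() under clients_lock; the "_safe" variant is the one that allows the current entry to be unlinked and freed while the walk continues. The plain-C example below re-creates that idea with an ordinary singly linked list: the next pointer is saved before the current node is released. Names and types are illustrative.

/* Why "_safe" iteration matters: capture the next pointer before the
 * current element may be freed.  Plain-C stand-in, not <linux/list.h>. */
#include <stdio.h>
#include <stdlib.h>

struct client {
        int id;
        struct client *next;
};

static void drop_all_clients(struct client **head)
{
        struct client *cli = *head, *tmp;

        while (cli) {
                tmp = cli->next;        /* saved first, like the _safe iterator */
                printf("dropping client %d\n", cli->id);
                free(cli);
                cli = tmp;
        }
        *head = NULL;
}

int main(void)
{
        struct client *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct client *c = malloc(sizeof(*c));

                if (!c)
                        break;
                c->id = i;
                c->next = head;
                head = c;
        }
        drop_all_clients(&head);
        return 0;
}
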
764 struct nouveau_drm *drm = nouveau_drm(dev);
772 drm->old_pm_cap = pdev->pm_cap;
774 NV_INFO(drm, "Disabling PCI power management to avoid bug\n");
856 struct nouveau_drm *drm = nouveau_drm(dev);
862 client = nvxx_client(&drm->client.base);
874 struct nouveau_drm *drm = nouveau_drm(dev);
877 if (drm->old_pm_cap)
878 pdev->pm_cap = drm->old_pm_cap;
886 struct nouveau_drm *drm = nouveau_drm(dev);
890 nouveau_svm_suspend(drm);
891 nouveau_dmem_suspend(drm);
895 NV_DEBUG(drm, "suspending display...\n");
901 NV_DEBUG(drm, "evicting buffers...\n");
903 man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM);
904 ttm_resource_manager_evict_all(&drm->ttm.bdev, man);
906 NV_DEBUG(drm, "waiting for kernel channels to go idle...\n");
907 if (drm->cechan) {
908 ret = nouveau_channel_idle(drm->cechan);
913 if (drm->channel) {
914 ret = nouveau_channel_idle(drm->channel);
919 NV_DEBUG(drm, "suspending fence...\n");
920 if (drm->fence && nouveau_fence(drm)->suspend) {
921 if (!nouveau_fence(drm)->suspend(drm)) {
927 NV_DEBUG(drm, "suspending object tree...\n");
928 ret = nvif_client_suspend(&drm->master.base);
935 if (drm->fence && nouveau_fence(drm)->resume)
936 nouveau_fence(drm)->resume(drm);
940 NV_DEBUG(drm, "resuming display...\n");
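
The suspend path (lines 886-940) runs in a fixed order: SVM and DMEM, display, VRAM eviction, idling the kernel channels, fence state, then the NVIF object tree; when a late step fails it resumes the fence state and display that were already suspended (lines 935-940). The sketch below shows that "suspend forward, roll back in reverse on failure" shape with an entirely hypothetical step table; the real driver open-codes its steps rather than using a table.

/* "Suspend in order, roll back on failure" pattern suggested by the error
 * path at lines 935-940.  All step names and functions are invented. */
#include <stdio.h>

struct pm_step {
        const char *name;
        int  (*suspend)(void);
        void (*resume)(void);
};

static int  ok_suspend(void)   { return 0; }
static int  fail_suspend(void) { return -1; }
static void noop_resume(void)  { }

static const struct pm_step steps[] = {
        { "display",     ok_suspend,   noop_resume },
        { "buffers",     ok_suspend,   noop_resume },
        { "fence",       ok_suspend,   noop_resume },
        { "object tree", fail_suspend, noop_resume },  /* force a failure */
};

static int do_suspend(void)
{
        int i, ret = 0;

        for (i = 0; i < (int)(sizeof(steps) / sizeof(steps[0])); i++) {
                printf("suspending %s...\n", steps[i].name);
                ret = steps[i].suspend();
                if (ret)
                        goto rollback;
        }
        return 0;

rollback:
        /* resume everything that had already been suspended, newest first */
        while (--i >= 0) {
                printf("resuming %s...\n", steps[i].name);
                steps[i].resume();
        }
        return ret;
}

int main(void)
{
        return do_suspend() ? 1 : 0;
}
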
950 struct nouveau_drm *drm = nouveau_drm(dev);
952 NV_DEBUG(drm, "resuming object tree...\n");
953 ret = nvif_client_resume(&drm->master.base);
955 NV_ERROR(drm, "Client resume failed with error: %d\n", ret);
959 NV_DEBUG(drm, "resuming fence...\n");
960 if (drm->fence && nouveau_fence(drm)->resume)
961 nouveau_fence(drm)->resume(drm);
966 NV_DEBUG(drm, "resuming display...\n");
971 nouveau_dmem_resume(drm);
972 nouveau_svm_resume(drm);
1075 struct nouveau_drm *drm = nouveau_drm(drm_dev);
1093 NV_ERROR(drm, "resume failed with: %d\n", ret);
1124 struct nouveau_drm *drm = nouveau_drm(dev);
1147 ret = nouveau_cli_init(drm, name, cli);
1153 mutex_lock(&drm->clients_lock);
1154 list_add(&cli->head, &drm->clients);
1155 mutex_unlock(&drm->clients_lock);
1172 struct nouveau_drm *drm = nouveau_drm(dev);
1191 mutex_lock(&drm->clients_lock);
1193 mutex_unlock(&drm->clients_lock);
1358 struct drm_device *drm;
1366 drm = drm_dev_alloc(&driver_platform, &pdev->dev);
1367 if (IS_ERR(drm)) {
1368 err = PTR_ERR(drm);
1372 err = nouveau_drm_device_init(drm);
1376 platform_set_drvdata(pdev, drm);
1378 return drm;
1381 drm_dev_put(drm);
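
The platform probe at lines 1358-1381 shows the DRM allocation idiom: drm_dev_alloc() hands back either a valid pointer or an errno encoded into the pointer, which IS_ERR()/PTR_ERR() unpack (lines 1367-1368), and the drm_dev_put() at line 1381 drops the reference again when a later step fails. The snippet below re-creates the ERR_PTR encoding in plain userspace C to make that convention concrete; it is a simplified model, not the kernel's <linux/err.h>.

/* Simplified model of the ERR_PTR/IS_ERR/PTR_ERR convention used by
 * drm_dev_alloc() in the listing.  Not the kernel implementation. */
#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(intptr_t error)   { return (void *)error; }
static inline intptr_t PTR_ERR(const void *p) { return (intptr_t)p; }
static inline int IS_ERR(const void *p)
{
        /* error codes occupy the top MAX_ERRNO addresses of the space */
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

static void *fake_dev_alloc(int fail)
{
        static int dev = 42;

        return fail ? ERR_PTR(-12 /* -ENOMEM */) : &dev;
}

int main(void)
{
        void *drm = fake_dev_alloc(1);

        if (IS_ERR(drm)) {
                /* mirrors: err = PTR_ERR(drm); goto <error label>; */
                fprintf(stderr, "alloc failed: %ld\n", (long)PTR_ERR(drm));
                return 1;
        }
        return 0;
}
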