Lines matching refs: uvmm (each entry below is prefixed with its line number in the source file)
6 * The uvmm mutex protects any operations on the GPU VA space provided by the
89 nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm,
92 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
98 nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm,
101 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
107 nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm,
110 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
116 nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm,
119 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
125 nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm,
128 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
134 nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm,
139 struct nvif_vmm *vmm = &uvmm->vmm.vmm;
175 return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
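
The matches at lines 89-175 show that the nouveau_uvmm_vmm_*() helpers all resolve the backing VMM via &uvmm->vmm.vmm and that, per the comment at line 6, operations on the GPU VA space are expected to run under the uvmm mutex. A minimal sketch of such a caller, assuming nouveau_uvmm_lock()/nouveau_uvmm_unlock() wrap that mutex (they appear further down in the listing); example_sparse_reserve() is an invented name, not part of the driver:

	static int
	example_sparse_reserve(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
	{
		int ret;

		/* GPU VA space operations must run under the uvmm mutex. */
		nouveau_uvmm_lock(uvmm);
		ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
		nouveau_uvmm_unlock(uvmm);

		return ret;
	}
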
274 __nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
280 MA_STATE(mas, &uvmm->region_mt, addr, addr);
293 reg->uvmm = uvmm;
299 nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm,
305 reg->uvmm = uvmm;
309 ret = __nouveau_uvma_region_insert(uvmm, reg);
319 struct nouveau_uvmm *uvmm = reg->uvmm;
320 MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0);
326 nouveau_uvma_region_create(struct nouveau_uvmm *uvmm,
332 if (!drm_gpuva_interval_empty(&uvmm->umgr, addr, range))
339 ret = nouveau_uvma_region_insert(uvmm, reg, addr, range);
343 ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range);
357 nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm,
360 MA_STATE(mas, &uvmm->region_mt, addr, 0);
366 nouveau_uvma_region_find(struct nouveau_uvmm *uvmm,
371 reg = nouveau_uvma_region_find_first(uvmm, addr, range);
385 struct nouveau_uvmm *uvmm = reg->uvmm;
387 return drm_gpuva_interval_empty(&uvmm->umgr,
395 struct nouveau_uvmm *uvmm = reg->uvmm;
403 nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range);
410 nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm,
415 reg = nouveau_uvma_region_find(uvmm, addr, range);
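
Lines 274-415 cover the uvma region bookkeeping: regions live in the uvmm->region_mt maple tree keyed by VA, are walked through MA_STATE cursors, and are created only after checking that the corresponding interval of the GPUVA manager is empty and taking a sparse reference on the VMM. A sketch of a first-fit lookup in the style of nouveau_uvma_region_find_first() (line 357); the caller is assumed to hold the uvmm mutex, since region_mt is configured with an external lock (see the mt_set_external_lock() match near the end of the listing):

	static struct nouveau_uvma_region *
	example_region_find_first(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
	{
		/* Cursor over the externally locked region tree. */
		MA_STATE(mas, &uvmm->region_mt, addr, 0);

		/* Return the first region overlapping [addr, addr + range). */
		return mas_find(&mas, addr + range - 1);
	}
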
451 nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm,
499 nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
536 nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
552 nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm,
563 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
567 nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm,
573 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL);
577 op_map_prepare(struct nouveau_uvmm *uvmm,
592 drm_gpuva_map(&uvmm->umgr, &uvma->va, op);
608 nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm,
623 ret = op_map_prepare(uvmm, &new->map, &op->map, args);
628 ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
651 ret = op_map_prepare(uvmm, &new->prev, r->prev,
661 ret = op_map_prepare(uvmm, &new->next, r->next,
698 ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
719 nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops,
726 nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm,
739 return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
743 nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm,
747 return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL);
819 nouveau_uvmm_sm(struct nouveau_uvmm *uvmm,
845 nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm,
849 return nouveau_uvmm_sm(uvmm, new, ops);
853 nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm,
857 return nouveau_uvmm_sm(uvmm, new, ops);
861 nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm,
888 nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
914 nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm,
918 nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
922 nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm,
926 nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
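
Lines 451-926 implement the split/merge handling: nouveau_uvmm_sm_prepare() walks a drm_gpuva ops list, turning each op into op_map_prepare()/nouveau_uvmm_vmm_get() work, with *_unwind() passes for failures and *_cleanup() passes once the job has run. A minimal sketch of that op walk, assuming the drm_gpuva_for_each_op() iterator and op types of this GPUVA-manager generation; the per-op handling is elided rather than guessed from the matches:

	static void
	example_walk_ops(struct drm_gpuva_ops *ops)
	{
		struct drm_gpuva_op *op;

		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
				/* back a new mapping, then insert the uvma */
				break;
			case DRM_GPUVA_OP_REMAP:
				/* split an existing uvma into prev/next */
				break;
			case DRM_GPUVA_OP_UNMAP:
				/* tear down an existing uvma */
				break;
			default:
				break;
			}
		}
	}
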
930 nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
933 u64 kernel_managed_end = uvmm->kernel_managed_addr +
934 uvmm->kernel_managed_size;
950 end > uvmm->kernel_managed_addr)
994 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1008 return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
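
Lines 930-1008 validate user-supplied ranges against the kernel-managed window recorded at init time. A sketch of the overlap check those matches imply; the driver's exact conditions (including overflow handling) are not fully visible here:

	static bool
	example_range_allowed(struct nouveau_uvmm *uvmm, u64 addr, u64 range)
	{
		u64 end = addr + range;
		u64 kend = uvmm->kernel_managed_addr + uvmm->kernel_managed_size;

		/* Reject anything overlapping [kernel_managed_addr, kend). */
		return end <= uvmm->kernel_managed_addr || addr >= kend;
	}
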
1044 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1050 nouveau_uvmm_lock(uvmm);
1051 reg = nouveau_uvma_region_find_first(uvmm, addr, range);
1053 nouveau_uvmm_unlock(uvmm);
1062 nouveau_uvmm_unlock(uvmm);
1067 nouveau_uvmm_unlock(uvmm);
1144 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1174 * uvmm lock until we can't fail anymore. This is due to the set of GPU
1178 nouveau_uvmm_lock(uvmm);
1182 ret = nouveau_uvma_region_create(uvmm,
1190 op->reg = nouveau_uvma_region_find(uvmm, op->va.addr,
1197 op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
1205 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
1208 drm_gpuva_ops_free(&uvmm->umgr, op->ops);
1220 reg = nouveau_uvma_region_find_first(uvmm,
1243 op->ops = drm_gpuva_sm_map_ops_create(&uvmm->umgr,
1253 ret = nouveau_uvmm_sm_map_prepare(uvmm, &op->new,
1259 drm_gpuva_ops_free(&uvmm->umgr, op->ops);
1267 op->ops = drm_gpuva_sm_unmap_ops_create(&uvmm->umgr,
1275 ret = nouveau_uvmm_sm_unmap_prepare(uvmm, &op->new,
1278 drm_gpuva_ops_free(&uvmm->umgr, op->ops);
1373 nouveau_uvmm_unlock(uvmm);
1387 nouveau_uvma_region_destroy(uvmm, op->va.addr,
1391 __nouveau_uvma_region_insert(uvmm, op->reg);
1392 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
1396 nouveau_uvmm_sm_map_prepare_unwind(uvmm, &op->new,
1402 nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, &op->new,
1407 drm_gpuva_ops_free(&uvmm->umgr, op->ops);
1412 nouveau_uvmm_unlock(uvmm);
1434 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1444 ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
1451 ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
1470 struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1486 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
1491 nouveau_uvmm_lock(uvmm);
1493 nouveau_uvmm_unlock(uvmm);
1501 nouveau_uvmm_sm_map_cleanup(uvmm, &op->new,
1506 nouveau_uvmm_sm_unmap_cleanup(uvmm, &op->new,
1512 drm_gpuva_ops_free(&uvmm->umgr, op->ops);
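
Lines 1044-1512 trace the VM_BIND job flow: the uvmm lock is taken and, per the comment at line 1174, held until the submission can no longer fail; each op looks up or creates a region, builds an ops list with drm_gpuva_sm_map_ops_create()/drm_gpuva_sm_unmap_ops_create(), and runs its *_prepare() step, while the error path unwinds already-prepared ops and frees their lists with drm_gpuva_ops_free(). A skeleton of that prepare/unwind shape; every name with an example_ prefix is invented for illustration:

	static int
	example_prepare_all(struct nouveau_uvmm *uvmm,
			    struct example_op *op, unsigned int count)
	{
		unsigned int i;
		int ret;

		nouveau_uvmm_lock(uvmm);
		for (i = 0; i < count; i++) {
			ret = example_prepare_one(uvmm, &op[i]);
			if (ret)
				goto unwind;
		}
		nouveau_uvmm_unlock(uvmm);
		return 0;

	unwind:
		/* Undo the ops prepared so far, newest first. */
		while (i--)
			example_unwind_one(uvmm, &op[i]);
		nouveau_uvmm_unlock(uvmm);
		return ret;
	}
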
1659 return nouveau_uvmm_init(&cli->uvmm, cli, init->kernel_managed_addr,
1808 nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli,
1814 mutex_init(&uvmm->mutex);
1815 dma_resv_init(&uvmm->resv);
1816 mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
1817 mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);
1821 if (unlikely(cli->uvmm.disabled)) {
1836 uvmm->kernel_managed_addr = kernel_managed_addr;
1837 uvmm->kernel_managed_size = kernel_managed_size;
1839 drm_gpuva_manager_init(&uvmm->umgr, cli->name,
1845 ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
1848 NULL, 0, &cli->uvmm.vmm.vmm);
1852 cli->uvmm.vmm.cli = cli;
1858 drm_gpuva_manager_destroy(&uvmm->umgr);
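
Lines 1659-1858 cover initialization: the VM_INIT path hands the kernel-managed window to nouveau_uvmm_init(), which sets up the mutex, the reservation object, the externally locked region tree, the GPUVA manager and finally the backing nvif_vmm. Condensed from the matches themselves; error handling and the full drm_gpuva_manager_init()/nvif_vmm_ctor() argument lists are not visible here and are not guessed:

	mutex_init(&uvmm->mutex);
	dma_resv_init(&uvmm->resv);
	mt_init_flags(&uvmm->region_mt, MT_FLAGS_LOCK_EXTERN);
	mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex);

	uvmm->kernel_managed_addr = kernel_managed_addr;
	uvmm->kernel_managed_size = kernel_managed_size;

	/* drm_gpuva_manager_init(&uvmm->umgr, cli->name, ...) and
	 * nvif_vmm_ctor(&cli->mmu, "uvmm", ..., &cli->uvmm.vmm.vmm) follow;
	 * the drm_gpuva_manager_destroy() match at line 1858 is presumably
	 * the error path taken when the VMM constructor fails. */
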
1865 nouveau_uvmm_fini(struct nouveau_uvmm *uvmm)
1867 MA_STATE(mas, &uvmm->region_mt, 0, 0);
1869 struct nouveau_cli *cli = uvmm->vmm.cli;
1879 nouveau_uvmm_lock(uvmm);
1880 drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
1884 if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
1906 WARN(!mtree_empty(&uvmm->region_mt),
1908 __mt_destroy(&uvmm->region_mt);
1909 nouveau_uvmm_unlock(uvmm);
1912 nouveau_vmm_fini(&uvmm->vmm);
1913 drm_gpuva_manager_destroy(&uvmm->umgr);
1916 dma_resv_fini(&uvmm->resv);
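
Lines 1865-1916 show teardown as roughly the mirror image: under the uvmm lock the remaining VAs are walked with drm_gpuva_for_each_va_safe(), skipping the manager's kernel_alloc_node, the region tree is warned about if non-empty and then destroyed, and afterwards the nouveau VMM, the GPUVA manager and the reservation object are torn down. Condensed from the matches, with the per-VA handling and the WARN message elided:

	nouveau_uvmm_lock(uvmm);
	drm_gpuva_for_each_va_safe(va, next, &uvmm->umgr) {
		if (unlikely(va == &uvmm->umgr.kernel_alloc_node))
			continue;
		/* per-VA unmap and uvma teardown elided */
	}
	WARN(!mtree_empty(&uvmm->region_mt), "..."); /* message not shown in the matches */
	__mt_destroy(&uvmm->region_mt);
	nouveau_uvmm_unlock(uvmm);

	nouveau_vmm_fini(&uvmm->vmm);
	drm_gpuva_manager_destroy(&uvmm->umgr);
	dma_resv_fini(&uvmm->resv);
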