Lines matching references to svmm, from the Linux kernel's nouveau SVM (shared virtual memory) code, drivers/gpu/drm/nouveau/nouveau_svm.c. Each entry is the source line number followed by the matching line. The sketches interleaved below reconstruct the surrounding context from the visible fragments; they are hedged readings, not verbatim kernel code.

67 			struct nouveau_svmm *svmm;
88 struct nouveau_svmm *svmm;
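
Lines 67 and 88 are the two struct members: the per-fault record in the fault buffer caches the svmm it resolved to, and struct nouveau_ivmm links a channel instance address to its svmm for that lookup. A sketch of the lookup entry, consistent with the fields the fragments below touch (ivmm->svmm, ivmm->head, and the inst key):

struct nouveau_ivmm {
	struct nouveau_svmm *svmm;	/* owner of the mirrored address space */
	u64 inst;			/* channel instance address, the lookup key */
	struct list_head head;		/* linked on the driver-wide svm->inst list */
};
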
172 if (!cli->svm.svmm) {
189 nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
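
Lines 172 and 189 are from the SVM bind ioctl, which migrates a user range into device memory. A hedged sketch of that loop; the va_start/va_end field names and the mm/cli locals are assumptions from context, not shown in the listing:

	if (!cli->svm.svmm) {
		mutex_unlock(&cli->mutex);
		return -ENOENT;		/* no SVM address space to migrate into */
	}

	for (addr = args->va_start; addr < args->va_end;) {
		struct vm_area_struct *vma;
		unsigned long next;

		vma = find_vma_intersection(mm, addr, args->va_end);
		if (!vma)
			break;

		addr = max(addr, vma->vm_start);
		next = min(vma->vm_end, (unsigned long)args->va_end);
		/* best effort: a chunk that fails to migrate stays in sysmem */
		nouveau_dmem_migrate_vma(cli->drm, cli->svm.svmm, vma, addr,
					 next);
		addr = next;
	}
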
209 nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
212 if (svmm) {
213 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
214 ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
219 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
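
Source lines 209-219 are nouveau_svmm_part(), called when a channel leaves the address space. The instance list lives in the driver-wide nouveau_svm and is guarded by its mutex. The elided lines 215-218 presumably unlink and free the entry; a sketch on that assumption:

void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (svmm) {
		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
		if (ivmm) {
			list_del(&ivmm->head);	/* assumed: drop from svm->inst */
			kfree(ivmm);
		}
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
}
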
225 nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
228 if (svmm) {
231 ivmm->svmm = svmm;
234 mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
235 list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
236 mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
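
Lines 225-236 are the counterpart, nouveau_svmm_join(): allocate an entry, record the svmm and instance key, and add it to the list under the same mutex. The allocation and the ivmm->inst assignment are elided in the listing and assumed here:

int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
	struct nouveau_ivmm *ivmm;

	if (svmm) {
		if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
			return -ENOMEM;
		ivmm->svmm = svmm;
		ivmm->inst = inst;	/* assumed: key matched by nouveau_ivmm_find() */

		mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
		list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
		mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
	}
	return 0;
}
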
243 nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
246 nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
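
Line 246 is the body of nouveau_svmm_invalidate(): it clears the GPU page-table entries for a range with the NVIF_VMM_V0_PFNCLR method. A sketch, assuming the only other logic is the empty-range guard:

static void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
	if (limit > start) {
		nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
				 &(struct nvif_vmm_pfnclr_v0) {
					.addr = start,
					.size = limit - start,
				 }, sizeof(struct nvif_vmm_pfnclr_v0));
	}
}
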
258 struct nouveau_svmm *svmm =
266 SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);
268 mutex_lock(&svmm->mutex);
269 if (unlikely(!svmm->vmm))
277 update->owner == svmm->vmm->cli->drm->dev)
280 if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
281 if (start < svmm->unmanaged.start) {
282 nouveau_svmm_invalidate(svmm, start,
283 svmm->unmanaged.limit);
285 start = svmm->unmanaged.limit;
288 nouveau_svmm_invalidate(svmm, start, limit);
291 mutex_unlock(&svmm->mutex);
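
Lines 258-291 are the process-wide mmu_notifier invalidation callback. The visible fragments give the shape: take svmm->mutex, bail if the svmm was already detached (svmm->vmm cleared by fini, below), skip invalidations the driver itself raised against its own device (line 277), and carve the unmanaged window out of the range before clearing GPU mappings. A hedged reconstruction of the tail; the MMU_NOTIFY_MIGRATE event check is an assumption, since line 276 is not shown:

	mutex_lock(&svmm->mutex);
	if (unlikely(!svmm->vmm))
		goto out;

	/* Migration-triggered invalidations for our own device are already
	 * handled by the migration path itself. (Event name assumed.) */
	if (update->event == MMU_NOTIFY_MIGRATE &&
	    update->owner == svmm->vmm->cli->drm->dev)
		goto out;

	/* The unmanaged window is never mirrored to the GPU: clear below it,
	 * then skip over it. nouveau_svmm_invalidate() ignores empty ranges. */
	if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
		if (start < svmm->unmanaged.start) {
			nouveau_svmm_invalidate(svmm, start,
						svmm->unmanaged.limit);
		}
		start = svmm->unmanaged.limit;
	}

	nouveau_svmm_invalidate(svmm, start, limit);
out:
	mutex_unlock(&svmm->mutex);
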
308 struct nouveau_svmm *svmm = *psvmm;
309 if (svmm) {
310 mutex_lock(&svmm->mutex);
311 svmm->vmm = NULL;
312 mutex_unlock(&svmm->mutex);
313 mmu_notifier_put(&svmm->notifier);
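
Lines 308-313 are teardown. Ordering matters: svmm->vmm is cleared under the mutex first, so any notifier callback already running (or about to run) sees NULL and bails, and only then is the notifier reference dropped. The struct itself is freed later through the notifier's free_notifier hook, not here:

void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
	struct nouveau_svmm *svmm = *psvmm;

	if (svmm) {
		mutex_lock(&svmm->mutex);
		svmm->vmm = NULL;	/* detach: callbacks check this */
		mutex_unlock(&svmm->mutex);
		mmu_notifier_put(&svmm->notifier);
		*psvmm = NULL;		/* assumed: clear the caller's handle */
	}
}
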
323 struct nouveau_svmm *svmm;
332 if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
334 svmm->vmm = &cli->svm;
335 svmm->unmanaged.start = args->unmanaged_addr;
336 svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
337 mutex_init(&svmm->mutex);
362 svmm->notifier.ops = &nouveau_mn_ops;
363 ret = __mmu_notifier_register(&svmm->notifier, current->mm);
366 /* Note, ownership of svmm transfers to mmu_notifier */
368 cli->svm.svmm = svmm;
378 kfree(svmm);
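
Lines 323-378 are initialization. After the kzalloc and field setup shown, the notifier is registered against the caller's mm; __mmu_notifier_register() is the variant that requires mmap_lock held for write, so the surrounding lock (not shown in the listing) is assumed here. On success, ownership of svmm passes to the mmu_notifier (line 366), which is why only the failure path (line 378) calls kfree():

	mmap_write_lock(current->mm);
	svmm->notifier.ops = &nouveau_mn_ops;
	ret = __mmu_notifier_register(&svmm->notifier, current->mm);
	if (ret)
		goto out_mm_unlock;	/* assumed label; path ends at the kfree() */

	/* From here on svmm is freed via nouveau_mn_ops.free_notifier,
	 * never directly. */
	cli->svm.svmm = svmm;
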
506 struct nouveau_svmm *svmm;
517 range->owner == sn->svmm->vmm->cli->drm->dev)
528 mutex_lock(&sn->svmm->mutex);
529 else if (!mutex_trylock(&sn->svmm->mutex))
532 mutex_unlock(&sn->svmm->mutex);
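
Lines 506-532 belong to the per-fault interval notifier (struct svm_notifier evidently pairs an mmu_interval_notifier with an svmm pointer, per line 506 and the sn->svmm uses). Unlike the process-wide callback above, this one must cope with non-blockable invalidations, hence the trylock at line 529. A hedged reconstruction:

static bool
nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
			     const struct mmu_notifier_range *range,
			     unsigned long cur_seq)
{
	struct svm_notifier *sn =
		container_of(mni, struct svm_notifier, notifier);

	/* A device-exclusive conversion we requested ourselves does not
	 * invalidate the mapping we are about to create. */
	if (range->event == MMU_NOTIFY_EXCLUSIVE &&
	    range->owner == sn->svmm->vmm->cli->drm->dev)
		return true;

	/* Serialize with the fault path, but never sleep if the caller
	 * cannot block. */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&sn->svmm->mutex);
	else if (!mutex_trylock(&sn->svmm->mutex))
		return false;
	mmu_interval_set_seq(mni, cur_seq);
	mutex_unlock(&sn->svmm->mutex);
	return true;
}
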
588 static int nouveau_atomic_range_fault(struct nouveau_svmm *svmm,
595 struct mm_struct *mm = svmm->notifier.mm;
623 mutex_lock(&svmm->mutex);
627 mutex_unlock(&svmm->mutex);
640 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
641 mutex_unlock(&svmm->mutex);
651 static int nouveau_range_fault(struct nouveau_svmm *svmm,
667 struct mm_struct *mm = svmm->notifier.mm;
695 mutex_lock(&svmm->mutex);
698 mutex_unlock(&svmm->mutex);
706 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
707 mutex_unlock(&svmm->mutex);
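
Lines 588-641 and 651-707 are the two fault-servicing helpers; the atomic variant converts the page to a device-exclusive entry while the plain one uses hmm_range_fault(), but both follow the canonical mmu_interval_notifier pattern: fault the pages, then take svmm->mutex and re-check the notifier sequence before programming the GPU. A sketch of that skeleton as it would appear in nouveau_range_fault(), timeout handling omitted:

	while (true) {
		range.notifier_seq = mmu_interval_read_begin(range.notifier);
		mmap_read_lock(mm);
		ret = hmm_range_fault(&range);
		mmap_read_unlock(mm);
		if (ret) {
			if (ret == -EBUSY)
				continue;	/* collided with an update; refault */
			goto out;
		}

		mutex_lock(&svmm->mutex);
		if (mmu_interval_read_retry(range.notifier,
					    range.notifier_seq)) {
			mutex_unlock(&svmm->mutex);
			continue;	/* invalidated meanwhile; start over */
		}
		break;
	}

	/* The GPU PTEs are written with svmm->mutex still held (lines 640
	 * and 706), so an invalidation cannot slip between check and map. */
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args, size, NULL);
	mutex_unlock(&svmm->mutex);
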
721 struct nouveau_svmm *svmm;
761 for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
762 if (!svmm || buffer->fault[fi]->inst != inst) {
765 svmm = ivmm ? ivmm->svmm : NULL;
767 SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
769 buffer->fault[fi]->svmm = svmm;
785 if (!(svmm = buffer->fault[fi]->svmm)) {
789 SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);
796 if (start < svmm->unmanaged.limit)
797 limit = min_t(u64, limit, svmm->unmanaged.start);
826 mm = svmm->notifier.mm;
832 notifier.svmm = svmm;
834 ret = nouveau_atomic_range_fault(svmm, svm->drm,
838 ret = nouveau_range_fault(svmm, svm->drm, &args.i,
854 if (buffer->fault[fn]->svmm != svmm ||
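
Lines 721-854 are the fault-buffer handler. The first pass (761-769) resolves each fault's channel instance to an svmm through the ivmm list, caching the previous lookup since faults arrive grouped by instance; faults with no svmm are dropped (785), the fault window is clamped away from the unmanaged region (796-797), and faults satisfied by the same mapping are retired in a batch (854). A sketch of the resolution pass; the svm->mutex around it is assumed:

	mutex_lock(&svm->mutex);	/* assumed: protects the inst list */
	for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
		if (!svmm || buffer->fault[fi]->inst != inst) {
			struct nouveau_ivmm *ivmm =
				nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
			svmm = ivmm ? ivmm->svmm : NULL;
			inst = buffer->fault[fi]->inst;
			SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
		}
		buffer->fault[fi]->svmm = svmm;
	}
	mutex_unlock(&svm->mutex);
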
925 nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
934 mutex_lock(&svmm->mutex);
936 ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
939 mutex_unlock(&svmm->mutex);
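
Lines 925-939 close the loop: nouveau_pfns_map() pushes an array of pre-encoded pfns into the GPU VMM after a migration, again under svmm->mutex so it cannot race the invalidation callbacks. A sketch; nouveau_pfns_to_args() and the args layout are assumptions about the file's local helpers:

void
nouveau_pfns_map(struct nouveau_svmm *svmm, struct mm_struct *mm,
		 unsigned long addr, u64 *pfns, unsigned long npages)
{
	/* assumed helper: recovers the ioctl args wrapping the pfn array */
	struct nouveau_pfnmap_args *args = nouveau_pfns_to_args(pfns);
	int ret;

	args->p.addr = addr;
	args->p.size = npages << PAGE_SHIFT;

	mutex_lock(&svmm->mutex);
	/* result captured (line 936) but mapping is effectively best effort */
	ret = nvif_object_ioctl(&svmm->vmm->vmm.object, args,
				sizeof(*args) + npages * sizeof(args->p.phys[0]),
				NULL);
	mutex_unlock(&svmm->mutex);
}
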