Lines matching refs:args (uses of the struct migrate_vma args in the nouveau DMEM migration code)
141 struct vm_fault *vmf, struct migrate_vma *args,
148 spage = migrate_pfn_to_page(args->src[0]);
149 if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
163 nouveau_svmm_invalidate(svmm, args->start, args->end);
169 args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
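
Lines 141-169 are the per-page copy step on the CPU-fault path: the source entry that migrate_vma_setup() collected is turned back into a page with migrate_pfn_to_page(), skipped unless the core flagged it MIGRATE_PFN_MIGRATE, and args->dst[0] is filled with an encoded destination pfn after the SVMM range has been invalidated. A minimal sketch of that dst-entry construction follows; demo_fault_copy_one() is a hypothetical stand-in, the actual DMA copy and the nouveau_svmm_invalidate() call are elided, and alloc_page_vma()/GFP_HIGHUSER are assumptions about how the destination page is obtained, not taken from the listing.

#include <linux/gfp.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

/*
 * Sketch (not the driver's code) of the per-page step around lines
 * 141-169: validate the collected source entry, allocate and lock a
 * destination system page, and encode it into args->dst[0].
 */
static vm_fault_t demo_fault_copy_one(struct vm_fault *vmf,
				      struct migrate_vma *args)
{
	struct page *spage, *dpage;

	spage = migrate_pfn_to_page(args->src[0]);
	if (!spage || !(args->src[0] & MIGRATE_PFN_MIGRATE))
		return 0;		/* nothing collected for this slot */

	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
	if (!dpage)
		return VM_FAULT_SIGBUS;
	lock_page(dpage);

	/* ... copy the device page's data into dpage here ... */

	/*
	 * MIGRATE_PFN_LOCKED matches the convention visible at line 169;
	 * kernels >= 5.16 dropped the flag, leaving only migrate_pfn().
	 */
	args->dst[0] = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	return 0;
}
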
188 struct migrate_vma args = {
203 if (migrate_vma_setup(&args) < 0)
205 if (!args.cpages)
208 ret = nouveau_dmem_fault_copy_one(drm, vmf, &args, &dma_addr);
213 migrate_vma_pages(&args);
217 migrate_vma_finalize(&args);
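
Lines 188-217 wrap that copy step in the usual migrate_vma sequence: build a one-page struct migrate_vma for the faulting address, let migrate_vma_setup() collect the page, return early when args.cpages is zero, and only commit with migrate_vma_pages() once the destination entry is filled, with migrate_vma_finalize() always closing the operation. A hedged sketch of that orchestration, reusing the demo_fault_copy_one() stand-in from above; the pgmap_owner/flags initializers are an assumption about how device-private pages are selected in this kernel range, and the driver's DMA fencing and unmapping are elided.

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Sketch of the fault-path orchestration around lines 188-217.
 * "owner" stands for the dev_pagemap owner the driver registered.
 */
static vm_fault_t demo_migrate_to_ram(struct vm_fault *vmf, void *owner)
{
	unsigned long src = 0, dst = 0;
	struct migrate_vma args = {
		.vma		= vmf->vma,
		.start		= vmf->address,
		.end		= vmf->address + PAGE_SIZE,
		.src		= &src,
		.dst		= &dst,
		.pgmap_owner	= owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	vm_fault_t ret = 0;

	if (migrate_vma_setup(&args) < 0)
		return VM_FAULT_SIGBUS;
	if (!args.cpages)		/* nothing was collected to migrate */
		return 0;

	ret = demo_fault_copy_one(vmf, &args);
	if (ret || !dst)
		goto done;

	migrate_vma_pages(&args);	/* install the new system page */
done:
	migrate_vma_finalize(&args);	/* always paired with _setup() */
	return ret;
}
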
617 struct nouveau_svmm *svmm, struct migrate_vma *args,
621 unsigned long addr = args->start, nr_dma = 0, i;
623 for (i = 0; addr < args->end; i++) {
624 args->dst[i] = nouveau_dmem_migrate_copy_one(drm, svmm,
625 args->src[i], dma_addrs + nr_dma, pfns + i);
632 migrate_vma_pages(args);
634 nouveau_pfns_map(svmm, args->vma->vm_mm, args->start, pfns, i);
640 migrate_vma_finalize(args);
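
Lines 617-640 apply the same commit sequence to a whole chunk: the caller has already run migrate_vma_setup(), so this helper only walks the collected src entries, produces one dst entry per PAGE_SIZE step of the range, and then calls migrate_vma_pages() followed by migrate_vma_finalize(); the fence around the DMA copies and the nouveau_pfns_map() update visible at line 634 are elided here. A sketch with a hypothetical per-entry callback standing in for nouveau_dmem_migrate_copy_one():

#include <linux/migrate.h>
#include <linux/mm.h>

/*
 * Sketch of the per-chunk loop around lines 617-640.  demo_copy_one()
 * inspects one collected src entry and returns the encoded dst entry,
 * or 0 when the page is skipped.
 */
static void demo_migrate_chunk(struct migrate_vma *args,
			       unsigned long (*demo_copy_one)(unsigned long src))
{
	unsigned long addr = args->start, i;

	/* One destination entry per PAGE_SIZE step of the chunk. */
	for (i = 0; addr < args->end; i++, addr += PAGE_SIZE)
		args->dst[i] = demo_copy_one(args->src[i]);

	migrate_vma_pages(args);	/* swap the destination pages in */
	migrate_vma_finalize(args);	/* unlock/put pages, undo failures */
}
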
653 struct migrate_vma args = {
666 args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
667 if (!args.src)
669 args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
670 if (!args.dst)
682 if (args.start + (max << PAGE_SHIFT) > end)
683 args.end = end;
685 args.end = args.start + (max << PAGE_SHIFT);
687 ret = migrate_vma_setup(&args);
691 if (args.cpages)
692 nouveau_dmem_migrate_chunk(drm, svmm, &args, dma_addrs,
694 args.start = args.end;
703 kfree(args.dst);
705 kfree(args.src);
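
Lines 653-705 are the outer, windowed loop: the src/dst pfn arrays are kcalloc()'d for at most "max" entries, so the requested range is processed in windows of that many pages, with args.end clamped to the overall end on the last pass and args.start advanced to args.end after each window. A sketch of that chunking under stated assumptions: demo_migrate_window() is a hypothetical per-window handler (nouveau_dmem_migrate_chunk() in the listing), and the DMA-address and pfn arrays plus the pgmap owner/selection flags set by the driver are omitted.

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/slab.h>

/*
 * Sketch of the windowed migration loop around lines 653-705: allocate
 * src/dst arrays sized for "max" pages and migrate [start, end) one
 * window at a time.
 */
static int demo_migrate_range(struct vm_area_struct *vma,
			      unsigned long start, unsigned long end,
			      unsigned long max,
			      void (*demo_migrate_window)(struct migrate_vma *))
{
	struct migrate_vma args = {
		.vma	= vma,
		.start	= start,
	};
	unsigned long npages = (end - start) >> PAGE_SHIFT;
	unsigned long i;
	int ret = -ENOMEM;

	args.src = kcalloc(max, sizeof(*args.src), GFP_KERNEL);
	if (!args.src)
		goto out;
	args.dst = kcalloc(max, sizeof(*args.dst), GFP_KERNEL);
	if (!args.dst)
		goto out_free_src;

	for (i = 0; i < npages; i += max) {
		/* Clamp the last window so it stops exactly at "end". */
		if (args.start + (max << PAGE_SHIFT) > end)
			args.end = end;
		else
			args.end = args.start + (max << PAGE_SHIFT);

		ret = migrate_vma_setup(&args);
		if (ret)
			goto out_free_dst;

		if (args.cpages)		/* at least one page collected */
			demo_migrate_window(&args);
		args.start = args.end;		/* advance to the next window */
	}
	ret = 0;

out_free_dst:
	kfree(args.dst);
out_free_src:
	kfree(args.src);
out:
	return ret;
}
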