Lines Matching refs:dst_vma
30 struct vm_area_struct *dst_vma;
32 dst_vma = find_vma(dst_mm, dst_start);
33 if (!dst_vma)
36 if (dst_start < dst_vma->vm_start ||
37 dst_start + len > dst_vma->vm_end)
45 if (!dst_vma->vm_userfaultfd_ctx.ctx)
48 return dst_vma;
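
The matches at lines 30-48 together form find_dst_vma(), which resolves the destination VMA, rejects ranges not fully covered by a single VMA, and insists on a registered userfaultfd context. A minimal sketch of the whole helper; the early returns and the comments (which do not reference dst_vma and therefore are not listed above) are filled in as assumptions:

static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                            unsigned long dst_start,
                                            unsigned long len)
{
        struct vm_area_struct *dst_vma;

        /* Caller is assumed to hold mmap_lock of dst_mm. */
        dst_vma = find_vma(dst_mm, dst_start);
        if (!dst_vma)
                return NULL;

        /* The whole [dst_start, dst_start + len) range must sit in one VMA. */
        if (dst_start < dst_vma->vm_start ||
            dst_start + len > dst_vma->vm_end)
                return NULL;

        /* Only VMAs registered with a userfaultfd context are valid targets. */
        if (!dst_vma->vm_userfaultfd_ctx.ctx)
                return NULL;

        return dst_vma;
}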
53 struct vm_area_struct *dst_vma,
69 page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, dst_vma, dst_addr);
104 _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
105 if (dst_vma->vm_flags & VM_WRITE) {
113 if (dst_vma->vm_file) {
115 inode = dst_vma->vm_file->f_inode;
116 offset = linear_page_index(dst_vma, dst_addr);
127 page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
128 lru_cache_add_inactive_or_unevictable(page, dst_vma);
133 update_mmu_cache(dst_vma, dst_addr, dst_pte);
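
Lines 53-133 come from mcopy_atomic_pte(), the anonymous-memory UFFDIO_COPY path: a page is allocated against dst_vma, the user data is copied into it, and a dirty PTE (writable only if the VMA allows it) is installed along with the rmap and LRU bookkeeping. A condensed sketch of the install tail; page-table locking, the shmem i_size check hinted at by lines 113-116, and the error paths are omitted, and pte_mkwrite()/pte_mkuffd_wp()/set_pte_at() are filled in from the standard sequence rather than from the matches above:

        /* Build the new PTE from the freshly filled page. */
        _dst_pte = pte_mkdirty(mk_pte(page, dst_vma->vm_page_prot));
        if (dst_vma->vm_flags & VM_WRITE) {
                if (wp_copy)
                        _dst_pte = pte_mkuffd_wp(_dst_pte);   /* write-protect mode */
                else
                        _dst_pte = pte_mkwrite(_dst_pte);
        }

        /* Hook the new page into the anon rmap and the LRU, then map it. */
        page_add_new_anon_rmap(page, dst_vma, dst_addr, false);
        lru_cache_add_inactive_or_unevictable(page, dst_vma);
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);

        /* No TLB invalidation needed: the PTE was none before. */
        update_mmu_cache(dst_vma, dst_addr, dst_pte);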
148 struct vm_area_struct *dst_vma,
158 dst_vma->vm_page_prot));
160 if (dst_vma->vm_file) {
162 inode = dst_vma->vm_file->f_inode;
163 offset = linear_page_index(dst_vma, dst_addr);
174 update_mmu_cache(dst_vma, dst_addr, dst_pte);
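
Lines 148-174 are mfill_zeropage_pte(), the UFFDIO_ZEROPAGE counterpart: instead of allocating and filling a page it maps the shared zero page read-only. A condensed sketch; the pte_mkspecial()/pfn_pte()/my_zero_pfn() composition is reconstructed around the matched "dst_vma->vm_page_prot" fragment, and locking plus the shmem i_size check are again left out:

        pte_t _dst_pte;

        /* No allocation: point the PTE at the global zero page, read-only. */
        _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr),
                                         dst_vma->vm_page_prot));

        /* With the page-table lock held (not shown), install and finish. */
        set_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
        update_mmu_cache(dst_vma, dst_addr, dst_pte);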
208 struct vm_area_struct *dst_vma,
214 int vm_alloc_shared = dst_vma->vm_flags & VM_SHARED;
215 int vm_shared = dst_vma->vm_flags & VM_SHARED;
241 vma_hpagesize = vma_kernel_pagesize(dst_vma);
252 * On routine entry dst_vma is set. If we had to drop mmap_lock and
253 * retry, dst_vma will be set to NULL and we must look it up again.
255 if (!dst_vma) {
257 dst_vma = find_dst_vma(dst_mm, dst_start, len);
258 if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
262 if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
265 vm_shared = dst_vma->vm_flags & VM_SHARED;
269 * If not shared, ensure the dst_vma has an anon_vma.
273 if (unlikely(anon_vma_prepare(dst_vma)))
288 mapping = dst_vma->vm_file->f_mapping;
290 idx = linear_page_index(dst_vma, dst_addr);
310 err = hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma,
333 dst_vma = NULL;
392 * for which the page was originally allocated. dst_vma could
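
Lines 208-392 belong to __mcopy_atomic_hugetlb(), the hugetlb flavour of the copy loop. The most instructive dst_vma usage is the retry path: whenever mmap_lock had to be dropped, dst_vma is cleared and must be looked up and re-validated before continuing. A sketch of that re-validation plus the private-mapping anon_vma check; the error codes and the out_unlock label are assumptions filled in around the matched lines:

        if (!dst_vma) {
                /* mmap_lock was dropped and retaken: look the VMA up again. */
                err = -ENOENT;
                dst_vma = find_dst_vma(dst_mm, dst_start, len);
                if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
                        goto out_unlock;

                /* It must still be hugetlb with the same huge page size. */
                err = -EINVAL;
                if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
                        goto out_unlock;

                vm_shared = dst_vma->vm_flags & VM_SHARED;
        }

        /* Private mappings need an anon_vma before pages can be mapped in. */
        err = -ENOMEM;
        if (!vm_shared && unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;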
409 struct vm_area_struct *dst_vma,
418 struct vm_area_struct *dst_vma,
437 if (!(dst_vma->vm_flags & VM_SHARED)) {
439 err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
444 dst_vma, dst_addr);
449 dst_vma, dst_addr,
453 dst_vma, dst_addr);
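
Lines 409-453 are mfill_atomic_pte(), the per-PTE dispatcher called from the main copy loop: private (anonymous) destinations go to mcopy_atomic_pte() or mfill_zeropage_pte(), shared destinations go to the shmem helpers. A sketch of the dispatch; the shmem_* names and the exact argument lists are reconstructed for this era of the file and should be treated as assumptions:

        if (!(dst_vma->vm_flags & VM_SHARED)) {
                /* Private destination: plain anonymous memory. */
                if (!zeropage)
                        err = mcopy_atomic_pte(dst_mm, dst_pmd, dst_vma,
                                               dst_addr, src_addr, page,
                                               wp_copy);
                else
                        err = mfill_zeropage_pte(dst_mm, dst_pmd,
                                                 dst_vma, dst_addr);
        } else {
                /* Shared destination: shmem-backed, no wp_copy support here. */
                if (!zeropage)
                        err = shmem_mcopy_atomic_pte(dst_mm, dst_pmd,
                                                     dst_vma, dst_addr,
                                                     src_addr, page);
                else
                        err = shmem_mfill_zeropage_pte(dst_mm, dst_pmd,
                                                       dst_vma, dst_addr);
        }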
467 struct vm_area_struct *dst_vma;
506 dst_vma = find_dst_vma(dst_mm, dst_start, len);
507 if (!dst_vma)
515 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
516 dst_vma->vm_flags & VM_SHARED))
520 * validate 'mode' now that we know the dst_vma: don't allow
524 if (wp_copy && !(dst_vma->vm_flags & VM_UFFD_WP))
530 if (is_vm_hugetlb_page(dst_vma))
531 return __mcopy_atomic_hugetlb(dst_mm, dst_vma, dst_start,
534 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
538 * Ensure the dst_vma has an anon_vma or this page
540 * dst_vma.
543 if (!(dst_vma->vm_flags & VM_SHARED) &&
544 unlikely(anon_vma_prepare(dst_vma)))
581 err = mfill_atomic_pte(dst_mm, dst_pmd, dst_vma, dst_addr,
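
Lines 467-581 are from __mcopy_atomic(), the common driver behind UFFDIO_COPY and UFFDIO_ZEROPAGE: it finds dst_vma, rejects anonymous VM_SHARED mappings, only allows write-protect mode on VM_UFFD_WP VMAs, hands hugetlb VMAs to __mcopy_atomic_hugetlb(), and otherwise loops over the range calling mfill_atomic_pte(). From userspace this path is exercised with a UFFDIO_COPY ioctl on a registered range; a minimal, self-contained example (assumes the kernel permits this process to create a userfaultfd, e.g. via the vm.unprivileged_userfaultfd sysctl or sufficient privileges):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long page = sysconf(_SC_PAGESIZE);

        int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) { perror("userfaultfd"); exit(1); }

        struct uffdio_api api = { .api = UFFD_API };
        if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); exit(1); }

        /* Destination area; do not touch it before the copy or we'd fault. */
        char *dst = mmap(NULL, page, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (dst == MAP_FAILED) { perror("mmap"); exit(1); }

        struct uffdio_register reg = {
                .range = { .start = (unsigned long)dst, .len = page },
                .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); exit(1); }

        /* Ordinary source memory holding the data to be installed atomically. */
        char *src = malloc(page);
        memset(src, 'x', page);

        /* This ioctl ends up in __mcopy_atomic() -> mfill_atomic_pte(). */
        struct uffdio_copy copy = {
                .dst  = (unsigned long)dst,
                .src  = (unsigned long)src,
                .len  = page,
                .mode = 0,
        };
        if (ioctl(uffd, UFFDIO_COPY, &copy)) { perror("UFFDIO_COPY"); exit(1); }

        printf("UFFDIO_COPY copied %lld bytes, dst[0] = '%c'\n",
               (long long)copy.copy, dst[0]);
        return 0;
}

The point of going through the ioctl rather than writing into dst directly is that a plain store would itself trigger a userfault on the MISSING-registered range, whereas UFFDIO_COPY installs a fully populated page in one step through the kernel path listed above.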
645 struct vm_area_struct *dst_vma;
670 dst_vma = find_dst_vma(dst_mm, start, len);
675 if (!dst_vma || (dst_vma->vm_flags & VM_SHARED))
677 if (!userfaultfd_wp(dst_vma))
679 if (!vma_is_anonymous(dst_vma))
683 newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
685 newprot = vm_get_page_prot(dst_vma->vm_flags);
687 change_protection(dst_vma, start, start + len, newprot,
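
Lines 645-687 are mwriteprotect_range(), which backs UFFDIO_WRITEPROTECT: the target must be an anonymous, non-shared VMA registered for userfaultfd write-protect, and the actual work is a change_protection() call that either strips or restores VM_WRITE in the page protection. A sketch of that tail; the matched call at line 687 is cut off after "newprot,", so the final cp_flags argument shown here is an assumption based on the same era of the code:

        if (enable_wp)
                newprot = vm_get_page_prot(dst_vma->vm_flags & ~(VM_WRITE));
        else
                newprot = vm_get_page_prot(dst_vma->vm_flags);

        /* Tell change_protection() this is a uffd-wp set/clear, not a plain mprotect. */
        change_protection(dst_vma, start, start + len, newprot,
                          enable_wp ? MM_CP_UFFD_WP : MM_CP_UFFD_WP_RESOLVE);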