Lines matching refs: dst_vma
31 struct vm_area_struct *dst_vma;
33 dst_vma = find_vma(dst_mm, dst_start);
34 if (!range_in_vma(dst_vma, dst_start, dst_start + len))
42 if (!dst_vma->vm_userfaultfd_ctx.ctx)
45 return dst_vma;
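The matches above (31-45) outline find_dst_vma(), the helper that resolves and validates the destination VMA: the whole destination range must fall inside a single VMA, and that VMA must still carry a userfaultfd context. A hedged reconstruction, with the unmatched glue lines filled in by inference and therefore possibly differing slightly from the exact tree this listing was taken from:

        static struct vm_area_struct *find_dst_vma(struct mm_struct *dst_mm,
                                                    unsigned long dst_start,
                                                    unsigned long len)
        {
                struct vm_area_struct *dst_vma;

                /* The whole [dst_start, dst_start + len) range must sit in one VMA. */
                dst_vma = find_vma(dst_mm, dst_start);
                if (!range_in_vma(dst_vma, dst_start, dst_start + len))
                        return NULL;

                /* The VMA must still be registered with a userfaultfd context. */
                if (!dst_vma->vm_userfaultfd_ctx.ctx)
                        return NULL;

                return dst_vma;
        }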
49 static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
55 if (!dst_vma->vm_file)
58 inode = dst_vma->vm_file->f_inode;
59 offset = linear_page_index(dst_vma, dst_addr);
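Lines 49-59 come from mfill_file_over_size(), which rejects fills past EOF for file-backed destinations by comparing the faulting page index against the inode size. A hedged reconstruction; the i_size_read()/DIV_ROUND_UP tail is inferred rather than shown in the matches:

        static bool mfill_file_over_size(struct vm_area_struct *dst_vma,
                                         unsigned long dst_addr)
        {
                struct inode *inode;
                pgoff_t offset, max_off;

                if (!dst_vma->vm_file)
                        return false;

                inode = dst_vma->vm_file->f_inode;
                offset = linear_page_index(dst_vma, dst_addr);
                /* Inferred: compare against the page index just past EOF. */
                max_off = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
                return offset >= max_off;
        }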
65 * Install PTEs, to map dst_addr (within dst_vma) to page.
71 struct vm_area_struct *dst_vma,
76 struct mm_struct *dst_mm = dst_vma->vm_mm;
78 bool writable = dst_vma->vm_flags & VM_WRITE;
79 bool vm_shared = dst_vma->vm_flags & VM_SHARED;
84 _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
89 _dst_pte = pte_mkwrite(_dst_pte, dst_vma);
98 if (mfill_file_over_size(dst_vma, dst_addr)) {
117 page_add_file_rmap(page, dst_vma, false);
119 page_add_new_anon_rmap(page, dst_vma, dst_addr);
120 folio_add_lru_vma(folio, dst_vma);
132 update_mmu_cache(dst_vma, dst_addr, dst_pte);
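Lines 65-132 are the heart of mfill_atomic_install_pte(): build the PTE from dst_vma's protection bits, pick the right rmap call depending on whether the page is a fresh anonymous page or already sits in the shmem page cache, then install it. A heavily condensed sketch of just the dst_vma-dependent steps; the dirty/uffd-wp bits, PTE locking, and the real error codes are omitted, and the writable/page_in_cache names are assumed from context:

        /* Condensed sketch, not the full function body. */
        _dst_pte = mk_pte(page, dst_vma->vm_page_prot);
        if (writable)                           /* dst_vma->vm_flags & VM_WRITE */
                _dst_pte = pte_mkwrite(_dst_pte, dst_vma);

        if (mfill_file_over_size(dst_vma, dst_addr))
                goto out_unlock;                /* refuse to map past EOF */

        if (page_in_cache)                      /* page already in the shmem page cache */
                page_add_file_rmap(page, dst_vma, false);
        else {                                  /* freshly allocated anonymous page */
                page_add_new_anon_rmap(page, dst_vma, dst_addr);
                folio_add_lru_vma(folio, dst_vma);
        }

        set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
        update_mmu_cache(dst_vma, dst_addr, dst_pte);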
141 struct vm_area_struct *dst_vma,
153 folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, dst_vma,
202 if (mem_cgroup_charge(folio, dst_vma->vm_mm, GFP_KERNEL))
205 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
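mfill_atomic_pte_copy() (lines 141-205) allocates a fresh folio with vma_alloc_folio(), charges it to the memcg, and installs it through mfill_atomic_install_pte(). From userspace this path is reached via the UFFDIO_COPY ioctl; a minimal, illustrative caller with error handling trimmed:

        #include <stddef.h>
        #include <sys/ioctl.h>
        #include <linux/userfaultfd.h>

        /* Resolve a fault at 'dst' by copying one page from 'src' (illustrative). */
        static int resolve_with_copy(int uffd, void *dst, void *src, size_t page_size)
        {
                struct uffdio_copy copy = {
                        .dst  = (unsigned long)dst,
                        .src  = (unsigned long)src,
                        .len  = page_size,
                        .mode = 0,      /* or UFFDIO_COPY_MODE_WP to install write-protected */
                };

                if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
                        return -1;      /* copy.copy reports bytes copied on partial success */
                return 0;
        }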
217 struct vm_area_struct *dst_vma,
225 dst_vma->vm_page_prot));
227 dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl);
230 if (mfill_file_over_size(dst_vma, dst_addr)) {
237 set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte);
239 update_mmu_cache(dst_vma, dst_addr, dst_pte);
249 struct vm_area_struct *dst_vma,
253 struct inode *inode = file_inode(dst_vma->vm_file);
254 pgoff_t pgoff = linear_page_index(dst_vma, dst_addr);
276 ret = mfill_atomic_install_pte(dst_pmd, dst_vma, dst_addr,
293 struct vm_area_struct *dst_vma,
298 struct mm_struct *dst_mm = dst_vma->vm_mm;
308 if (mfill_file_over_size(dst_vma, dst_addr)) {
321 update_mmu_cache(dst_vma, dst_addr, dst_pte);
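The zeropage (217-239), continue (249-276), and poison (293-321) variants map the shared zero page, an existing shmem page-cache page, or a poison marker instead of copying data. Their userspace entry points are the sibling ioctls. An illustrative sketch of the request shapes only; in practice these operations are alternatives for a given page (later ones would typically fail with -EEXIST), and UFFDIO_POISON requires a recent kernel:

        /* Illustrative only: the three non-copy mfill request shapes. */
        static void other_mfill_variants(int uffd, void *dst, size_t page_size)
        {
                struct uffdio_zeropage zp = {
                        .range = { .start = (unsigned long)dst, .len = page_size },
                };
                ioctl(uffd, UFFDIO_ZEROPAGE, &zp);      /* private anon: mfill_atomic_pte_zeropage() */

                struct uffdio_continue cont = {
                        .range = { .start = (unsigned long)dst, .len = page_size },
                };
                ioctl(uffd, UFFDIO_CONTINUE, &cont);    /* minor faults: mfill_atomic_pte_continue() */

                struct uffdio_poison poison = {
                        .range = { .start = (unsigned long)dst, .len = page_size },
                };
                ioctl(uffd, UFFDIO_POISON, &poison);    /* mfill_atomic_pte_poison() */
        }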
356 struct vm_area_struct *dst_vma,
363 struct mm_struct *dst_mm = dst_vma->vm_mm;
364 int vm_shared = dst_vma->vm_flags & VM_SHARED;
390 vma_hpagesize = vma_kernel_pagesize(dst_vma);
401 * On routine entry dst_vma is set. If we had to drop mmap_lock and
402 * retry, dst_vma will be set to NULL and we must look it up again.
404 if (!dst_vma) {
406 dst_vma = find_dst_vma(dst_mm, dst_start, len);
407 if (!dst_vma || !is_vm_hugetlb_page(dst_vma))
411 if (vma_hpagesize != vma_kernel_pagesize(dst_vma))
414 vm_shared = dst_vma->vm_flags & VM_SHARED;
418 * If not shared, ensure the dst_vma has an anon_vma.
422 if (unlikely(anon_vma_prepare(dst_vma)))
435 idx = linear_page_index(dst_vma, dst_addr);
436 mapping = dst_vma->vm_file->f_mapping;
439 hugetlb_vma_lock_read(dst_vma);
442 dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
444 hugetlb_vma_unlock_read(dst_vma);
452 hugetlb_vma_unlock_read(dst_vma);
457 err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
460 hugetlb_vma_unlock_read(dst_vma);
486 dst_vma = NULL;
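Lines 356-486 belong to mfill_atomic_hugetlb(), which re-validates dst_vma (it is NULL after a retry that dropped mmap_lock), checks the huge page size still matches, and then walks the range in huge-page steps under the hugetlb VMA read lock. A condensed sketch of the per-page body implied by the matches; the loop framing, error codes, and the retry label are inferred, and the copy-in-userspace handling on -ENOENT is simplified:

        while (src_addr < src_start + len) {
                idx = linear_page_index(dst_vma, dst_addr);
                mapping = dst_vma->vm_file->f_mapping;

                hugetlb_vma_lock_read(dst_vma);
                dst_pte = huge_pte_alloc(dst_mm, dst_vma, dst_addr, vma_hpagesize);
                if (!dst_pte) {
                        hugetlb_vma_unlock_read(dst_vma);
                        err = -ENOMEM;
                        break;
                }

                err = hugetlb_mfill_atomic_pte(dst_pte, dst_vma, dst_addr,
                                               src_addr, flags, &folio);
                hugetlb_vma_unlock_read(dst_vma);

                if (err == -ENOENT) {
                        /* Drop mmap_lock, copy the huge page in normal user
                         * context, then retry; dst_vma must be looked up again. */
                        dst_vma = NULL;
                        goto retry;
                }
                /* wake/advance/accounting omitted */
        }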
515 extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
524 struct vm_area_struct *dst_vma,
533 return mfill_atomic_pte_continue(dst_pmd, dst_vma,
536 return mfill_atomic_pte_poison(dst_pmd, dst_vma,
550 if (!(dst_vma->vm_flags & VM_SHARED)) {
552 err = mfill_atomic_pte_copy(dst_pmd, dst_vma,
557 dst_vma, dst_addr);
559 err = shmem_mfill_atomic_pte(dst_pmd, dst_vma,
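Lines 524-559 are the per-PTE dispatcher mfill_atomic_pte(). It routes CONTINUE and POISON first (they apply to both private and shared mappings), then splits the remaining copy/zeropage work between the anonymous and shmem paths. A condensed sketch of that decision, inferred from the matched lines; the uffd_flags_mode_is() tests are assumed from the upstream helpers:

        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
                return mfill_atomic_pte_continue(dst_pmd, dst_vma, dst_addr, flags);
        if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON))
                return mfill_atomic_pte_poison(dst_pmd, dst_vma, dst_addr, flags);

        if (!(dst_vma->vm_flags & VM_SHARED)) {
                if (uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY))
                        err = mfill_atomic_pte_copy(dst_pmd, dst_vma, dst_addr,
                                                    src_addr, flags, foliop);
                else
                        err = mfill_atomic_pte_zeropage(dst_pmd, dst_vma, dst_addr);
        } else {
                err = shmem_mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
                                             src_addr, flags, foliop);
        }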
574 struct vm_area_struct *dst_vma;
612 dst_vma = find_dst_vma(dst_mm, dst_start, len);
613 if (!dst_vma)
621 if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
622 dst_vma->vm_flags & VM_SHARED))
626 * validate 'mode' now that we know the dst_vma: don't allow
629 if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
635 if (is_vm_hugetlb_page(dst_vma))
636 return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
639 if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
641 if (!vma_is_shmem(dst_vma) &&
646 * Ensure the dst_vma has an anon_vma or this page
648 * dst_vma.
651 if (!(dst_vma->vm_flags & VM_SHARED) &&
652 unlikely(anon_vma_prepare(dst_vma)))
689 err = mfill_atomic_pte(dst_pmd, dst_vma, dst_addr,
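mfill_atomic() (lines 574-689) performs the one-time validation against dst_vma before entering its per-page loop: shared anonymous VMAs are rejected outright, MFILL_ATOMIC_WP is only honoured on VM_UFFD_WP VMAs, hugetlb is handed off, only anonymous and shmem VMAs are accepted, CONTINUE is limited to shmem, and private mappings get an anon_vma prepared up front. A condensed sketch of those checks; the error codes are inferred from the usual conventions rather than shown in the matches:

        dst_vma = find_dst_vma(dst_mm, dst_start, len);
        if (!dst_vma)
                goto out_unlock;                        /* -ENOENT in the real code */

        if (WARN_ON_ONCE(vma_is_anonymous(dst_vma) &&
                         dst_vma->vm_flags & VM_SHARED))
                goto out_unlock;                        /* -EINVAL */

        if ((flags & MFILL_ATOMIC_WP) && !(dst_vma->vm_flags & VM_UFFD_WP))
                goto out_unlock;                        /* -EINVAL */

        if (is_vm_hugetlb_page(dst_vma))
                return mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
                                            len, flags);

        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;                        /* -EINVAL */
        if (!vma_is_shmem(dst_vma) &&
            uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE))
                goto out_unlock;                        /* -EINVAL: CONTINUE needs a page cache */

        if (!(dst_vma->vm_flags & VM_SHARED) &&
            unlikely(anon_vma_prepare(dst_vma)))
                goto out_unlock;                        /* -ENOMEM */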
767 long uffd_wp_range(struct vm_area_struct *dst_vma,
774 VM_WARN_ONCE(start < dst_vma->vm_start || start + len > dst_vma->vm_end,
787 if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
789 tlb_gather_mmu(&tlb, dst_vma->vm_mm);
790 ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
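uffd_wp_range() (lines 767-790) applies or clears the uffd-wp bit over a range inside one VMA by funnelling it through change_protection() under a gathered TLB batch. A condensed sketch of the flag selection implied by the matches; the MM_CP_* composition is inferred from the mprotect helpers:

        unsigned int mm_cp_flags;
        struct mmu_gather tlb;
        long ret;

        if (enable_wp)
                mm_cp_flags = MM_CP_UFFD_WP;
        else
                mm_cp_flags = MM_CP_UFFD_WP_RESOLVE;

        /* When un-protecting, let change_protection() restore write bits
         * directly where the VMA allows it, avoiding spurious write faults. */
        if (!enable_wp && vma_wants_manual_pte_write_upgrade(dst_vma))
                mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;

        tlb_gather_mmu(&tlb, dst_vma->vm_mm);
        ret = change_protection(&tlb, dst_vma, start, start + len, mm_cp_flags);
        tlb_finish_mmu(&tlb);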
802 struct vm_area_struct *dst_vma;
828 for_each_vma_range(vmi, dst_vma, end) {
830 if (!userfaultfd_wp(dst_vma)) {
835 if (is_vm_hugetlb_page(dst_vma)) {
837 page_mask = vma_kernel_pagesize(dst_vma) - 1;
842 _start = max(dst_vma->vm_start, start);
843 _end = min(dst_vma->vm_end, end);
845 err = uffd_wp_range(dst_vma, _start, _end - _start, enable_wp);
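mwriteprotect_range() (lines 802-845) walks every VMA in the requested range, clamps [start, end) to each VMA's bounds, and calls uffd_wp_range() on the clamped piece. From userspace this corresponds to the UFFDIO_WRITEPROTECT ioctl; a minimal, illustrative caller:

        #include <stddef.h>
        #include <sys/ioctl.h>
        #include <linux/userfaultfd.h>

        /* Write-protect (or unprotect) 'len' bytes starting at 'addr' (illustrative). */
        static int set_uffd_wp(int uffd, void *addr, size_t len, int protect)
        {
                struct uffdio_writeprotect wp = {
                        .range = { .start = (unsigned long)addr, .len = len },
                        .mode  = protect ? UFFDIO_WRITEPROTECT_MODE_WP : 0,
                };

                return ioctl(uffd, UFFDIO_WRITEPROTECT, &wp);
        }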