Lines Matching refs:vma

366 		   struct vm_area_struct *vma, unsigned long floor,
370 unsigned long addr = vma->vm_start;
380 * Hide vma from rmap and truncate_pagecache before freeing
384 vma_start_write(vma);
385 unlink_anon_vmas(vma);
386 unlink_file_vma(vma);
388 if (is_vm_hugetlb_page(vma)) {
389 hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
395 while (next && next->vm_start <= vma->vm_end + PMD_SIZE
397 vma = next;
400 vma_start_write(vma);
401 unlink_anon_vmas(vma);
402 unlink_file_vma(vma);
404 free_pgd_range(tlb, addr, vma->vm_end,
407 vma = next;
408 } while (vma);
490 static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr,
493 pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
522 mapping = vma->vm_file ? vma->vm_file->f_mapping : NULL;
523 index = linear_page_index(vma, addr);
531 (void *)addr, vma->vm_flags, vma->anon_vma, mapping, index);
533 vma->vm_file,
534 vma->vm_ops ? vma->vm_ops->fault : NULL,
535 vma->vm_file ? vma->vm_file->f_op->mmap : NULL,
558 * rules set up by "remap_pfn_range()": the vma will have the VM_PFNMAP bit
562 * pfn_of_page == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT)
568 * as the vma is not a COW mapping; in that case, we know that all ptes are
583 struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
591 if (vma->vm_ops && vma->vm_ops->find_special_page)
592 return vma->vm_ops->find_special_page(vma, addr);
593 if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
608 print_bad_pte(vma, addr, pte, NULL);
614 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
615 if (vma->vm_flags & VM_MIXEDMAP) {
621 off = (addr - vma->vm_start) >> PAGE_SHIFT;
622 if (pfn == vma->vm_pgoff + off)
624 if (!is_cow_mapping(vma->vm_flags))
634 print_bad_pte(vma, addr, pte, NULL);
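
The comment block at lines 558-568 describes the rule vm_normal_page() applies to linear VM_PFNMAP mappings: remap_pfn_range() records the base pfn in vma->vm_pgoff (line 2473 below), so an untouched pte in a COW-able PFNMAP still satisfies pfn == vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT), while a COWed anonymous replacement page does not. A minimal sketch of that identity check, mirroring the logic matched at lines 614-624; the helper name is hypothetical:

#include <linux/mm.h>

/*
 * Hypothetical helper: does @pfn at @addr in a COW-able VM_PFNMAP vma
 * still correspond to the original, linearly remapped frame?  If so the
 * pte is "special" (no struct page to touch); if not, it is a normal
 * COWed page and gets the usual rmap/LRU treatment.
 */
static bool pfn_is_linear_remap(struct vm_area_struct *vma,
				unsigned long addr, unsigned long pfn)
{
	unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;

	return pfn == vma->vm_pgoff + off;
}
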
646 struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
649 struct page *page = vm_normal_page(vma, addr, pte);
657 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
667 if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
668 if (vma->vm_flags & VM_MIXEDMAP) {
674 off = (addr - vma->vm_start) >> PAGE_SHIFT;
675 if (pfn == vma->vm_pgoff + off)
677 if (!is_cow_mapping(vma->vm_flags))
698 static void restore_exclusive_pte(struct vm_area_struct *vma,
707 pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot)));
715 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
724 page_add_anon_rmap(page, vma, address, RMAP_NONE);
732 set_pte_at(vma->vm_mm, address, ptep, pte);
738 update_mmu_cache(vma, address, ptep);
746 try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma,
753 restore_exclusive_pte(vma, page, addr, src_pte);
764 * covered by this vma.
985 struct vm_area_struct *vma, unsigned long addr)
989 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
1238 * Return true if the vma needs to copy the pgtable during this fork(). Return
1382 zap_install_uffd_wp_if_needed(struct vm_area_struct *vma,
1387 if (vma_is_anonymous(vma))
1393 pte_install_uffd_wp_if_needed(vma, addr, pte, pteval);
1397 struct vm_area_struct *vma, pmd_t *pmd,
1430 page = vm_normal_page(vma, addr, ptent);
1432 if (vma->vm_flags & VM_USEREXPTE)
1439 arch_check_zapped_pte(vma, ptent);
1441 zap_install_uffd_wp_if_needed(vma, addr, pte, details,
1448 if (vma->vm_flags & VM_PURGEABLE)
1449 uxpte_clear_present(vma, addr);
1460 if (pte_young(ptent) && likely(vma_has_recency(vma)))
1465 page_remove_rmap(page, vma, false);
1467 print_bad_pte(vma, addr, ptent, page);
1489 WARN_ON_ONCE(!vma_is_anonymous(vma));
1492 page_remove_rmap(page, vma, false);
1500 print_bad_pte(vma, addr, ptent, NULL);
1511 if (!vma_is_anonymous(vma) &&
1523 zap_install_uffd_wp_if_needed(vma, addr, pte, details, ptent);
1532 tlb_flush_rmaps(tlb, vma);
1549 struct vm_area_struct *vma, pud_t *pud,
1561 __split_huge_pmd(vma, pmd, addr, false, NULL);
1562 else if (zap_huge_pmd(tlb, vma, pmd, addr)) {
1582 addr = zap_pte_range(tlb, vma, pmd, addr, next, details);
1591 struct vm_area_struct *vma, p4d_t *p4d,
1604 split_huge_pud(vma, pud, addr);
1605 } else if (zap_huge_pud(tlb, vma, pud, addr))
1611 next = zap_pmd_range(tlb, vma, pud, addr, next, details);
1620 struct vm_area_struct *vma, pgd_t *pgd,
1632 next = zap_pud_range(tlb, vma, p4d, addr, next, details);
1639 struct vm_area_struct *vma,
1647 tlb_start_vma(tlb, vma);
1648 pgd = pgd_offset(vma->vm_mm, addr);
1653 next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
1655 tlb_end_vma(tlb, vma);
1660 struct vm_area_struct *vma, unsigned long start_addr,
1664 unsigned long start = max(vma->vm_start, start_addr);
1667 if (start >= vma->vm_end)
1669 end = min(vma->vm_end, end_addr);
1670 if (end <= vma->vm_start)
1673 if (vma->vm_file)
1674 uprobe_munmap(vma, start, end);
1676 if (unlikely(vma->vm_flags & VM_PFNMAP))
1677 untrack_pfn(vma, 0, 0, mm_wr_locked);
1680 if (unlikely(is_vm_hugetlb_page(vma))) {
1682 * It is undesirable to test vma->vm_file as it
1687 * mmap_region() nullifies vma->vm_file
1692 if (vma->vm_file) {
1695 __unmap_hugepage_range(tlb, vma, start, end,
1699 unmap_page_range(tlb, vma, start, end, details);
1704 * unmap_vmas - unmap a range of memory covered by a list of vma's
1707 * @vma: the starting vma
1713 * Unmap all pages in the vma list.
1725 struct vm_area_struct *vma, unsigned long start_addr,
1736 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
1742 hugetlb_zap_begin(vma, &start, &end);
1743 unmap_single_vma(tlb, vma, start, end, &details,
1745 hugetlb_zap_end(vma, &details);
1746 } while ((vma = mas_find(mas, tree_end - 1)) != NULL);
1752 * @vma: vm_area_struct holding the applicable pages
1759 void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1767 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
1769 hugetlb_zap_begin(vma, &range.start, &range.end);
1770 tlb_gather_mmu(&tlb, vma->vm_mm);
1771 update_hiwater_rss(vma->vm_mm);
1777 unmap_single_vma(&tlb, vma, address, end, details, false);
1780 hugetlb_zap_end(vma, details);
1784 * zap_vma_ptes - remove ptes mapping the vma
1785 * @vma: vm_area_struct holding ptes to be zapped
1791 * The entire address range must be fully contained within the vma.
1794 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1797 if (!range_in_vma(vma, address, address + size) ||
1798 !(vma->vm_flags & VM_PFNMAP))
1801 zap_page_range_single(vma, address, size, NULL);
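
zap_vma_ptes() (lines 1784-1801 above) is the exported helper a driver uses to tear down ptes it installed in its own VM_PFNMAP vma; the range must lie entirely within the vma, and non-PFNMAP vmas are silently ignored. A minimal usage sketch, assuming the driver wants to revoke a mapping it previously populated:

#include <linux/mm.h>

/*
 * Sketch: drop every pte in the vma that the driver earlier installed
 * with remap_pfn_range()/vmf_insert_pfn().  Later user accesses fault
 * again and can be refused or repopulated by the driver's fault handler.
 */
static void mydrv_revoke_mapping(struct vm_area_struct *vma)
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}
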
1845 static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte,
1852 inc_mm_counter(vma->vm_mm, mm_counter_file(page));
1853 page_add_file_rmap(page, vma, false);
1854 set_pte_at(vma->vm_mm, addr, pte, mk_pte(page, prot));
1865 static int insert_page(struct vm_area_struct *vma, unsigned long addr,
1876 pte = get_locked_pte(vma->vm_mm, addr, &ptl);
1879 retval = insert_page_into_pte_locked(vma, pte, addr, page, prot);
1885 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
1895 return insert_page_into_pte_locked(vma, pte, addr, page, prot);
1901 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
1907 struct mm_struct *const mm = vma->vm_mm;
1936 int err = insert_page_in_batch_locked(vma, pte,
1960 * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
1961 * @vma: user vma to map to
1974 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
1979 if (addr < vma->vm_start || end_addr >= vma->vm_end)
1981 if (!(vma->vm_flags & VM_MIXEDMAP)) {
1982 BUG_ON(mmap_read_trylock(vma->vm_mm));
1983 BUG_ON(vma->vm_flags & VM_PFNMAP);
1984 vm_flags_set(vma, VM_MIXEDMAP);
1987 return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
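
vm_insert_pages() (lines 1960-1987 above) batches what would otherwise be one vm_insert_page() call per page, taking the pmd lock once per batch. A hedged sketch of a ->mmap handler mapping a preallocated page array; the mydrv_* naming and the use of file->private_data are assumptions:

#include <linux/fs.h>
#include <linux/mm.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page **pages = file->private_data; /* assumed: >= vma_pages(vma) entries */
	unsigned long nr = vma_pages(vma);
	int err;

	err = vm_insert_pages(vma, vma->vm_start, pages, &nr);
	/* on error @nr holds the count not yet mapped; the mmap caller will
	 * destroy the vma, so simply returning the error is enough here. */
	return err;
}
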
1992 * vm_insert_page - insert single page into user vma
1993 * @vma: user vma to map to
1998 * into a user vma.
2007 * that. Your vma protection will have to be set up correctly, which
2014 * under mm->mmap_lock write-lock, so it can change vma->vm_flags.
2015 * Caller must set VM_MIXEDMAP on vma if it wants to call this
2020 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
2023 if (addr < vma->vm_start || addr >= vma->vm_end)
2027 if (!(vma->vm_flags & VM_MIXEDMAP)) {
2028 BUG_ON(mmap_read_trylock(vma->vm_mm));
2029 BUG_ON(vma->vm_flags & VM_PFNMAP);
2030 vm_flags_set(vma, VM_MIXEDMAP);
2032 return insert_page(vma, addr, page, vma->vm_page_prot);
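
The kernel-doc above (lines 1992-2020) spells out the constraints on vm_insert_page(): the page must be an order-0 page the caller holds a reference on, the call normally happens from f_op->mmap() under the mmap write lock, and a caller that wants to use it elsewhere (for example from a fault handler) must set VM_MIXEDMAP on the vma itself beforehand. A hedged sketch of that fault-handler pattern; the mydrv_* names and the page lookup are hypothetical:

#include <linux/fs.h>
#include <linux/mm.h>

static vm_fault_t mydrv_fault(struct vm_fault *vmf);
static struct page *mydrv_lookup_page(struct file *file, pgoff_t pgoff); /* hypothetical */

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault = mydrv_fault,
};

/* ->mmap: opt in to VM_MIXEDMAP once, while the mmap write lock is held. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	vm_flags_set(vma, VM_MIXEDMAP);
	vma->vm_ops = &mydrv_vm_ops;
	return 0;
}

/* ->fault: insert the refcounted driver page backing the faulting offset. */
static vm_fault_t mydrv_fault(struct vm_fault *vmf)
{
	struct page *page = mydrv_lookup_page(vmf->vma->vm_file, vmf->pgoff);

	if (!page || vm_insert_page(vmf->vma, vmf->address, page))
		return VM_FAULT_SIGBUS;
	return VM_FAULT_NOPAGE;
}
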
2037 * __vm_map_pages - maps range of kernel pages into user vma
2038 * @vma: user vma to map to
2043 * This allows drivers to map range of kernel pages into a user vma.
2047 static int __vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2050 unsigned long count = vma_pages(vma);
2051 unsigned long uaddr = vma->vm_start;
2063 ret = vm_insert_page(vma, uaddr, pages[offset + i]);
2074 * @vma: user vma to map to
2081 * If we fail to insert any page into the vma, the function will return
2084 * will destroy the vma, removing any successfully inserted pages. Other
2090 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
2093 return __vm_map_pages(vma, pages, num, vma->vm_pgoff);
2099 * @vma: user vma to map to
2110 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
2113 return __vm_map_pages(vma, pages, num, 0);
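
vm_map_pages() (lines 2074-2093 above) wraps the per-page loop in __vm_map_pages(): it inserts vma_pages(vma) pages from a kernel page array, starting at the index given by vma->vm_pgoff, and returns -ENXIO if the requested offset or length falls outside the array (vm_map_pages_zero() is the variant that ignores vm_pgoff). A minimal sketch, with the mydrv_buf bookkeeping being an assumption:

#include <linux/fs.h>
#include <linux/mm.h>

struct mydrv_buf {			/* hypothetical driver state */
	struct page **pages;
	unsigned long npages;
};

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct mydrv_buf *buf = file->private_data;

	/* offset (vm_pgoff) and length checks happen inside the helper */
	return vm_map_pages(vma, buf->pages, buf->npages);
}
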
2117 static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2120 struct mm_struct *mm = vma->vm_mm;
2145 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2146 if (ptep_set_access_flags(vma, addr, pte, entry, 1))
2147 update_mmu_cache(vma, addr, pte);
2160 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
2164 update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */
2172 * vmf_insert_pfn_prot - insert single pfn into user vma with specified pgprot
2173 * @vma: user vma to map to
2186 * pgprot typically only differs from @vma->vm_page_prot when drivers set
2187 * caching- and encryption bits different than those of @vma->vm_page_prot,
2190 * This is ok as long as @vma->vm_page_prot is not used by the core vm
2197 * fault() callback, and never using the value of vma->vm_page_prot,
2204 vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2213 BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
2214 BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
2216 BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
2217 BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
2219 if (addr < vma->vm_start || addr >= vma->vm_end)
2225 track_pfn_insert(vma, &pgprot, __pfn_to_pfn_t(pfn, PFN_DEV));
2227 return insert_pfn(vma, addr, __pfn_to_pfn_t(pfn, PFN_DEV), pgprot,
2233 * vmf_insert_pfn - insert single pfn into user vma
2234 * @vma: user vma to map to
2239 * they've allocated into a user vma. Same comments apply.
2244 * vma cannot be a COW mapping.
2252 vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2255 return vmf_insert_pfn_prot(vma, addr, pfn, vma->vm_page_prot);
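
vmf_insert_pfn_prot() (lines 2172-2227 above) lets a VM_PFNMAP fault handler install a raw pfn with a pgprot that may differ from vma->vm_page_prot in caching or encryption bits, which is how framebuffer-style write-combined mappings are usually set up; vmf_insert_pfn() (lines 2233-2255) is the same call using vma->vm_page_prot. A hedged sketch of such a fault handler; the base-pfn lookup is hypothetical and the ->mmap handler is assumed to have set VM_PFNMAP (typically together with VM_IO | VM_DONTEXPAND | VM_DONTDUMP):

#include <linux/fs.h>
#include <linux/mm.h>

static unsigned long mydrv_base_pfn(struct file *file); /* hypothetical */

static vm_fault_t mydrv_pfnmap_fault(struct vm_fault *vmf)
{
	unsigned long pfn = mydrv_base_pfn(vmf->vma->vm_file) + vmf->pgoff;

	/* write-combined device memory; vmf_insert_pfn() would keep vm_page_prot */
	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				   pgprot_writecombine(vmf->vma->vm_page_prot));
}
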
2259 static bool vm_mixed_ok(struct vm_area_struct *vma, pfn_t pfn)
2262 if (vma->vm_flags & VM_MIXEDMAP)
2273 static vm_fault_t __vm_insert_mixed(struct vm_area_struct *vma,
2276 pgprot_t pgprot = vma->vm_page_prot;
2279 BUG_ON(!vm_mixed_ok(vma, pfn));
2281 if (addr < vma->vm_start || addr >= vma->vm_end)
2284 track_pfn_insert(vma, &pgprot, pfn);
2306 err = insert_page(vma, addr, page, pgprot);
2308 return insert_pfn(vma, addr, pfn, pgprot, mkwrite);
2319 vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2322 return __vm_insert_mixed(vma, addr, pfn, false);
2331 vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
2334 return __vm_insert_mixed(vma, addr, pfn, true);
2440 int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
2446 struct mm_struct *mm = vma->vm_mm;
2461 * Disable vma merging and expanding with mremap().
2463 * Omit vma from core dump, even when VM_IO turned off.
2467 * un-COW'ed pages by matching them up with "vma->vm_pgoff".
2470 if (is_cow_mapping(vma->vm_flags)) {
2471 if (addr != vma->vm_start || end != vma->vm_end)
2473 vma->vm_pgoff = pfn;
2476 vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
2481 flush_cache_range(vma, addr, end);
2495 * @vma: user vma to map to
2505 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
2510 err = track_pfn_remap(vma, &prot, pfn, addr, PAGE_ALIGN(size));
2514 err = remap_pfn_range_notrack(vma, addr, pfn, size, prot);
2516 untrack_pfn(vma, pfn, PAGE_ALIGN(size), true);
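
remap_pfn_range() (lines 2440-2516 above) maps a physically contiguous range into userspace at mmap time; as the comments at lines 2461-2473 note, it marks the vma VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP and, for COW mappings covering the whole vma, stashes the base pfn in vma->vm_pgoff so vm_normal_page() can recognise the untouched ptes later. A minimal ->mmap sketch; the device base address is a made-up placeholder:

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDRV_PHYS_BASE	0xfd000000UL	/* hypothetical device address */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = (MYDRV_PHYS_BASE >> PAGE_SHIFT) + vma->vm_pgoff;

	/* device registers: map uncached */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}
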
2523 * @vma: user vma to map to
2529 * we'll figure out the rest from the vma information.
2531 * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get
2536 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
2555 if (vma->vm_pgoff > pages)
2557 pfn += vma->vm_pgoff;
2558 pages -= vma->vm_pgoff;
2561 vm_len = vma->vm_end - vma->vm_start;
2566 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
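
vm_iomap_memory() (lines 2523-2566 above) is the simpler front end for the whole-resource case: the driver passes the physical start and length, and the helper derives the pfn, honours vma->vm_pgoff and rejects ranges that do not fit before handing off to io_remap_pfn_range(). A minimal sketch, assuming the driver stashed the struct resource for its BAR in file->private_data:

#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/mm.h>

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct resource *res = file->private_data; /* assumed: saved at open() */

	/* offset and length validation against the vma happens inside the helper */
	return vm_iomap_memory(vma, res->start, resource_size(res));
}

As the comment at line 2531 notes, a driver that needs write-combining or uncached access would adjust vma->vm_page_prot before making the call.
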
2820 struct vm_area_struct *vma = vmf->vma;
2821 struct mm_struct *mm = vma->vm_mm;
2825 if (copy_mc_user_highpage(dst, src, addr, vma)) {
2856 update_mmu_tlb(vma, addr, vmf->pte);
2862 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2863 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
2881 update_mmu_tlb(vma, addr, vmf->pte);
2912 static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
2914 struct file *vm_file = vma->vm_file;
2939 if (vmf->vma->vm_file &&
2940 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2943 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2967 struct vm_area_struct *vma = vmf->vma;
2971 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite;
2978 * pinned by vma->vm_file's reference. We rely on folio_unlock()'s
2985 file_update_time(vma->vm_file);
3011 * Handle write page faults for pages that can be reused in the current vma
3021 struct vm_area_struct *vma = vmf->vma;
3036 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3038 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3039 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3040 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3065 struct vm_area_struct *vma = vmf->vma;
3066 struct mm_struct *mm = vma->vm_mm;
3078 if (unlikely(anon_vma_prepare(vma)))
3082 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3086 new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma,
3135 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3136 entry = mk_pte(&new_folio->page, vma->vm_page_prot);
3144 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
3154 ptep_clear_flush(vma, vmf->address, vmf->pte);
3155 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3157 if (vma->vm_flags & VM_PURGEABLE) {
3160 uxpte_set_present(vma, vmf->address);
3163 folio_add_lru_vma(new_folio, vma);
3171 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3195 page_remove_rmap(vmf->page, vma, false);
3203 update_mmu_tlb(vma, vmf->address, vmf->pte);
3247 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3248 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3257 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3271 struct vm_area_struct *vma = vmf->vma;
3273 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) {
3278 vma_end_read(vmf->vma);
3283 ret = vma->vm_ops->pfn_mkwrite(vmf);
3295 struct vm_area_struct *vma = vmf->vma;
3300 if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
3306 vma_end_read(vmf->vma);
3350 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3358 struct vm_area_struct *vma = vmf->vma;
3362 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3371 if (unlikely(userfaultfd_wp(vmf->vma) &&
3372 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3373 flush_tlb_page(vmf->vma, vmf->address);
3376 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3385 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
3440 page_move_anon_rmap(vmf->page, vma);
3451 if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) {
3453 vma_end_read(vmf->vma);
3471 static void unmap_mapping_range_vma(struct vm_area_struct *vma,
3475 zap_page_range_single(vma, start_addr, end_addr - start_addr, details);
3483 struct vm_area_struct *vma;
3486 vma_interval_tree_foreach(vma, root, first_index, last_index) {
3487 vba = vma->vm_pgoff;
3488 vea = vba + vma_pages(vma) - 1;
3492 unmap_mapping_range_vma(vma,
3493 ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
3494 ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
3605 struct vm_area_struct *vma = vmf->vma;
3626 vma->vm_mm, vmf->address & PAGE_MASK,
3630 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3633 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3645 struct vm_area_struct *vma,
3650 if (mem_cgroup_swap_full(folio) || (vma->vm_flags & VM_LOCKED) ||
3665 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3678 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3685 if (vma_is_anonymous(vmf->vma))
3701 if (unlikely(!userfaultfd_wp(vmf->vma)))
3731 * We enter with non-exclusive mmap_lock (to exclude vma changes,
3740 struct vm_area_struct *vma = vmf->vma;
3758 migration_entry_wait(vma->vm_mm, vmf->pmd,
3769 vma_end_read(vma);
3775 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3795 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3806 folio = swap_cache_get_folio(entry, vma, vmf->address);
3830 vma, vmf->address, false);
3837 vma->vm_mm, GFP_KERNEL,
3868 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3879 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3910 page = ksm_might_need_to_copy(page, vma, vmf->address);
3936 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4006 if (should_try_to_free_swap(folio, vma, vmf->flags))
4009 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
4010 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
4011 pte = mk_pte(page, vma->vm_page_prot);
4022 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
4027 flush_icache_page(vma, page);
4036 page_add_new_anon_rmap(page, vma, vmf->address);
4037 folio_add_lru_vma(folio, vma);
4039 page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
4044 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4045 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4069 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4099 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4106 struct vm_area_struct *vma = vmf->vma;
4112 if (vma->vm_flags & VM_SHARED)
4119 if (pte_alloc(vma->vm_mm, vmf->pmd))
4124 if (vma->vm_flags & VM_USEREXPTE) {
4133 !mm_forbids_zeropage(vma->vm_mm)) {
4135 vma->vm_page_prot));
4139 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4144 update_mmu_tlb(vma, vmf->address, vmf->pte);
4147 ret = check_stable_address_space(vma->vm_mm);
4151 if (userfaultfd_missing(vma)) {
4159 if (unlikely(anon_vma_prepare(vma)))
4161 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4165 if (mem_cgroup_charge(folio, vma->vm_mm, GFP_KERNEL))
4176 entry = mk_pte(&folio->page, vma->vm_page_prot);
4178 if (vma->vm_flags & VM_WRITE)
4179 entry = pte_mkwrite(pte_mkdirty(entry), vma);
4181 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4186 update_mmu_tlb(vma, vmf->address, vmf->pte);
4190 ret = check_stable_address_space(vma->vm_mm);
4195 if (userfaultfd_missing(vma)) {
4201 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
4202 folio_add_new_anon_rmap(folio, vma, vmf->address);
4204 if (vma->vm_flags & VM_PURGEABLE)
4207 folio_add_lru_vma(folio, vma);
4210 if (vma->vm_flags & VM_PURGEABLE)
4211 uxpte_set_present(vma, vmf->address);
4215 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4218 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4234 * released depending on flags and vma->vm_ops->fault() return value.
4239 struct vm_area_struct *vma = vmf->vma;
4258 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4263 ret = vma->vm_ops->fault(vmf);
4296 struct vm_area_struct *vma = vmf->vma;
4298 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4303 mm_inc_nr_ptes(vma->vm_mm);
4309 struct vm_area_struct *vma = vmf->vma;
4315 if (!transhuge_vma_suitable(vma, haddr))
4336 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4341 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4345 flush_icache_pages(vma, page, HPAGE_PMD_NR);
4347 entry = mk_huge_pmd(page, vma->vm_page_prot);
4349 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
4351 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
4352 page_add_file_rmap(page, vma, true);
4360 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4362 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4389 struct vm_area_struct *vma = vmf->vma;
4395 flush_icache_pages(vma, page, nr);
4396 entry = mk_pte(page, vma->vm_page_prot);
4404 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
4408 if (write && !(vma->vm_flags & VM_SHARED)) {
4409 add_mm_counter(vma->vm_mm, MM_ANONPAGES, nr);
4411 folio_add_new_anon_rmap(folio, vma, addr);
4412 folio_add_lru_vma(folio, vma);
4414 add_mm_counter(vma->vm_mm, mm_counter_file(page), nr);
4415 folio_add_file_rmap_range(folio, page, nr, vma, false);
4417 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4420 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
4448 struct vm_area_struct *vma = vmf->vma;
4453 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4462 if (!(vma->vm_flags & VM_SHARED)) {
4463 ret = check_stable_address_space(vma->vm_mm);
4476 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4477 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4481 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4493 update_mmu_tlb(vma, vmf->address, vmf->pte);
4565 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4575 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
4578 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4584 ret = vmf->vma->vm_ops->map_pages(vmf,
4596 if (!vmf->vma->vm_ops->map_pages)
4599 if (uffd_disable_fault_around(vmf->vma))
4623 vma_end_read(vmf->vma);
4641 struct vm_area_struct *vma = vmf->vma;
4645 vma_end_read(vma);
4649 if (unlikely(anon_vma_prepare(vma)))
4652 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4656 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4669 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4685 struct vm_area_struct *vma = vmf->vma;
4690 vma_end_read(vma);
4704 if (vma->vm_ops->page_mkwrite) {
4727 * We enter with non-exclusive mmap_lock (to exclude vma changes,
4731 * If mmap_lock is released, vma may become invalid (for example
4736 struct vm_area_struct *vma = vmf->vma;
4737 struct mm_struct *vm_mm = vma->vm_mm;
4743 if (!vma->vm_ops->fault) {
4744 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4765 else if (!(vma->vm_flags & VM_SHARED))
4778 int numa_migrate_prep(struct page *page, struct vm_area_struct *vma,
4784 vma_set_access_pid_bit(vma);
4792 return mpol_misplaced(page, vma, addr);
4797 struct vm_area_struct *vma = vmf->vma;
4819 pte = pte_modify(old_pte, vma->vm_page_prot);
4826 if (!writable && vma_wants_manual_pte_write_upgrade(vma) &&
4827 can_change_pte_writable(vma, vmf->address, pte))
4830 page = vm_normal_page(vma, vmf->address, pte);
4853 if (page_mapcount(page) > 1 && (vma->vm_flags & VM_SHARED))
4866 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4876 if (migrate_misplaced_page(page, vma, target_nid)) {
4881 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4901 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4902 pte = pte_modify(old_pte, vma->vm_page_prot);
4905 pte = pte_mkwrite(pte, vma);
4906 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4907 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4914 struct vm_area_struct *vma = vmf->vma;
4915 if (vma_is_anonymous(vma))
4917 if (vma->vm_ops->huge_fault)
4918 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4925 struct vm_area_struct *vma = vmf->vma;
4929 if (vma_is_anonymous(vma)) {
4931 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd))
4936 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4937 if (vma->vm_ops->huge_fault) {
4938 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4945 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
4954 struct vm_area_struct *vma = vmf->vma;
4956 if (vma_is_anonymous(vma))
4958 if (vma->vm_ops->huge_fault)
4959 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4968 struct vm_area_struct *vma = vmf->vma;
4972 if (vma_is_anonymous(vma))
4974 if (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) {
4975 if (vma->vm_ops->huge_fault) {
4976 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4983 __split_huge_pud(vma, vmf->pud, vmf->address);
4997 * We enter with non-exclusive mmap_lock (to exclude vma changes, but allow
5023 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
5042 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5048 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5058 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5060 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5073 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5087 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
5091 .vma = vma,
5095 .pgoff = linear_page_index(vma, address),
5096 .gfp_mask = __get_fault_gfp_mask(vma),
5098 struct mm_struct *mm = vma->vm_mm;
5099 unsigned long vm_flags = vma->vm_flags;
5114 hugepage_vma_check(vma, vm_flags, false, true, true)) {
5148 hugepage_vma_check(vma, vm_flags, false, true, true)) {
5163 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5250 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5253 current->in_lru_fault = vma_has_recency(vma);
5261 static void lru_gen_enter_fault(struct vm_area_struct *vma)
5270 static vm_fault_t sanitize_fault_flags(struct vm_area_struct *vma,
5280 if (!is_cow_mapping(vma->vm_flags))
5284 if (WARN_ON_ONCE(!(vma->vm_flags & VM_MAYWRITE)))
5287 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE) &&
5288 !is_cow_mapping(vma->vm_flags)))
5311 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
5314 /* If the fault handler drops the mmap_lock, vma may be freed */
5315 struct mm_struct *mm = vma->vm_mm;
5320 ret = sanitize_fault_flags(vma, &flags);
5324 if (!arch_vma_access_permitted(vma, flags & FAULT_FLAG_WRITE,
5338 lru_gen_enter_fault(vma);
5340 if (unlikely(is_vm_hugetlb_page(vma)))
5341 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
5343 ret = __handle_mm_fault(vma, address, flags);
5423 * need to extend the vma, which helps the VM layer a lot.
5428 struct vm_area_struct *vma;
5433 vma = find_vma(mm, addr);
5434 if (likely(vma && (vma->vm_start <= addr)))
5435 return vma;
5439 * if we can extend a vma to do so.
5441 if (!vma || !(vma->vm_flags & VM_GROWSDOWN)) {
5448 * in which case we can continue to use the vma
5452 * re-take it, and also look up the vma again,
5459 vma = find_vma(mm, addr);
5460 if (!vma)
5462 if (vma->vm_start <= addr)
5464 if (!(vma->vm_flags & VM_GROWSDOWN))
5468 if (expand_stack_locked(vma, addr))
5473 return vma;
5491 struct vm_area_struct *vma;
5495 vma = mas_walk(&mas);
5496 if (!vma)
5499 if (!vma_start_read(vma))
5508 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma))
5512 if (unlikely(address < vma->vm_start || address >= vma->vm_end))
5516 if (vma->detached) {
5517 vma_end_read(vma);
5524 return vma;
5527 vma_end_read(vma);
5667 * @vma: memory mapping
5678 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
5685 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5688 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5698 int follow_phys(struct vm_area_struct *vma,
5706 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5709 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
5728 * @vma: the vma to access
5729 * @addr: userspace address, not relative offset within @vma
5735 * iomem mapping. This callback is used by access_process_vm() when the @vma is
5738 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
5749 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
5753 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
5768 if (follow_pte(vma->vm_mm, addr, &ptep, &ptl))
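
generic_access_phys() (lines 5728-5768 above) is what lets access_process_vm(), and therefore ptrace, gdb and /proc/<pid>/mem, read and write VM_IO/VM_PFNMAP mappings that have no struct page behind them; a driver opts in by wiring it up as the ->access method of its vm_operations_struct (it is only built with CONFIG_HAVE_IOREMAP_PROT). A minimal sketch, reusing the hypothetical fault handler from the earlier pfnmap example:

#include <linux/mm.h>

static vm_fault_t mydrv_pfnmap_fault(struct vm_fault *vmf); /* hypothetical, see above */

static const struct vm_operations_struct mydrv_pfnmap_vm_ops = {
	.fault	= mydrv_pfnmap_fault,
	.access	= generic_access_phys,	/* ptrace/access_process_vm can peek the mapping */
};
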
5815 struct vm_area_struct *vma = NULL;
5817 gup_flags, &vma);
5821 vma = vma_lookup(mm, addr);
5822 if (!vma) {
5823 vma = expand_stack(mm, addr);
5826 if (!vma)
5840 if (vma->vm_ops && vma->vm_ops->access)
5841 bytes = vma->vm_ops->access(vma, addr, buf,
5854 copy_to_user_page(vma, page, addr,
5858 copy_from_user_page(vma, page, addr,
5920 struct vm_area_struct *vma;
5928 vma = find_vma(mm, ip);
5929 if (vma && vma->vm_file) {
5930 struct file *f = vma->vm_file;
5939 vma->vm_start,
5940 vma->vm_end - vma->vm_start);
6061 struct vm_area_struct *vma,
6074 addr + i*PAGE_SIZE, vma)) {
6085 struct vm_area_struct *vma;
6093 addr, copy_arg->vma)) {
6101 unsigned long addr_hint, struct vm_area_struct *vma)
6109 .vma = vma,
6113 return copy_user_gigantic_page(dst, src, addr, vma,