Lines Matching refs:vma

86 		struct vm_area_struct *vma, struct vm_area_struct *prev,
95 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
96 void vma_set_page_prot(struct vm_area_struct *vma)
98 unsigned long vm_flags = vma->vm_flags;
101 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
102 if (vma_wants_writenotify(vma, vm_page_prot)) {
106 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
107 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
113 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
116 if (vma->vm_flags & VM_SHARED)
120 vma_interval_tree_remove(vma, &mapping->i_mmap);
126 * vma from rmap and vmtruncate before freeing its page tables.
128 void unlink_file_vma(struct vm_area_struct *vma)
130 struct file *file = vma->vm_file;
135 __remove_shared_vm_struct(vma, file, mapping);
143 static void remove_vma(struct vm_area_struct *vma, bool unreachable)
146 if (vma->vm_ops && vma->vm_ops->close)
147 vma->vm_ops->close(vma);
148 if (vma->vm_file)
149 fput(vma->vm_file);
150 mpol_put(vma_policy(vma));
152 __vm_area_free(vma);
154 vm_area_free(vma);
238 goto out; /* mapping intersects with an existing non-brk vma. */
291 struct vm_area_struct *vma;
295 for_each_vma(vmi, vma) {
297 struct anon_vma *anon_vma = vma->anon_vma;
305 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm))
308 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm))
314 dump_vma(vma);
315 pr_emerg("tree range: %px start %lx end %lx\n", vma,
323 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
331 pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i);
342 * vma has some anon_vma assigned, and is already inserted on that
345 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
346 * vma must be removed from the anon_vma's interval trees using
349 * After the update, the vma will be reinserted using
356 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
360 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
365 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
369 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
377 struct vm_area_struct *vma;
380 for_each_vma_range(vmi, vma, end) {
381 unsigned long vm_start = max(addr, vma->vm_start);
382 unsigned long vm_end = min(end, vma->vm_end);
390 static void __vma_link_file(struct vm_area_struct *vma,
393 if (vma->vm_flags & VM_SHARED)
397 vma_interval_tree_insert(vma, &mapping->i_mmap);
401 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
406 vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
407 if (vma_iter_prealloc(&vmi, vma))
410 vma_start_write(vma);
412 vma_iter_store(&vmi, vma);
414 if (vma->vm_file) {
415 mapping = vma->vm_file->f_mapping;
417 __vma_link_file(vma, mapping);
429 * @vma: The vma that will be altered once locked
430 * @next: The next vma if it is to be adjusted
431 * @remove: The first vma to be removed
432 * @remove2: The second vma to be removed
435 struct vm_area_struct *vma, struct vm_area_struct *next,
439 vp->vma = vma;
440 vp->anon_vma = vma->anon_vma;
447 vp->file = vma->vm_file;
449 vp->mapping = vma->vm_file->f_mapping;
456 * @vma: The vma that will be altered once locked
459 struct vm_area_struct *vma)
461 init_multi_vma_prep(vp, vma, NULL, NULL, NULL);
472 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end);
484 * space until vma start or end is updated.
493 anon_vma_interval_tree_pre_update_vma(vp->vma);
500 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap);
513 * @vmi: The vma iterator
523 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap);
534 * split_vma has split insert from vma, and needs
536 * (it may either follow vma or precede it).
543 anon_vma_interval_tree_post_update_vma(vp->vma);
551 uprobe_mmap(vp->vma);
566 anon_vma_merge(vp->vma, vp->remove);
570 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end);
601 * expanding vma has anon_vma set if the shrinking vma had, to cover any
622 * @vmi: The vma iterator
623 * @vma: The vma to expand
624 * @start: The start of the vma
625 * @end: The exclusive end of the vma
626 * @pgoff: The page offset of vma
627 * @next: The next vma, if any.
629 * Expand @vma to @start and @end. Can expand off the start and end. Will
630 * expand over @next if it's different from @vma and @end == @next->vm_end.
631 * Checking if the @vma can expand and merge with @next needs to be handled by
636 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
644 vma_start_write(vma);
645 if (next && (vma != next) && (end == next->vm_end)) {
650 ret = dup_anon_vma(vma, next, &anon_dup);
655 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL);
658 next != vma && end > next->vm_start);
660 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end);
662 /* Note: vma iterator must be pointing to 'start' */
664 if (vma_iter_prealloc(vmi, vma))
668 vma_adjust_trans_huge(vma, start, end, 0);
669 vma->vm_start = start;
670 vma->vm_end = end;
671 vma->vm_pgoff = pgoff;
672 vma_iter_store(vmi, vma);
674 vma_complete(&vp, vmi, vma->vm_mm);
685 * @vmi: The vma iterator
686 * @vma: The VMA to modify
692 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
697 WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
699 if (vma->vm_start < start)
700 vma_iter_config(vmi, vma->vm_start, start);
702 vma_iter_config(vmi, end, vma->vm_end);
707 vma_start_write(vma);
709 init_vma_prep(&vp, vma);
711 vma_adjust_trans_huge(vma, start, end, 0);
714 vma->vm_start = start;
715 vma->vm_end = end;
716 vma->vm_pgoff = pgoff;
717 vma_complete(&vp, vmi, vma->vm_mm);
722 * If the vma has a ->close operation then the driver probably needs to release
723 * per-vma resources, so we don't attempt to merge those if the caller indicates
724 * the current vma may be removed as part of the merge.
726 static inline bool is_mergeable_vma(struct vm_area_struct *vma,
739 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
741 if (vma->vm_file != file)
743 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close)
745 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
747 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
753 struct anon_vma *anon_vma2, struct vm_area_struct *vma)
759 if ((!anon_vma1 || !anon_vma2) && (!vma ||
760 list_is_singular(&vma->anon_vma_chain)))
767 * in front of (at a lower virtual address and file offset than) the vma.
776 * We assume the vma may be removed as part of the merge.
779 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
784 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) &&
785 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
786 if (vma->vm_pgoff == vm_pgoff)
794 * beyond (at a higher virtual address and file offset than) the vma.
799 * We assume that vma is not removed as part of the merge.
802 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
807 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) &&
808 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
810 vm_pglen = vma_pages(vma);
811 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
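
The mergeability checks matched above (is_mergeable_vma(), can_vma_merge_before()/can_vma_merge_after(), lines 726-811) are what let an adjacent mapping with compatible flags, file and page offset be absorbed into an existing vma instead of creating a new one. A minimal userspace sketch of the observable effect, assuming Linux; dump_range() is an illustrative helper, not part of the code above. Two separate anonymous mmap() calls over adjacent ranges end up as a single entry in /proc/self/maps.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Print every /proc/self/maps line overlapping [base, base + span). */
static void dump_range(const char *tag, void *base, size_t span)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];
	unsigned long lo = (unsigned long)base, hi = lo + span;

	printf("--- %s ---\n", tag);
	while (f && fgets(line, sizeof(line), f)) {
		unsigned long s, e;

		if (sscanf(line, "%lx-%lx", &s, &e) == 2 && s < hi && e > lo)
			fputs(line, stdout);
	}
	if (f)
		fclose(f);
}

int main(void)
{
	size_t len = 16 * (size_t)sysconf(_SC_PAGESIZE);
	/* Reserve an address range so the two mappings below are adjacent. */
	char *base = mmap(NULL, 2 * len, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (base == MAP_FAILED)
		return 1;

	/* Two separate mmap() calls with identical protection and flags... */
	mmap(base, len, PROT_READ | PROT_WRITE,
	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	mmap(base + len, len, PROT_READ | PROT_WRITE,
	     MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	/* ...show up as one VMA: the second merged with the first. */
	dump_range("after two adjacent anonymous mmaps", base, 2 * len);
	return 0;
}

Requesting a different protection for the second mapping would leave two entries instead, because the vm_flags comparison at line 739 rejects the merge.
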
831 * vma, PPPP is the previous vma, CCCC is a concurrent vma that starts
833 * NNNN the next vma after ****:
848 * It is important for case 8 that the vma CCCC overlapping the
852 * rmap_locks, the properties of the merged vma will be already
866 * **** is not represented - it will be merged and the vma containing the
878 struct vm_area_struct *vma, *adjust, *remove, *remove2;
892 * We later require that vma->vm_flags == vm_flags,
893 * so this tests vma->vm_flags & VM_SPECIAL, too.
938 res = vma = prev;
980 vma = next; /* case 3 */
997 if (vma_start < vma->vm_start || vma_end > vma->vm_end)
1007 if (vma_iter_prealloc(vmi, vma))
1010 init_multi_vma_prep(&vp, vma, adjust, remove, remove2);
1015 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start);
1017 vma->vm_start = vma_start;
1018 vma->vm_end = vma_end;
1019 vma->vm_pgoff = vma_pgoff;
1022 vma_iter_store(vmi, vma);
1055 * we can merge the two vma's. For example, we refuse to merge a vma if
1071 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1087 * We also make sure that the two vma's are compatible (adjacent,
1107 * anon_vmas being allocated, preventing vma merge in subsequent
1110 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1112 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end);
1119 anon_vma = reusable_anon_vma(next, vma, next);
1125 VM_BUG_ON_VMA(prev != vma, vma);
1129 anon_vma = reusable_anon_vma(prev, prev, vma);
1483 static bool vma_is_shared_writable(struct vm_area_struct *vma)
1485 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) ==
1489 static bool vma_fs_can_writeback(struct vm_area_struct *vma)
1492 if (vma->vm_flags & VM_PFNMAP)
1495 return vma->vm_file && vma->vm_file->f_mapping &&
1496 mapping_can_writeback(vma->vm_file->f_mapping);
1503 bool vma_needs_dirty_tracking(struct vm_area_struct *vma)
1506 if (!vma_is_shared_writable(vma))
1510 if (vm_ops_needs_writenotify(vma->vm_ops))
1517 return vma_fs_can_writeback(vma);
1526 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1529 if (!vma_is_shared_writable(vma))
1533 if (vm_ops_needs_writenotify(vma->vm_ops))
1539 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags)))
1546 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma))
1550 if (userfaultfd_wp(vma))
1554 return vma_fs_can_writeback(vma);
1716 struct vm_area_struct *vma, *prev;
1728 vma = find_vma_prev(mm, addr, &prev);
1730 (!vma || addr + len <= vm_start_gap(vma)) &&
1763 struct vm_area_struct *vma, *prev;
1778 vma = find_vma_prev(mm, addr, &prev);
1780 (!vma || addr + len <= vm_start_gap(vma)) &&
1902 * find_vma_prev() - Find the VMA for a given address, or the next vma and
1911 * Returns: The VMA associated with @addr, or the next vma.
1912 * May return %NULL in the case of no vma at addr or above.
1918 struct vm_area_struct *vma;
1921 vma = mas_walk(&mas);
1923 if (!vma)
1924 vma = mas_next(&mas, ULONG_MAX);
1925 return vma;
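
find_vma_prev() and the find_vma() walk it wraps (lines 1902-1925) return the first vma whose end lies above the given address, which may or may not actually contain that address. A rough userspace analogue, assuming Linux and relying on /proc/self/maps being sorted by address; find_mapping() only illustrates the lookup semantics and is not kernel code.

#include <stdio.h>

/*
 * Userspace analogue of find_vma(): report the first /proc/self/maps
 * entry whose end address lies above @addr - the mapping containing
 * @addr if there is one, otherwise the next mapping above it.
 */
static void find_mapping(unsigned long addr)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];

	if (!f)
		return;
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end;

		if (sscanf(line, "%lx-%lx", &start, &end) != 2)
			continue;
		if (end > addr) {	/* first mapping with end > addr */
			if (start <= addr)
				printf("%#lx lies inside: %s", addr, line);
			else
				printf("%#lx is below:    %s", addr, line);
			fclose(f);
			return;
		}
	}
	printf("no mapping at or above %#lx\n", addr);
	fclose(f);
}

int main(void)
{
	int on_stack;

	find_mapping((unsigned long)&on_stack);	/* inside the [stack] vma */
	find_mapping(0x1000);			/* usually below every vma */
	return 0;
}
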
1933 static int acct_stack_growth(struct vm_area_struct *vma,
1936 struct mm_struct *mm = vma->vm_mm;
1940 if (!may_expand_vm(mm, vma->vm_flags, grow))
1948 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT))
1952 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
1953 vma->vm_end - size;
1954 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
1970 * vma is the last one with address > vma->vm_end. Have to extend vma.
1972 static int expand_upwards(struct vm_area_struct *vma, unsigned long address)
1974 struct mm_struct *mm = vma->vm_mm;
1978 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address);
1980 if (!(vma->vm_flags & VM_GROWSUP))
1996 next = find_vma_intersection(mm, vma->vm_end, gap_addr);
2006 __mas_set_range(&mas, vma->vm_start, address - 1);
2007 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2011 if (unlikely(anon_vma_prepare(vma))) {
2017 vma_start_write(vma);
2019 * vma->vm_start/vm_end cannot change under us because the caller
2023 anon_vma_lock_write(vma->anon_vma);
2026 if (address > vma->vm_end) {
2029 size = address - vma->vm_start;
2030 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2033 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2034 error = acct_stack_growth(vma, size, grow);
2038 * we need to protect against concurrent vma
2042 * anon vma. So, we reuse mm->page_table_lock
2043 * to guard against concurrent vma expansions.
2046 if (vma->vm_flags & VM_LOCKED)
2048 vm_stat_account(mm, vma->vm_flags, grow);
2049 anon_vma_interval_tree_pre_update_vma(vma);
2050 vma->vm_end = address;
2052 mas_store_prealloc(&mas, vma);
2053 anon_vma_interval_tree_post_update_vma(vma);
2056 perf_event_mmap(vma);
2060 anon_vma_unlock_write(vma->anon_vma);
2061 khugepaged_enter_vma(vma, vma->vm_flags);
2069 * vma is the first one with address < vma->vm_start. Have to extend vma.
2072 int expand_downwards(struct vm_area_struct *vma, unsigned long address)
2074 struct mm_struct *mm = vma->vm_mm;
2075 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start);
2079 if (!(vma->vm_flags & VM_GROWSDOWN))
2097 mas_next_range(&mas, vma->vm_start);
2099 __mas_set_range(&mas, address, vma->vm_end - 1);
2100 if (mas_preallocate(&mas, vma, GFP_KERNEL))
2104 if (unlikely(anon_vma_prepare(vma))) {
2110 vma_start_write(vma);
2112 * vma->vm_start/vm_end cannot change under us because the caller
2116 anon_vma_lock_write(vma->anon_vma);
2119 if (address < vma->vm_start) {
2122 size = vma->vm_end - address;
2123 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2126 if (grow <= vma->vm_pgoff) {
2127 error = acct_stack_growth(vma, size, grow);
2131 * we need to protect against concurrent vma
2135 * anon vma. So, we reuse mm->page_table_lock
2136 * to guard against concurrent vma expansions.
2139 if (vma->vm_flags & VM_LOCKED)
2141 vm_stat_account(mm, vma->vm_flags, grow);
2142 anon_vma_interval_tree_pre_update_vma(vma);
2143 vma->vm_start = address;
2144 vma->vm_pgoff -= grow;
2146 mas_store_prealloc(&mas, vma);
2147 anon_vma_interval_tree_post_update_vma(vma);
2150 perf_event_mmap(vma);
2154 anon_vma_unlock_write(vma->anon_vma);
2155 khugepaged_enter_vma(vma, vma->vm_flags);
2178 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2180 return expand_upwards(vma, address);
2185 struct vm_area_struct *vma, *prev;
2188 vma = find_vma_prev(mm, addr, &prev);
2189 if (vma && (vma->vm_start <= addr))
2190 return vma;
2200 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address)
2202 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
2204 return expand_downwards(vma, address);
2209 struct vm_area_struct *vma;
2213 vma = find_vma(mm, addr);
2214 if (!vma)
2216 if (vma->vm_start <= addr)
2217 return vma;
2218 start = vma->vm_start;
2219 if (expand_stack_locked(vma, addr))
2221 if (vma->vm_flags & VM_LOCKED)
2222 populate_vma_page_range(vma, addr, start, NULL);
2223 return vma;
2235 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr)
2237 return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) &&
2246 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr)
2248 if (!vma_expand_ok(vma, addr))
2250 if (vma->vm_end != (addr & PAGE_MASK))
2252 return expand_upwards(vma, addr);
2255 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr)
2257 if (!vma_expand_ok(vma, addr))
2259 return expand_downwards(vma, addr);
2264 #define vma_expand_up(vma,addr) expand_upwards(vma, addr)
2265 #define vma_expand_down(vma, addr) (-EFAULT)
2269 #define vma_expand_up(vma,addr) (-EFAULT)
2270 #define vma_expand_down(vma, addr) expand_downwards(vma, addr)
2279 * the lock for writing, tries to look up a vma again, expands it if
2282 * If no vma is found or it can't be expanded, it returns NULL and has
2287 struct vm_area_struct *vma, *prev;
2293 vma = find_vma_prev(mm, addr, &prev);
2294 if (vma && vma->vm_start <= addr)
2298 vma = prev;
2302 if (vma && !vma_expand_down(vma, addr))
2310 return vma;
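
expand_upwards()/expand_downwards() and the expand_stack*() wrappers matched above (lines 1972-2310) grow a VM_GROWSUP/VM_GROWSDOWN vma when an access faults just beyond its current boundary. A runnable sketch of the visible effect, assuming Linux with a downward-growing main stack and the usual multi-megabyte RLIMIT_STACK; eat_stack() and show_stack_vma() are illustrative helpers, not kernel functions. Consuming stack through recursion moves the start of the [stack] vma downwards.

#include <stdio.h>
#include <string.h>

/* Print the [stack] line from /proc/self/maps. */
static void show_stack_vma(const char *tag)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];

	while (f && fgets(line, sizeof(line), f)) {
		if (strstr(line, "[stack]"))
			printf("%-8s %s", tag, line);
	}
	if (f)
		fclose(f);
}

/* Burn real stack so faults below vm_start trigger expand_downwards(). */
static long eat_stack(int depth)
{
	volatile char buf[64 * 1024];
	long sum = 0;
	size_t i;

	for (i = 0; i < sizeof(buf); i += 4096)
		buf[i] = (char)depth;	/* touch at least one byte per page */
	if (depth > 0)
		sum = eat_stack(depth - 1);
	return sum + buf[0];		/* keeps the frame live, no tail call */
}

int main(void)
{
	show_stack_vma("before:");
	eat_stack(32);			/* roughly 2 MiB of extra stack */
	show_stack_vma("after:");	/* start address has moved down */
	return 0;
}
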
2315 * and do the vma updates.
2322 struct vm_area_struct *vma;
2326 mas_for_each(mas, vma, ULONG_MAX) {
2327 long nrpages = vma_pages(vma);
2329 if (vma->vm_flags & VM_ACCOUNT)
2331 vm_stat_account(mm, vma->vm_flags, -nrpages);
2332 remove_vma(vma, false);
2343 struct vm_area_struct *vma, struct vm_area_struct *prev,
2353 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
2355 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2366 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2373 WARN_ON(vma->vm_start >= addr);
2374 WARN_ON(vma->vm_end <= addr);
2376 if (vma->vm_ops && vma->vm_ops->may_split) {
2377 err = vma->vm_ops->may_split(vma, addr);
2382 new = vm_area_dup(vma);
2390 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2398 err = vma_dup_policy(vma, new);
2402 err = anon_vma_clone(new, vma);
2412 vma_start_write(vma);
2415 init_vma_prep(&vp, vma);
2418 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
2421 vma->vm_start = addr;
2422 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
2424 vma->vm_end = addr;
2427 /* vma_complete stores the new vma */
2428 vma_complete(&vp, vmi, vma->vm_mm);
2445 * Split a vma into two pieces at address 'addr', a new vma is allocated
2448 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
2451 if (vma->vm_mm->map_count >= sysctl_max_map_count)
2454 return __split_vma(vmi, vma, addr, new_below);
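
__split_vma()/split_vma() (lines 2366-2454) are what turn an operation on the interior of a mapping into separate vmas on either side. A minimal runnable illustration, assuming Linux; dump_range() is a local helper, not kernel code. Changing the protection of a single interior page with mprotect() leaves three entries where there was one.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Print every /proc/self/maps line overlapping [base, base + span). */
static void dump_range(const char *tag, void *base, size_t span)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];
	unsigned long lo = (unsigned long)base, hi = lo + span;

	printf("--- %s ---\n", tag);
	while (f && fgets(line, sizeof(line), f)) {
		unsigned long s, e;

		if (sscanf(line, "%lx-%lx", &s, &e) == 2 && s < hi && e > lo)
			fputs(line, stdout);
	}
	if (f)
		fclose(f);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	dump_range("one read-write mapping", p, len);

	/*
	 * Make one interior page read-only: the mprotect() path calls
	 * __split_vma() at both boundaries, leaving rw / r-- / rw vmas.
	 */
	mprotect(p + 3 * page, page, PROT_READ);
	dump_range("after mprotect() of one middle page", p, len);
	return 0;
}
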
2459 * @vmi: The vma iterator
2460 * @vma: The starting vm_area_struct
2472 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
2486 * If we need to split any vma, do it now to save pain later.
2490 * places tmp vma above, and higher split_vma places tmp vma below.
2494 if (start > vma->vm_start) {
2501 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2504 error = __split_vma(vmi, vma, start, 1);
2513 next = vma;
2595 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count,
2624 * @vmi: The vma iterator
2643 struct vm_area_struct *vma;
2661 vma = vma_find(vmi, end);
2662 if (!vma) {
2668 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
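
do_vmi_munmap()/do_vmi_align_munmap() (lines 2472-2668) handle partial unmaps the same way, splitting the boundary vmas first as the comment at line 2486 notes. Complementing the mprotect() example above, a small runnable sketch, again assuming Linux and reusing the same illustrative dump_range() helper: punching a hole out of the middle of one mapping leaves two vmas behind.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

/* Print every /proc/self/maps line overlapping [base, base + span). */
static void dump_range(const char *tag, void *base, size_t span)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];
	unsigned long lo = (unsigned long)base, hi = lo + span;

	printf("--- %s ---\n", tag);
	while (f && fgets(line, sizeof(line), f)) {
		unsigned long s, e;

		if (sscanf(line, "%lx-%lx", &s, &e) == 2 && s < hi && e > lo)
			fputs(line, stdout);
	}
	if (f)
		fclose(f);
}

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t len = 8 * page;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Unmap two pages out of the middle: the vma is split at both
	 * boundaries and only the middle piece is removed. */
	munmap(p + 3 * page, 2 * page);
	dump_range("after punching a hole in the middle", p, len);
	return 0;
}
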
2692 struct vm_area_struct *vma = NULL;
2745 vma = next;
2751 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file,
2752 pgoff, vma->vm_userfaultfd_ctx, NULL) :
2756 vma = prev;
2763 if (vma &&
2764 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) {
2765 khugepaged_enter_vma(vma, vm_flags);
2769 if (vma == prev)
2778 vma = vm_area_alloc(mm);
2779 if (!vma) {
2785 vma->vm_start = addr;
2786 vma->vm_end = end;
2787 vm_flags_init(vma, vm_flags);
2788 vma->vm_page_prot = vm_get_page_prot(vm_flags);
2789 vma->vm_pgoff = pgoff;
2798 vma->vm_file = get_file(file);
2799 error = call_mmap(file, vma);
2808 if (WARN_ON((addr != vma->vm_start)))
2814 * vma again as we may succeed this time.
2816 if (unlikely(vm_flags != vma->vm_flags && prev)) {
2817 merge = vma_merge(&vmi, mm, prev, vma->vm_start,
2818 vma->vm_end, vma->vm_flags, NULL,
2819 vma->vm_file, vma->vm_pgoff, NULL,
2823 * ->mmap() can change vma->vm_file and fput
2824 * the original file. So fput the vma->vm_file
2829 fput(vma->vm_file);
2830 vm_area_free(vma);
2831 vma = merge;
2833 vm_flags = vma->vm_flags;
2838 vm_flags = vma->vm_flags;
2840 error = shmem_zero_setup(vma);
2844 vma_set_anonymous(vma);
2847 if (map_deny_write_exec(vma, vma->vm_flags)) {
2854 if (!arch_validate_flags(vma->vm_flags))
2858 if (vma_iter_prealloc(&vmi, vma))
2862 vma_start_write(vma);
2863 vma_iter_store(&vmi, vma);
2865 if (vma->vm_file) {
2866 i_mmap_lock_write(vma->vm_file->f_mapping);
2867 if (vma->vm_flags & VM_SHARED)
2868 mapping_allow_writable(vma->vm_file->f_mapping);
2870 flush_dcache_mmap_lock(vma->vm_file->f_mapping);
2871 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
2872 flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
2873 i_mmap_unlock_write(vma->vm_file->f_mapping);
2880 khugepaged_enter_vma(vma, vma->vm_flags);
2882 /* Once vma denies write, undo our temporary denial count */
2886 file = vma->vm_file;
2887 ksm_add_vma(vma);
2889 perf_event_mmap(vma);
2893 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
2894 is_vm_hugetlb_page(vma) ||
2895 vma == get_gate_vma(current->mm))
2896 vm_flags_clear(vma, VM_LOCKED_MASK);
2902 uprobe_mmap(vma);
2905 * New (or expanded) vma always gets soft dirty status.
2907 * be able to distinguish the situation when a vma area is unmapped,
2911 vm_flags_set(vma, VM_SOFTDIRTY);
2913 vma_set_page_prot(vma);
2919 if (file && vma->vm_ops && vma->vm_ops->close)
2920 vma->vm_ops->close(vma);
2922 if (file || vma->vm_file) {
2924 fput(vma->vm_file);
2925 vma->vm_file = NULL;
2927 vma_iter_set(&vmi, vma->vm_end);
2929 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
2930 vma->vm_end, vma->vm_end, true);
2935 vm_area_free(vma);
2982 struct vm_area_struct *vma;
3005 vma = vma_lookup(mm, start);
3007 if (!vma || !(vma->vm_flags & VM_SHARED))
3010 if (start + size > vma->vm_end) {
3011 VMA_ITERATOR(vmi, mm, vma->vm_end);
3012 struct vm_area_struct *next, *prev = vma;
3019 if (next->vm_file != vma->vm_file)
3022 if (next->vm_flags != vma->vm_flags)
3035 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3036 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3037 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3041 if (vma->vm_flags & VM_LOCKED)
3044 file = get_file(vma->vm_file);
3045 ret = do_mmap(vma->vm_file, start, size,
3058 * do_vma_munmap() - Unmap a full or partial vma.
3059 * @vmi: The vma iterator pointing at the vma
3060 * @vma: The first vma to be munmapped
3066 * unmaps a VMA mapping when the vma iterator is already in position.
3072 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3076 struct mm_struct *mm = vma->vm_mm;
3079 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock);
3083 * do_brk_flags() - Increase the brk vma if the flags match.
3084 * @vmi: The vma iterator
3087 * @vma: The vma,
3094 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
3115 * Expand the existing vma if possible; Note that singular lists do not
3118 if (vma && vma->vm_end == addr && !vma_policy(vma) &&
3119 can_vma_merge_after(vma, flags, NULL, NULL,
3121 vma_iter_config(vmi, vma->vm_start, addr + len);
3122 if (vma_iter_prealloc(vmi, vma))
3125 vma_start_write(vma);
3127 init_vma_prep(&vp, vma);
3129 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0);
3130 vma->vm_end = addr + len;
3131 vm_flags_set(vma, VM_SOFTDIRTY);
3132 vma_iter_store(vmi, vma);
3135 khugepaged_enter_vma(vma, flags);
3139 if (vma)
3141 /* create a vma struct for an anonymous mapping */
3142 vma = vm_area_alloc(mm);
3143 if (!vma)
3146 vma_set_anonymous(vma);
3147 vma->vm_start = addr;
3148 vma->vm_end = addr + len;
3149 vma->vm_pgoff = addr >> PAGE_SHIFT;
3150 vm_flags_init(vma, flags);
3151 vma->vm_page_prot = vm_get_page_prot(flags);
3152 vma_start_write(vma);
3153 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
3158 ksm_add_vma(vma);
3160 perf_event_mmap(vma);
3165 vm_flags_set(vma, VM_SOFTDIRTY);
3169 vm_area_free(vma);
3178 struct vm_area_struct *vma = NULL;
3206 vma = vma_prev(&vmi);
3207 ret = do_brk_flags(&vmi, vma, addr, len, flags);
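
do_brk_flags() (lines 3083-3207) backs brk(2): when the vma ending at the old break passes the can_vma_merge_after() check at line 3119 it is simply extended, otherwise a new anonymous vma is created. A runnable userspace sketch using sbrk(), assuming Linux/glibc and that a [heap] vma already exists by the time it runs; show_heap() is an illustrative helper.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Print the [heap] line from /proc/self/maps. */
static void show_heap(const char *tag)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];

	while (f && fgets(line, sizeof(line), f)) {
		if (strstr(line, "[heap]"))
			printf("%-20s %s", tag, line);
	}
	if (f)
		fclose(f);
}

int main(void)
{
	show_heap("initial brk:");

	/*
	 * Move the program break up by 1 MiB; the brk(2) path lands in
	 * do_brk_flags(), which normally just extends the existing heap vma.
	 */
	if (sbrk(1 << 20) == (void *)-1)
		return 1;
	show_heap("after sbrk(1 MiB):");
	return 0;
}
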
3232 struct vm_area_struct *vma;
3243 vma = mas_find(&mas, ULONG_MAX);
3244 if (!vma) {
3255 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false);
3265 mas_set(&mas, vma->vm_end);
3266 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS,
3275 mas_set(&mas, vma->vm_end);
3277 if (vma->vm_flags & VM_ACCOUNT)
3278 nr_accounted += vma_pages(vma);
3279 remove_vma(vma, true);
3282 } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL);
3296 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3298 unsigned long charged = vma_pages(vma);
3301 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end))
3304 if ((vma->vm_flags & VM_ACCOUNT) &&
3309 * The vm_pgoff of a purely anonymous vma should be irrelevant
3316 * vma, merges and splits can happen in a seamless way, just
3320 if (vma_is_anonymous(vma)) {
3321 BUG_ON(vma->anon_vma);
3322 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3325 if (vma_link(mm, vma)) {
3334 * Copy the vma structure to a new location in the same mm,
3341 struct vm_area_struct *vma = *vmap;
3342 unsigned long vma_start = vma->vm_start;
3343 struct mm_struct *mm = vma->vm_mm;
3349 * If anonymous vma has not yet been faulted, update new pgoff
3352 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3361 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags,
3362 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3363 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3366 * Source vma may have been merged into new_vma
3372 * self during an mremap is if the vma hasn't
3374 * reset the dst vma->vm_pgoff to the
3383 *vmap = vma = new_vma;
3385 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3387 new_vma = vm_area_dup(vma);
3393 if (vma_dup_policy(vma, new_vma))
3395 if (anon_vma_clone(new_vma, vma))
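
copy_vma() (lines 3334-3395) sets up the destination vma when mremap() has to move a mapping, either merging the new range into a neighbour via vma_merge() or duplicating the source vma with vm_area_dup(). A runnable sketch of the user-visible side, assuming Linux and glibc's mremap() wrapper; whether the mapping actually moves depends on what happens to sit above it in the address space.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	size_t old_len = 4 * page, new_len = 1024 * page;

	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "contents survive the move");

	/*
	 * Ask for a much larger mapping.  If it cannot grow in place,
	 * mremap() relocates it, and the kernel's move path uses copy_vma()
	 * to set up the vma at the new address.
	 */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("old address %p, new address %p\n", (void *)p, (void *)q);
	printf("data after mremap(): \"%s\"\n", q);
	return 0;
}
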
3467 * Having a close hook prevents vma merging regardless of flags.
3469 static void special_mapping_close(struct vm_area_struct *vma)
3473 static const char *special_mapping_name(struct vm_area_struct *vma)
3475 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3491 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr)
3496 * the size of vma should stay the same over the special mapping's
3519 struct vm_area_struct *vma = vmf->vma;
3523 if (vma->vm_ops == &legacy_special_mapping_vmops) {
3524 pages = vma->vm_private_data;
3526 struct vm_special_mapping *sm = vma->vm_private_data;
3529 return sm->fault(sm, vmf->vma, vmf);
3554 struct vm_area_struct *vma;
3556 vma = vm_area_alloc(mm);
3557 if (unlikely(vma == NULL))
3560 vma->vm_start = addr;
3561 vma->vm_end = addr + len;
3563 vm_flags_init(vma, (vm_flags | mm->def_flags |
3565 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3567 vma->vm_ops = ops;
3568 vma->vm_private_data = priv;
3570 ret = insert_vm_struct(mm, vma);
3574 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3576 perf_event_mmap(vma);
3578 return vma;
3581 vm_area_free(vma);
3585 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3588 return vma->vm_private_data == sm &&
3589 (vma->vm_ops == &special_mapping_vmops ||
3590 vma->vm_ops == &legacy_special_mapping_vmops);
3595 * Insert a new vma covering the given region, with the given flags.
3615 struct vm_area_struct *vma = __install_special_mapping(
3619 return PTR_ERR_OR_ZERO(vma);
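
__install_special_mapping()/install_special_mapping() (lines 3554-3619) are how architecture code inserts vmas such as the vDSO, and the close/split hooks matched at lines 3469-3491 exist precisely so those vmas are never merged or split. A trivial runnable check, assuming Linux; the exact names ([vdso], [vvar], [sigpage], ...) vary by architecture. It lists the special mappings the kernel installed into the current process.

#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[256];

	if (!f)
		return 1;
	/* These vmas were created through _install_special_mapping(),
	 * not by an mmap() call from this process. */
	while (fgets(line, sizeof(line), f)) {
		if (strstr(line, "[vdso]") || strstr(line, "[vvar]") ||
		    strstr(line, "[sigpage]"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}

This ties back to lines 743 and 3467: the non-NULL ->close hook on these areas is exactly what makes is_mergeable_vma() refuse to merge them.
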
3634 * anon_vma->root->rwsem. If some other vma in this mm shares
3666 * This operation locks against the VM for all pte/vma/mm related
3676 * altering the vma layout. It's also needed in write mode to avoid new
3684 * vma in this mm is backed by the same anon_vma or address_space.
3705 struct vm_area_struct *vma;
3719 mas_for_each(&mas, vma, ULONG_MAX) {
3722 vma_start_write(vma);
3726 mas_for_each(&mas, vma, ULONG_MAX) {
3729 if (vma->vm_file && vma->vm_file->f_mapping &&
3730 is_vm_hugetlb_page(vma))
3731 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3735 mas_for_each(&mas, vma, ULONG_MAX) {
3738 if (vma->vm_file && vma->vm_file->f_mapping &&
3739 !is_vm_hugetlb_page(vma))
3740 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3744 mas_for_each(&mas, vma, ULONG_MAX) {
3747 if (vma->anon_vma)
3748 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3767 * the vma so the users using the anon_vma->rb_root will
3801 struct vm_area_struct *vma;
3808 mas_for_each(&mas, vma, ULONG_MAX) {
3809 if (vma->anon_vma)
3810 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3812 if (vma->vm_file && vma->vm_file->f_mapping)
3813 vm_unlock_mapping(vma->vm_file->f_mapping);