Lines matching refs: vma (a cross-reference listing of vma references in the kernel's memory-mapping code, mm/mmap.c; each entry is prefixed by its source line number)

87 		struct vm_area_struct *vma, struct vm_area_struct *prev,
131 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
132 void vma_set_page_prot(struct vm_area_struct *vma)
134 unsigned long vm_flags = vma->vm_flags;
137 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
138 if (vma_wants_writenotify(vma, vm_page_prot)) {
142 /* remove_protection_ptes reads vma->vm_page_prot without mmap_lock */
143 WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
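
The fragments above (lines 131-143) show vma_set_page_prot() deriving vm_page_prot from vm_flags, dropping the write bit when write-notify is wanted, and publishing the result with WRITE_ONCE() because readers may look at it without mmap_lock. A minimal user-space sketch of that pattern; the flag bits, struct, and function names here are illustrative simplifications, not the kernel's:

        #include <stdatomic.h>
        #include <stdio.h>

        /* Illustrative flag bits -- not the kernel's VM_* definitions. */
        #define TVM_READ   0x1UL
        #define TVM_WRITE  0x2UL
        #define TVM_SHARED 0x4UL

        struct toy_vma {
                unsigned long vm_flags;
                _Atomic unsigned long vm_page_prot;     /* read locklessly elsewhere */
        };

        /* Shared, writable mappings that want dirty tracking lose the write
         * bit so the first store faults and can be noted (write-notify). */
        static int toy_wants_writenotify(unsigned long flags)
        {
                return (flags & (TVM_SHARED | TVM_WRITE)) == (TVM_SHARED | TVM_WRITE);
        }

        static void toy_set_page_prot(struct toy_vma *vma)
        {
                unsigned long prot = vma->vm_flags & (TVM_READ | TVM_WRITE);

                if (toy_wants_writenotify(vma->vm_flags))
                        prot &= ~TVM_WRITE;
                /* publish in one store, like WRITE_ONCE(vma->vm_page_prot, ...) */
                atomic_store_explicit(&vma->vm_page_prot, prot, memory_order_relaxed);
        }

        int main(void)
        {
                struct toy_vma v = { .vm_flags = TVM_READ | TVM_WRITE | TVM_SHARED };

                toy_set_page_prot(&v);
                printf("page prot bits: %#lx\n",
                       atomic_load_explicit(&v.vm_page_prot, memory_order_relaxed));
                return 0;
        }
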
149 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
152 if (vma->vm_flags & VM_DENYWRITE)
154 if (vma->vm_flags & VM_SHARED)
158 vma_interval_tree_remove(vma, &mapping->i_mmap);
164 * vma from rmap and vmtruncate before freeing its page tables.
166 void unlink_file_vma(struct vm_area_struct *vma)
168 struct file *file = vma->vm_file;
173 __remove_shared_vm_struct(vma, file, mapping);
181 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
183 struct vm_area_struct *next = vma->vm_next;
186 if (vma->vm_ops && vma->vm_ops->close)
187 vma->vm_ops->close(vma);
188 if (vma->vm_file)
189 fput(vma->vm_file);
190 mpol_put(vma_policy(vma));
191 vm_area_free(vma);
296 static inline unsigned long vma_compute_gap(struct vm_area_struct *vma)
306 gap = vm_start_gap(vma);
307 if (vma->vm_prev) {
308 prev_end = vm_end_gap(vma->vm_prev);
318 static unsigned long vma_compute_subtree_gap(struct vm_area_struct *vma)
320 unsigned long max = vma_compute_gap(vma), subtree_gap;
321 if (vma->vm_rb.rb_left) {
322 subtree_gap = rb_entry(vma->vm_rb.rb_left,
327 if (vma->vm_rb.rb_right) {
328 subtree_gap = rb_entry(vma->vm_rb.rb_right,
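
vma_compute_gap() and vma_compute_subtree_gap() (lines 296-328 above) maintain the augmented rbtree: each vma caches, in rb_subtree_gap, the largest free gap found anywhere in its subtree, computed as the maximum of its own gap (the space between vm_start and the previous vma's vm_end) and the cached values of its two children. A simplified sketch of that computation, using a toy node type rather than the kernel's vm_area_struct:

        #include <stdio.h>

        /* Toy stand-in for vm_area_struct; field names mirror the kernel's
         * but the type is purely illustrative. */
        struct toy_vma {
                unsigned long vm_start, vm_end;
                struct toy_vma *vm_prev;                /* previous vma by address   */
                struct toy_vma *rb_left, *rb_right;     /* rbtree children           */
                unsigned long rb_subtree_gap;           /* cached max gap below here */
        };

        /* Gap between this vma's start and the end of the previous vma
         * (the whole range below vm_start if there is no previous vma). */
        static unsigned long toy_compute_gap(const struct toy_vma *vma)
        {
                unsigned long prev_end = vma->vm_prev ? vma->vm_prev->vm_end : 0;

                return vma->vm_start > prev_end ? vma->vm_start - prev_end : 0;
        }

        /* max(own gap, children's cached gaps) -- the value rb_subtree_gap caches. */
        static unsigned long toy_compute_subtree_gap(const struct toy_vma *vma)
        {
                unsigned long max = toy_compute_gap(vma);

                if (vma->rb_left && vma->rb_left->rb_subtree_gap > max)
                        max = vma->rb_left->rb_subtree_gap;
                if (vma->rb_right && vma->rb_right->rb_subtree_gap > max)
                        max = vma->rb_right->rb_subtree_gap;
                return max;
        }

        int main(void)
        {
                struct toy_vma a = { .vm_start = 0x1000, .vm_end = 0x2000 };
                struct toy_vma b = { .vm_start = 0x5000, .vm_end = 0x6000, .vm_prev = &a };

                a.rb_subtree_gap = toy_compute_subtree_gap(&a);
                b.rb_left = &a;
                b.rb_subtree_gap = toy_compute_subtree_gap(&b);
                printf("gap before b: %#lx, subtree gap at b: %#lx\n",
                       toy_compute_gap(&b), b.rb_subtree_gap);  /* 0x3000, 0x3000 */
                return 0;
        }

This cached maximum is what the unmapped_area() searches further down rely on to skip whole subtrees that cannot contain a large-enough gap.
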
344 struct vm_area_struct *vma;
345 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
346 if (vma->vm_start < prev) {
348 vma->vm_start, prev);
351 if (vma->vm_start < pend) {
353 vma->vm_start, pend);
356 if (vma->vm_start > vma->vm_end) {
358 vma->vm_start, vma->vm_end);
362 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
364 vma->rb_subtree_gap,
365 vma_compute_subtree_gap(vma));
371 prev = vma->vm_start;
372 pend = vma->vm_end;
389 struct vm_area_struct *vma;
390 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
391 VM_BUG_ON_VMA(vma != ignore &&
392 vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
393 vma);
402 struct vm_area_struct *vma = mm->mmap;
404 while (vma) {
405 struct anon_vma *anon_vma = vma->anon_vma;
410 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
415 highest_address = vm_end_gap(vma);
416 vma = vma->vm_next;
446 * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
447 * vma->vm_prev->vm_end values changed, without modifying the vma's position
450 static void vma_gap_update(struct vm_area_struct *vma)
456 vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
459 static inline void vma_rb_insert(struct vm_area_struct *vma,
465 rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
468 static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
475 rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
478 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
486 * a. the "next" vma being erased if next->vm_start was reduced in
488 * b. the vma being erased in detach_vmas_to_be_unmapped() ->
493 __vma_rb_erase(vma, root);
496 static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
499 vma_rb_erase_ignore(vma, root, vma);
503 * vma has some anon_vma assigned, and is already inserted on that
506 * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
507 * vma must be removed from the anon_vma's interval trees using
510 * After the update, the vma will be reinserted using
517 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
521 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
526 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
530 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
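
The pre/post update helpers above (lines 503-530) capture a general pattern: a vma indexed by its anon_vma interval trees must be removed from those trees before vm_start/vm_end/vm_pgoff change and reinserted afterwards, because those fields are the tree keys. A minimal sketch of the same remove/mutate/reinsert discipline, using an ordinary address-sorted list instead of an interval tree (the list and helper names are invented for illustration):

        #include <stdio.h>
        #include <stddef.h>

        /* Toy region kept on a list sorted by vm_start (the "tree key" here). */
        struct toy_vma {
                unsigned long vm_start, vm_end;
                struct toy_vma *next;
        };

        static void toy_remove(struct toy_vma **head, struct toy_vma *vma)
        {
                struct toy_vma **pp = head;

                while (*pp && *pp != vma)
                        pp = &(*pp)->next;
                if (*pp)
                        *pp = vma->next;
        }

        static void toy_insert_sorted(struct toy_vma **head, struct toy_vma *vma)
        {
                struct toy_vma **pp = head;

                while (*pp && (*pp)->vm_start < vma->vm_start)
                        pp = &(*pp)->next;
                vma->next = *pp;
                *pp = vma;
        }

        int main(void)
        {
                struct toy_vma a = { 0x1000, 0x2000, NULL };
                struct toy_vma b = { 0x4000, 0x5000, NULL };
                struct toy_vma *head = NULL;

                toy_insert_sorted(&head, &b);
                toy_insert_sorted(&head, &a);

                /* "pre update": unlink before touching the key ... */
                toy_remove(&head, &a);
                a.vm_start = 0x6000;            /* ... mutate vm_start/vm_end ... */
                a.vm_end   = 0x7000;
                /* "post update": reinsert so the ordering stays correct. */
                toy_insert_sorted(&head, &a);

                for (struct toy_vma *v = head; v; v = v->next)
                        printf("[%#lx, %#lx)\n", v->vm_start, v->vm_end);
                return 0;
        }
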
550 /* Fail if an existing vma overlaps the area */
571 * @vma: The current vma.
573 * If @vma is NULL, return the first vma in the mm.
575 * Returns: The next VMA after @vma.
578 struct vm_area_struct *vma)
580 if (!vma)
583 return vma->vm_next;
616 struct vm_area_struct *vma;
619 vma = find_vma_intersection(mm, addr, end);
620 if (!vma)
623 nr_pages = (min(end, vma->vm_end) -
624 max(addr, vma->vm_start)) >> PAGE_SHIFT;
627 for (vma = vma->vm_next; vma; vma = vma->vm_next) {
630 if (vma->vm_start > end)
633 overlap_len = min(end, vma->vm_end) - vma->vm_start;
640 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
643 /* Update tracking information for the gap following the new vma. */
644 if (vma->vm_next)
645 vma_gap_update(vma->vm_next);
647 mm->highest_vm_end = vm_end_gap(vma);
650 * vma->vm_prev wasn't known when we followed the rbtree to find the
651 * correct insertion point for that vma. As a result, we could not
652 * update the vma vm_rb parents rb_subtree_gap values on the way down.
653 * So, we first insert the vma with a zero rb_subtree_gap value
658 rb_link_node(&vma->vm_rb, rb_parent, rb_link);
659 vma->rb_subtree_gap = 0;
660 vma_gap_update(vma);
661 vma_rb_insert(vma, &mm->mm_rb);
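
The comment around __vma_link_rb() (lines 650-661) explains why a newly linked vma starts with rb_subtree_gap = 0 and then calls vma_gap_update(): the correct gap was not known while walking down the tree, so the cached values are fixed up afterwards by propagating from the new node toward the root, stopping once an ancestor's cached value is already right. A sketch of that propagation step, again with a toy node type carrying an explicit parent pointer:

        #include <stdio.h>

        /* Toy augmented node: 'gap' is this vma's own gap, 'subtree_gap' the
         * cached maximum over the whole subtree.  Names are illustrative. */
        struct toy_node {
                struct toy_node *parent, *left, *right;
                unsigned long gap;
                unsigned long subtree_gap;
        };

        /* Walk from 'node' toward the root, refreshing cached maxima.  Stop
         * early once a node's cache is already correct -- the ancestors
         * above it cannot be stale either. */
        static void toy_gap_propagate(struct toy_node *node)
        {
                while (node) {
                        unsigned long max = node->gap;

                        if (node->left && node->left->subtree_gap > max)
                                max = node->left->subtree_gap;
                        if (node->right && node->right->subtree_gap > max)
                                max = node->right->subtree_gap;
                        if (node->subtree_gap == max)
                                break;
                        node->subtree_gap = max;
                        node = node->parent;
                }
        }

        int main(void)
        {
                struct toy_node leaf = { .gap = 0x3000 };
                struct toy_node root = { .left = &leaf, .gap = 0x1000 };

                leaf.parent = &root;
                /* Freshly linked node: cache starts at 0, then gets fixed up. */
                leaf.subtree_gap = 0;
                toy_gap_propagate(&leaf);
                printf("root subtree_gap: %#lx\n", root.subtree_gap);   /* 0x3000 */
                return 0;
        }
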
664 static void __vma_link_file(struct vm_area_struct *vma)
668 file = vma->vm_file;
672 if (vma->vm_flags & VM_DENYWRITE)
674 if (vma->vm_flags & VM_SHARED)
678 vma_interval_tree_insert(vma, &mapping->i_mmap);
684 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
688 __vma_link_list(mm, vma, prev);
689 __vma_link_rb(mm, vma, rb_link, rb_parent);
692 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
698 if (vma->vm_file) {
699 mapping = vma->vm_file->f_mapping;
703 __vma_link(mm, vma, prev, rb_link, rb_parent);
704 __vma_link_file(vma);
714 * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
717 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
722 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
725 __vma_link(mm, vma, prev, rb_link, rb_parent);
730 struct vm_area_struct *vma,
733 vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
734 __vma_unlink_list(mm, vma);
740 * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
743 * are necessary. The "insert" vma (if any) is to be inserted
746 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
750 struct mm_struct *mm = vma->vm_mm;
751 struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
755 struct file *file = vma->vm_file;
765 * vma expands, overlapping all the next, and
772 * The only case where we don't expand "vma"
778 * removing "vma" and that to do so we
779 * swapped "vma" and "next".
783 swap(vma, next);
785 VM_WARN_ON(expand != vma);
798 importer = vma;
801 * If next doesn't have anon_vma, import from vma after
802 * next, if the vma overlaps with it.
809 * vma expands, overlapping part of the next:
814 importer = vma;
816 } else if (end < vma->vm_end) {
818 * vma shrinks, and !insert tells it's not
822 adjust_next = -(vma->vm_end - end);
823 exporter = vma;
830 * make sure the expanding vma has anon_vma set if the
831 * shrinking vma had, to cover any anon pages imported.
848 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
859 * space until vma start or end is updated.
865 anon_vma = vma->anon_vma;
872 anon_vma_interval_tree_pre_update_vma(vma);
879 vma_interval_tree_remove(vma, root);
884 if (start != vma->vm_start) {
885 vma->vm_start = start;
888 if (end != vma->vm_end) {
889 vma->vm_end = end;
892 vma->vm_pgoff = pgoff;
901 vma_interval_tree_insert(vma, root);
907 * vma_merge has merged next into vma, and needs
914 * vma is not before next if they've been
920 * "vma").
922 __vma_unlink(mm, next, vma);
927 * split_vma has split insert from vma, and needs
929 * (it may either follow vma or precede it).
934 vma_gap_update(vma);
937 mm->highest_vm_end = vm_end_gap(vma);
944 anon_vma_interval_tree_post_update_vma(vma);
952 uprobe_mmap(vma);
964 anon_vma_merge(vma, next);
975 * If "next" was removed and vma->vm_end was
978 * "vma->vm_next" gap must be updated.
980 next = vma->vm_next;
984 * "vma" considered pre-swap(): if "vma" was
987 * Because of the swap() the post-swap() "vma"
992 next = vma;
1014 * case if the "next" vma that was removed was
1015 * the highest vma of the mm. However in such
1017 * "vma" has vma->vm_end == next->vm_end so
1021 VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
1033 * If the vma has a ->close operation then the driver probably needs to release
1034 * per-vma resources, so we don't attempt to merge those.
1036 static inline int is_mergeable_vma(struct vm_area_struct *vma,
1049 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
1051 if (vma->vm_file != file)
1053 if (vma->vm_ops && vma->vm_ops->close)
1055 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
1057 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name))
1064 struct vm_area_struct *vma)
1070 if ((!anon_vma1 || !anon_vma2) && (!vma ||
1071 list_is_singular(&vma->anon_vma_chain)))
1078 * in front of (at a lower virtual address and file offset than) the vma.
1088 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
1094 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
1095 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1096 if (vma->vm_pgoff == vm_pgoff)
1104 * beyond (at a higher virtual address and file offset than) the vma.
1110 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
1116 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) &&
1117 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1119 vm_pglen = vma_pages(vma);
1120 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
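
can_vma_merge_before() and can_vma_merge_after() (lines 1078-1120) differ mainly in the file-offset check: merging a new range in front of a vma requires the file offset the new range reaches at vma->vm_start to equal vma->vm_pgoff (the caller passes pgoff + pglen for this), while merging behind requires vma->vm_pgoff + vma_pages(vma) to equal the new range's pgoff. A small arithmetic sketch of those two adjacency tests; a 4 KiB page size and the toy helper names are assumptions made for the example:

        #include <stdbool.h>
        #include <stdio.h>

        #define TOY_PAGE_SHIFT 12       /* assume 4 KiB pages */

        struct toy_vma {
                unsigned long vm_start, vm_end;
                unsigned long vm_pgoff;         /* file offset of vm_start, in pages */
        };

        static unsigned long toy_vma_pages(const struct toy_vma *vma)
        {
                return (vma->vm_end - vma->vm_start) >> TOY_PAGE_SHIFT;
        }

        /* A new range that ends where 'vma' begins can merge in front of it
         * if the file offset it reaches at that point equals vma->vm_pgoff. */
        static bool toy_can_merge_before(const struct toy_vma *vma,
                                         unsigned long pgoff_at_vma_start)
        {
                return vma->vm_pgoff == pgoff_at_vma_start;
        }

        /* A new range that begins where 'vma' ends can merge behind it if the
         * vma's offset plus its size in pages equals the new range's offset. */
        static bool toy_can_merge_after(const struct toy_vma *vma, unsigned long pgoff)
        {
                return vma->vm_pgoff + toy_vma_pages(vma) == pgoff;
        }

        int main(void)
        {
                /* vma maps file pages [10, 14) at addresses [0x10000, 0x14000). */
                struct toy_vma vma = { 0x10000, 0x14000, 10 };

                /* a 2-page range of file pages [8, 10) ending at 0x10000: 8 + 2 == 10 */
                printf("merge before: %d\n", toy_can_merge_before(&vma, 8 + 2));
                printf("merge after, pgoff 14: %d\n", toy_can_merge_after(&vma, 14));
                printf("merge after, pgoff 20: %d\n", toy_can_merge_after(&vma, 20));
                return 0;
        }
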
1140 * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1155 * It is important for case 8 that the vma NNNN overlapping the
1159 * rmap_locks, the properties of the merged vma will be already
1182 * We later require that vma->vm_flags == vm_flags,
1183 * so this tests vma->vm_flags & VM_SPECIAL, too.
1268 * we can merge the two vma's. For example, we refuse to merge a vma if
1284 * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1300 * We also make sure that the two vma's are compatible (adjacent,
1320 * anon_vmas being allocated, preventing vma merge in subsequent
1323 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1328 if (vma->vm_next) {
1329 anon_vma = reusable_anon_vma(vma->vm_next, vma, vma->vm_next);
1335 if (vma->vm_prev)
1336 anon_vma = reusable_anon_vma(vma->vm_prev, vma->vm_prev, vma);
1470 struct vm_area_struct *vma = find_vma(mm, addr);
1472 if (vma && vma->vm_start < addr + len)
1712 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1714 vm_flags_t vm_flags = vma->vm_flags;
1715 const struct vm_operations_struct *vm_ops = vma->vm_ops;
1736 !is_vm_hugetlb_page(vma))
1744 return vma->vm_file && vma->vm_file->f_mapping &&
1745 mapping_can_writeback(vma->vm_file->f_mapping);
1769 struct vm_area_struct *vma, *prev, *merge;
1805 vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
1807 if (vma)
1815 vma = vm_area_alloc(mm);
1816 if (!vma) {
1821 vma->vm_start = addr;
1822 vma->vm_end = addr + len;
1823 vma->vm_flags = vm_flags;
1824 vma->vm_page_prot = vm_get_page_prot(vm_flags);
1825 vma->vm_pgoff = pgoff;
1839 /* ->mmap() can change vma->vm_file, but must guarantee that
1844 vma->vm_file = get_file(file);
1845 error = call_mmap(file, vma);
1856 WARN_ON_ONCE(addr != vma->vm_start);
1858 addr = vma->vm_start;
1860 /* If vm_flags changed after call_mmap(), we should try merge vma again
1863 if (unlikely(vm_flags != vma->vm_flags && prev)) {
1864 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
1865 NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL);
1867 /* ->mmap() can change vma->vm_file and fput the original file. So
1868 * fput the vma->vm_file here or we would add an extra fput for file
1871 fput(vma->vm_file);
1872 vm_area_free(vma);
1873 vma = merge;
1875 vm_flags = vma->vm_flags;
1880 vm_flags = vma->vm_flags;
1882 error = shmem_zero_setup(vma);
1886 vma_set_anonymous(vma);
1889 /* Allow architectures to sanity-check the vma */
1890 if (security_mmap_region(vma) ||
1891 !arch_validate_flags(vma->vm_flags)) {
1899 vma_link(mm, vma, prev, rb_link, rb_parent);
1900 /* Once vma denies write, undo our temporary denial count */
1908 file = vma->vm_file;
1910 perf_event_mmap(vma);
1914 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1915 is_vm_hugetlb_page(vma) ||
1916 vma == get_gate_vma(current->mm))
1917 vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
1923 uprobe_mmap(vma);
1926 * New (or expanded) vma always get soft dirty status.
1928 * be able to distinguish situation when vma area unmapped,
1932 vma->vm_flags |= VM_SOFTDIRTY;
1934 vma_set_page_prot(vma);
1939 if (vma->vm_ops && vma->vm_ops->close)
1940 vma->vm_ops->close(vma);
1942 vma->vm_file = NULL;
1946 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1953 vm_area_free(vma);
1965 * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1966 * - gap_end = vma->vm_start >= info->low_limit + length;
1971 struct vm_area_struct *vma;
1991 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1992 if (vma->rb_subtree_gap < length)
1997 gap_end = vm_start_gap(vma);
1998 if (gap_end >= low_limit && vma->vm_rb.rb_left) {
2000 rb_entry(vma->vm_rb.rb_left,
2003 vma = left;
2008 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
2019 if (vma->vm_rb.rb_right) {
2021 rb_entry(vma->vm_rb.rb_right,
2024 vma = right;
2031 struct rb_node *prev = &vma->vm_rb;
2034 vma = rb_entry(rb_parent(prev),
2036 if (prev == vma->vm_rb.rb_left) {
2037 gap_start = vm_end_gap(vma->vm_prev);
2038 gap_end = vm_start_gap(vma);
2067 struct vm_area_struct *vma;
2096 vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
2097 if (vma->rb_subtree_gap < length)
2102 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
2103 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
2105 rb_entry(vma->vm_rb.rb_right,
2108 vma = right;
2115 gap_end = vm_start_gap(vma);
2124 if (vma->vm_rb.rb_left) {
2126 rb_entry(vma->vm_rb.rb_left,
2129 vma = left;
2136 struct rb_node *prev = &vma->vm_rb;
2139 vma = rb_entry(rb_parent(prev),
2141 if (prev == vma->vm_rb.rb_right) {
2142 gap_start = vma->vm_prev ?
2143 vm_end_gap(vma->vm_prev) : 0;
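
Lines 1965-2143 are the gap-search routines: unmapped_area() finds the lowest address where the gap before a vma satisfies gap_end - gap_start >= length within the allowed limits, unmapped_area_topdown() finds the highest, and the rb_subtree_gap cache lets whole subtrees be skipped. Below is a deliberately simpler linear sketch of the same search condition over an address-sorted array of regions, ignoring the rbtree pruning entirely; the names and the array representation are assumptions for illustration:

        #include <stdio.h>

        struct toy_vma { unsigned long vm_start, vm_end; };

        /* Lowest address of a free gap of at least 'length' bytes inside
         * [low_limit, high_limit), scanning regions sorted by address.
         * Returns 0 when nothing fits (0 is never a valid result here). */
        static unsigned long toy_unmapped_area(const struct toy_vma *vmas, int n,
                                               unsigned long length,
                                               unsigned long low_limit,
                                               unsigned long high_limit)
        {
                unsigned long gap_start = 0;

                for (int i = 0; i <= n; i++) {
                        /* gap_end is the next vma's start, or high_limit at the end */
                        unsigned long gap_end = (i < n) ? vmas[i].vm_start : high_limit;

                        if (gap_start < low_limit)
                                gap_start = low_limit;
                        if (gap_end > high_limit)
                                gap_end = high_limit;
                        if (gap_end > gap_start && gap_end - gap_start >= length)
                                return gap_start;
                        if (i < n)
                                gap_start = vmas[i].vm_end;
                }
                return 0;
        }

        int main(void)
        {
                const struct toy_vma vmas[] = {
                        { 0x10000, 0x20000 },
                        { 0x22000, 0x30000 },
                };

                /* First gap big enough for 0x8000 bytes starts at 0x30000. */
                printf("%#lx\n", toy_unmapped_area(vmas, 2, 0x8000, 0x10000, 0x100000));
                return 0;
        }

The kernel version walks the rbtree instead, descending left (or right, for topdown) only while rb_subtree_gap promises a gap of at least the requested length.
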
2204 struct vm_area_struct *vma, *prev;
2220 vma = find_vma_prev(mm, addr, &prev);
2222 (!vma || addr + len <= vm_start_gap(vma)) &&
2249 struct vm_area_struct *vma, *prev;
2269 vma = find_vma_prev(mm, addr, &prev);
2271 (!vma || addr + len <= vm_start_gap(vma)) &&
2351 struct vm_area_struct *vma;
2354 vma = vmacache_find(mm, addr);
2355 if (likely(vma))
2356 return vma;
2366 vma = tmp;
2374 if (vma)
2375 vmacache_update(addr, vma);
2376 return vma;
2388 struct vm_area_struct *vma;
2390 vma = find_vma(mm, addr);
2391 if (vma) {
2392 *pprev = vma->vm_prev;
2398 return vma;
2406 static int acct_stack_growth(struct vm_area_struct *vma,
2409 struct mm_struct *mm = vma->vm_mm;
2413 if (!may_expand_vm(mm, vma->vm_flags, grow))
2421 if (vma->vm_flags & VM_LOCKED) {
2432 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2433 vma->vm_end - size;
2434 if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2450 * vma is the last one with address > vma->vm_end. Have to extend vma.
2452 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2454 struct mm_struct *mm = vma->vm_mm;
2459 if (!(vma->vm_flags & VM_GROWSUP))
2475 next = vma->vm_next;
2483 if (unlikely(anon_vma_prepare(vma)))
2487 * vma->vm_start/vm_end cannot change under us because the caller
2491 anon_vma_lock_write(vma->anon_vma);
2494 if (address > vma->vm_end) {
2497 size = address - vma->vm_start;
2498 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2501 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2502 error = acct_stack_growth(vma, size, grow);
2508 * concurrent vma expansions.
2511 * in a mm share the same root anon vma.
2513 * against concurrent vma expansions.
2516 if (vma->vm_flags & VM_LOCKED)
2518 vm_stat_account(mm, vma->vm_flags, grow);
2519 anon_vma_interval_tree_pre_update_vma(vma);
2520 vma->vm_end = address;
2521 anon_vma_interval_tree_post_update_vma(vma);
2522 if (vma->vm_next)
2523 vma_gap_update(vma->vm_next);
2525 mm->highest_vm_end = vm_end_gap(vma);
2528 perf_event_mmap(vma);
2532 anon_vma_unlock_write(vma->anon_vma);
2533 khugepaged_enter_vma_merge(vma, vma->vm_flags);
2540 * vma is the first one with address < vma->vm_start. Have to extend vma.
2542 int expand_downwards(struct vm_area_struct *vma,
2545 struct mm_struct *mm = vma->vm_mm;
2554 prev = vma->vm_prev;
2563 if (unlikely(anon_vma_prepare(vma)))
2567 * vma->vm_start/vm_end cannot change under us because the caller
2571 anon_vma_lock_write(vma->anon_vma);
2574 if (address < vma->vm_start) {
2577 size = vma->vm_end - address;
2578 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2581 if (grow <= vma->vm_pgoff) {
2582 error = acct_stack_growth(vma, size, grow);
2588 * concurrent vma expansions.
2591 * in a mm share the same root anon vma.
2593 * against concurrent vma expansions.
2596 if (vma->vm_flags & VM_LOCKED)
2598 vm_stat_account(mm, vma->vm_flags, grow);
2599 anon_vma_interval_tree_pre_update_vma(vma);
2600 vma->vm_start = address;
2601 vma->vm_pgoff -= grow;
2602 anon_vma_interval_tree_post_update_vma(vma);
2603 vma_gap_update(vma);
2606 perf_event_mmap(vma);
2610 anon_vma_unlock_write(vma->anon_vma);
2611 khugepaged_enter_vma_merge(vma, vma->vm_flags);
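
expand_upwards() and expand_downwards() (lines 2450-2611) grow a stack vma under the anon_vma lock; the interesting bookkeeping is that growing upwards only moves vm_end, while growing downwards moves vm_start and must also pull vm_pgoff back by the number of pages added so the address-to-offset mapping stays consistent (the kernel checks grow <= vm_pgoff first, line 2581, so this cannot wrap). A small sketch of just that arithmetic; the page size and names are illustrative, and none of the locking or accounting is shown:

        #include <stdio.h>

        #define TOY_PAGE_SHIFT 12               /* assume 4 KiB pages */

        struct toy_vma {
                unsigned long vm_start, vm_end;
                unsigned long vm_pgoff;
        };

        /* Grow the mapping's top edge: only vm_end changes. */
        static void toy_expand_upwards(struct toy_vma *vma, unsigned long address)
        {
                if (address > vma->vm_end)
                        vma->vm_end = address;
        }

        /* Grow the mapping's bottom edge: vm_start moves down and vm_pgoff is
         * reduced by the same number of pages, so the page at the old vm_start
         * keeps its old offset. */
        static void toy_expand_downwards(struct toy_vma *vma, unsigned long address)
        {
                if (address < vma->vm_start) {
                        unsigned long grow = (vma->vm_start - address) >> TOY_PAGE_SHIFT;

                        vma->vm_start = address;
                        vma->vm_pgoff -= grow;
                }
        }

        int main(void)
        {
                struct toy_vma stack = { 0x7f0000, 0x800000, 0x7f0 };

                toy_expand_downwards(&stack, 0x7e0000);
                printf("[%#lx, %#lx) pgoff %#lx\n",
                       stack.vm_start, stack.vm_end, stack.vm_pgoff);   /* pgoff 0x7e0 */
                toy_expand_upwards(&stack, 0x801000);
                printf("[%#lx, %#lx)\n", stack.vm_start, stack.vm_end);
                return 0;
        }
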
2633 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2635 return expand_upwards(vma, address);
2641 struct vm_area_struct *vma, *prev;
2644 vma = find_vma_prev(mm, addr, &prev);
2645 if (vma && (vma->vm_start <= addr))
2646 return vma;
2655 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2657 return expand_downwards(vma, address);
2663 struct vm_area_struct *vma;
2667 vma = find_vma(mm, addr);
2668 if (!vma)
2670 if (vma->vm_start <= addr)
2671 return vma;
2672 if (!(vma->vm_flags & VM_GROWSDOWN))
2674 start = vma->vm_start;
2675 if (expand_stack(vma, addr))
2677 if (vma->vm_flags & VM_LOCKED)
2678 populate_vma_page_range(vma, addr, start, NULL);
2679 return vma;
2686 * Ok - we have the memory areas we should free on the vma list,
2687 * so release them, and do the vma updates.
2691 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2698 long nrpages = vma_pages(vma);
2700 if (vma->vm_flags & VM_ACCOUNT)
2702 vm_stat_account(mm, vma->vm_flags, -nrpages);
2703 vma = remove_vma(vma);
2704 } while (vma);
2715 struct vm_area_struct *vma, struct vm_area_struct *prev,
2725 unmap_vmas(&tlb, vma, start, end);
2735 for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
2742 free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2748 * Create a list of vma's touched by the unmap, removing them from the mm's
2749 * vma list as we go..
2752 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2759 vma->vm_prev = NULL;
2761 vma_rb_erase(vma, &mm->mm_rb);
2763 tail_vma = vma;
2764 vma = vma->vm_next;
2765 } while (vma && vma->vm_start < end);
2766 *insertion_point = vma;
2767 if (vma) {
2768 vma->vm_prev = prev;
2769 vma_gap_update(vma);
2782 if (vma && (vma->vm_flags & VM_GROWSDOWN))
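
detach_vmas_to_be_unmapped() (lines 2748-2782) unhooks every vma whose vm_start lies below 'end' from the mm's list and rbtree, leaving a private chain for unmap_region() to tear down. A sketch of the list-splitting part alone on a toy singly linked list; the rbtree erase, locking, and vmacache invalidation are omitted and the names are illustrative:

        #include <stdio.h>
        #include <stddef.h>

        struct toy_vma {
                unsigned long vm_start, vm_end;
                struct toy_vma *vm_next;
        };

        /* Detach the chain starting at 'vma' up to (but not including) the
         * first vma with vm_start >= end.  '*insertion_point' is the pointer
         * that used to reach 'vma' (the list head or prev->vm_next); it is
         * re-pointed at whatever survives.  Returns the detached chain. */
        static struct toy_vma *toy_detach(struct toy_vma **insertion_point,
                                          struct toy_vma *vma, unsigned long end)
        {
                struct toy_vma *detached = vma;
                struct toy_vma *tail = NULL;

                while (vma && vma->vm_start < end) {
                        tail = vma;
                        vma = vma->vm_next;
                }
                *insertion_point = vma;         /* list now skips the detached vmas */
                if (tail)
                        tail->vm_next = NULL;   /* terminate the private chain */
                return detached;
        }

        int main(void)
        {
                struct toy_vma c = { 0x5000, 0x6000, NULL };
                struct toy_vma b = { 0x3000, 0x4000, &c };
                struct toy_vma a = { 0x1000, 0x2000, &b };
                struct toy_vma *mmap_head = &a;

                struct toy_vma *gone = toy_detach(&mmap_head, &a, 0x5000);

                for (struct toy_vma *v = gone; v; v = v->vm_next)
                        printf("detached [%#lx, %#lx)\n", v->vm_start, v->vm_end);
                printf("list now starts at %#lx\n", mmap_head ? mmap_head->vm_start : 0);
                return 0;
        }
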
2793 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2799 if (vma->vm_ops && vma->vm_ops->split) {
2800 err = vma->vm_ops->split(vma, addr);
2805 new = vm_area_dup(vma);
2813 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2816 err = vma_dup_policy(vma, new);
2820 err = anon_vma_clone(new, vma);
2831 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2834 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2854 * Split a vma into two pieces at address 'addr', a new vma is allocated
2857 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2863 return __split_vma(mm, vma, addr, new_below);
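
__split_vma() (lines 2793-2863) duplicates the vma and then fixes up the copy: when the new vma becomes the upper half, its vm_pgoff is advanced by the number of pages between the original vm_start and the split address, so file offsets stay continuous across the split. A tiny worked sketch of that offset adjustment, with a 4 KiB page size assumed and none of the policy/anon_vma duplication shown:

        #include <stdio.h>

        #define TOY_PAGE_SHIFT 12               /* assume 4 KiB pages */

        struct toy_vma { unsigned long vm_start, vm_end, vm_pgoff; };

        /* Split 'vma' at 'addr': 'vma' keeps [vm_start, addr), the returned
         * copy covers [addr, vm_end) with its file offset shifted to match. */
        static struct toy_vma toy_split_high(struct toy_vma *vma, unsigned long addr)
        {
                struct toy_vma high = *vma;

                high.vm_start = addr;
                high.vm_pgoff += (addr - vma->vm_start) >> TOY_PAGE_SHIFT;
                vma->vm_end = addr;
                return high;
        }

        int main(void)
        {
                /* file pages [0x20, 0x28) mapped at [0x10000, 0x18000) */
                struct toy_vma vma = { 0x10000, 0x18000, 0x20 };
                struct toy_vma high = toy_split_high(&vma, 0x14000);

                printf("low  [%#lx, %#lx) pgoff %#lx\n",
                       vma.vm_start, vma.vm_end, vma.vm_pgoff);         /* pgoff 0x20 */
                printf("high [%#lx, %#lx) pgoff %#lx\n",
                       high.vm_start, high.vm_end, high.vm_pgoff);      /* pgoff 0x24 */
                return 0;
        }
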
2875 struct vm_area_struct *vma, *prev, *last;
2898 vma = find_vma(mm, start);
2899 if (!vma)
2901 prev = vma->vm_prev;
2902 /* we have start < vma->vm_end */
2905 if (vma->vm_start >= end)
2909 * If we need to split any vma, do it now to save pain later.
2913 * places tmp vma above, and higher split_vma places tmp vma below.
2915 if (start > vma->vm_start) {
2923 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2926 error = __split_vma(mm, vma, start, 0);
2929 prev = vma;
2939 vma = vma_next(mm, prev);
2951 int error = userfaultfd_unmap_prep(vma, start, end, uf);
2960 struct vm_area_struct *tmp = vma;
2972 if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
2978 unmap_region(mm, vma, prev, start, end);
2981 remove_vma_list(mm, vma);
3039 struct vm_area_struct *vma;
3062 vma = find_vma(mm, start);
3064 if (!vma || !(vma->vm_flags & VM_SHARED))
3067 if (start < vma->vm_start)
3070 if (start + size > vma->vm_end) {
3073 for (next = vma->vm_next; next; next = next->vm_next) {
3078 if (next->vm_file != vma->vm_file)
3081 if (next->vm_flags != vma->vm_flags)
3092 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
3093 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
3094 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
3098 if (vma->vm_flags & VM_LOCKED) {
3103 for (tmp = vma; tmp->vm_start >= start + size;
3117 file = get_file(vma->vm_file);
3118 ret = do_mmap(vma->vm_file, start, size,
3138 struct vm_area_struct *vma, *prev;
3172 vma = vma_merge(mm, prev, addr, addr + len, flags,
3174 if (vma)
3178 * create a vma struct for an anonymous mapping
3180 vma = vm_area_alloc(mm);
3181 if (!vma) {
3186 vma_set_anonymous(vma);
3187 vma->vm_start = addr;
3188 vma->vm_end = addr + len;
3189 vma->vm_pgoff = pgoff;
3190 vma->vm_flags = flags;
3191 vma->vm_page_prot = vm_get_page_prot(flags);
3192 vma_link(mm, vma, prev, rb_link, rb_parent);
3194 perf_event_mmap(vma);
3199 vma->vm_flags |= VM_SOFTDIRTY;
3240 struct vm_area_struct *vma;
3271 vma = mm->mmap;
3272 while (vma) {
3273 if (vma->vm_flags & VM_LOCKED)
3274 munlock_vma_pages_all(vma);
3275 vma = vma->vm_next;
3281 vma = mm->mmap;
3282 if (!vma) /* Can happen if dup_mmap() received an OOM */
3290 unmap_vmas(&tlb, vma, 0, -1);
3291 free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
3298 while (vma) {
3299 if (vma->vm_flags & VM_ACCOUNT)
3300 nr_accounted += vma_pages(vma);
3301 vma = remove_vma(vma);
3311 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3316 if (find_vma_links(mm, vma->vm_start, vma->vm_end,
3319 if ((vma->vm_flags & VM_ACCOUNT) &&
3320 security_vm_enough_memory_mm(mm, vma_pages(vma)))
3324 * The vm_pgoff of a purely anonymous vma should be irrelevant
3331 * vma, merges and splits can happen in a seamless way, just
3335 if (vma_is_anonymous(vma)) {
3336 BUG_ON(vma->anon_vma);
3337 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3340 vma_link(mm, vma, prev, rb_link, rb_parent);
3345 * Copy the vma structure to a new location in the same mm,
3352 struct vm_area_struct *vma = *vmap;
3353 unsigned long vma_start = vma->vm_start;
3354 struct mm_struct *mm = vma->vm_mm;
3360 * If anonymous vma has not yet been faulted, update new pgoff
3363 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3370 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
3371 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3372 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
3375 * Source vma may have been merged into new_vma
3381 * self during an mremap is if the vma hasn't
3383 * reset the dst vma->vm_pgoff to the
3392 *vmap = vma = new_vma;
3394 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3396 new_vma = vm_area_dup(vma);
3402 if (vma_dup_policy(vma, new_vma))
3404 if (anon_vma_clone(new_vma, vma))
3467 * Having a close hook prevents vma merging regardless of flags.
3469 static void special_mapping_close(struct vm_area_struct *vma)
3473 static const char *special_mapping_name(struct vm_area_struct *vma)
3475 return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3507 struct vm_area_struct *vma = vmf->vma;
3511 if (vma->vm_ops == &legacy_special_mapping_vmops) {
3512 pages = vma->vm_private_data;
3514 struct vm_special_mapping *sm = vma->vm_private_data;
3517 return sm->fault(sm, vmf->vma, vmf);
3542 struct vm_area_struct *vma;
3544 vma = vm_area_alloc(mm);
3545 if (unlikely(vma == NULL))
3548 vma->vm_start = addr;
3549 vma->vm_end = addr + len;
3551 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
3552 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3554 vma->vm_ops = ops;
3555 vma->vm_private_data = priv;
3557 ret = insert_vm_struct(mm, vma);
3561 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3563 perf_event_mmap(vma);
3565 return vma;
3568 vm_area_free(vma);
3572 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3575 return vma->vm_private_data == sm &&
3576 (vma->vm_ops == &special_mapping_vmops ||
3577 vma->vm_ops == &legacy_special_mapping_vmops);
3582 * Insert a new vma covering the given region, with the given flags.
3602 struct vm_area_struct *vma = __install_special_mapping(
3606 return PTR_ERR_OR_ZERO(vma);
3621 * anon_vma->root->rwsem. If some other vma in this mm shares
3653 * This operation locks against the VM for all pte/vma/mm related
3663 * altering the vma layout. It's also needed in write mode to avoid new
3671 * vma in this mm is backed by the same anon_vma or address_space.
3691 struct vm_area_struct *vma;
3698 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3701 if (vma->vm_file && vma->vm_file->f_mapping &&
3702 is_vm_hugetlb_page(vma))
3703 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3706 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3709 if (vma->vm_file && vma->vm_file->f_mapping &&
3710 !is_vm_hugetlb_page(vma))
3711 vm_lock_mapping(mm, vma->vm_file->f_mapping);
3714 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3717 if (vma->anon_vma)
3718 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3737 * the vma so the users using the anon_vma->rb_root will
3771 struct vm_area_struct *vma;
3777 for (vma = mm->mmap; vma; vma = vma->vm_next) {
3778 if (vma->anon_vma)
3779 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3781 if (vma->vm_file && vma->vm_file->f_mapping)
3782 vm_unlock_mapping(vma->vm_file->f_mapping);