Lines matching refs: vma (mm/mremap.c)
72 static pud_t *alloc_new_pud(struct mm_struct *mm, struct vm_area_struct *vma,
86 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
92 pud = alloc_new_pud(mm, vma, addr);
105 static void take_rmap_locks(struct vm_area_struct *vma)
107 if (vma->vm_file)
108 i_mmap_lock_write(vma->vm_file->f_mapping);
109 if (vma->anon_vma)
110 anon_vma_lock_write(vma->anon_vma);
113 static void drop_rmap_locks(struct vm_area_struct *vma)
115 if (vma->anon_vma)
116 anon_vma_unlock_write(vma->anon_vma);
117 if (vma->vm_file)
118 i_mmap_unlock_write(vma->vm_file->f_mapping);
136 static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
141 struct mm_struct *mm = vma->vm_mm;
157 * - During exec() shift_arg_pages(), we use a specially tagged vma
160 * - During mremap(), new_vma is often known to be placed after vma
167 take_rmap_locks(vma);
186 flush_tlb_batched_pending(vma->vm_mm);
215 flush_tlb_range(vma, old_end - len, old_end);
222 drop_rmap_locks(vma);
236 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
240 struct mm_struct *mm = vma->vm_mm;
275 old_ptl = pmd_lock(vma->vm_mm, old_pmd);
287 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
295 static inline bool move_normal_pmd(struct vm_area_struct *vma,
304 static bool move_normal_pud(struct vm_area_struct *vma, unsigned long old_addr,
308 struct mm_struct *mm = vma->vm_mm;
324 old_ptl = pud_lock(vma->vm_mm, old_pud);
336 flush_tlb_range(vma, old_addr, old_addr + PUD_SIZE);
344 static inline bool move_normal_pud(struct vm_area_struct *vma,
353 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
357 struct mm_struct *mm = vma->vm_mm;
371 old_ptl = pud_lock(vma->vm_mm, old_pud);
385 flush_pud_tlb_range(vma, old_addr, old_addr + HPAGE_PUD_SIZE);
393 static bool move_huge_pud(struct vm_area_struct *vma, unsigned long old_addr,
451 static bool move_pgt_entry(enum pgt_entry entry, struct vm_area_struct *vma,
459 take_rmap_locks(vma);
463 moved = move_normal_pmd(vma, old_addr, new_addr, old_entry,
467 moved = move_normal_pud(vma, old_addr, new_addr, old_entry,
472 move_huge_pmd(vma, old_addr, new_addr, old_entry,
477 move_huge_pud(vma, old_addr, new_addr, old_entry,
487 drop_rmap_locks(vma);
492 unsigned long move_page_tables(struct vm_area_struct *vma,
507 if (is_vm_hugetlb_page(vma))
508 return move_hugetlb_page_tables(vma, new_vma, old_addr,
511 flush_cache_range(vma, old_addr, old_end);
512 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
524 old_pud = get_old_pud(vma->vm_mm, old_addr);
527 new_pud = alloc_new_pud(vma->vm_mm, vma, new_addr);
532 move_pgt_entry(HPAGE_PUD, vma, old_addr, new_addr,
539 if (move_pgt_entry(NORMAL_PUD, vma, old_addr, new_addr,
545 old_pmd = get_old_pmd(vma->vm_mm, old_addr);
548 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
555 move_pgt_entry(HPAGE_PMD, vma, old_addr, new_addr,
558 split_huge_pmd(vma, old_pmd, old_addr);
565 if (move_pgt_entry(NORMAL_PMD, vma, old_addr, new_addr,
573 if (move_ptes(vma, old_pmd, old_addr, old_addr + extent,
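
The move_page_tables() fragments above (492-573) show the walk that actually relocates a mapping: whole PUD and PMD entries are moved where possible, huge PMDs are either moved as a unit or split, and move_ptes() is the PTE-level fallback. From userspace this runs whenever mremap() has to move a mapping; the contents survive because page-table entries are relocated rather than the pages being copied. A minimal demonstration, assuming Linux with glibc (the placeholder mapping and the 1 MiB size are only for illustration):

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 20;	/* 1 MiB */

	/* Source mapping, filled with a recognizable pattern. */
	char *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (src == MAP_FAILED) { perror("mmap src"); exit(1); }
	memset(src, 0xab, len);

	/* Placeholder reservation; MREMAP_FIXED replaces whatever is here. */
	char *dst = mmap(NULL, len, PROT_NONE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (dst == MAP_FAILED) { perror("mmap dst"); exit(1); }

	/* Force a move: the kernel relocates src's page tables to dst. */
	char *moved = mremap(src, len, len,
			     MREMAP_MAYMOVE | MREMAP_FIXED, dst);
	if (moved == MAP_FAILED) { perror("mremap"); exit(1); }

	assert(moved == dst);
	assert(moved[0] == (char)0xab && moved[len - 1] == (char)0xab);
	printf("moved to %p, contents intact\n", (void *)moved);
	return 0;
}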
583 static unsigned long move_vma(struct vm_area_struct *vma,
590 struct mm_struct *mm = vma->vm_mm;
592 unsigned long vm_flags = vma->vm_flags;
604 * which may split one vma into three before unmapping.
612 if (vma->vm_ops && vma->vm_ops->may_split) {
613 if (vma->vm_start != old_addr)
614 err = vma->vm_ops->may_split(vma, old_addr);
615 if (!err && vma->vm_end != old_addr + old_len)
616 err = vma->vm_ops->may_split(vma, old_addr + old_len);
625 * pages recently unmapped. But leave vma->vm_flags as it was,
626 * so KSM can come around to merge on vma and new_vma afterwards.
628 err = ksm_madvise(vma, old_addr, old_addr + old_len,
638 vma_start_write(vma);
639 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
640 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
648 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
652 } else if (vma->vm_ops && vma->vm_ops->mremap) {
653 err = vma->vm_ops->mremap(new_vma);
662 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
664 vma = new_vma;
672 if (is_vm_hugetlb_page(vma)) {
673 clear_vma_resv_huge_pages(vma);
678 vm_flags_clear(vma, VM_ACCOUNT);
679 if (vma->vm_start < old_addr)
680 account_start = vma->vm_start;
681 if (vma->vm_end > old_addr + old_len)
682 account_end = vma->vm_end;
695 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
697 /* Tell pfnmap has moved from this vma */
698 if (unlikely(vma->vm_flags & VM_PFNMAP))
699 untrack_pfn_clear(vma);
702 /* We always clear VM_LOCKED[ONFAULT] on the old vma */
703 vm_flags_clear(vma, VM_LOCKED_MASK);
706 * anon_vma links of the old vma is no longer needed after its page
709 if (new_vma != vma && vma->vm_start == old_addr &&
710 vma->vm_end == (old_addr + old_len))
711 unlink_anon_vmas(vma);
719 /* OOM: unable to split vma, just get accounts right */
732 /* Restore VM_ACCOUNT if one or two pieces of vma left */
734 vma = vma_prev(&vmi);
735 vm_flags_set(vma, VM_ACCOUNT);
739 vma = vma_next(&vmi);
740 vm_flags_set(vma, VM_ACCOUNT);
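
The tail of move_vma() just above (702-711: clearing VM_LOCKED[ONFAULT] on the old vma and dropping its anon_vma links once the page table has moved) is the handling for MREMAP_DONTUNMAP, where the old range stays mapped but its pages follow the move. A small userspace sketch, assuming Linux 5.7+ and a private anonymous mapping; the fallback #define is only for older libc headers:

#define _GNU_SOURCE
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MREMAP_DONTUNMAP	/* older libc headers may not define it */
#define MREMAP_DONTUNMAP 4
#endif

int main(void)
{
	size_t len = 1UL << 20;

	char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (old == MAP_FAILED) { perror("mmap"); exit(1); }
	memset(old, 0x5a, len);

	/* Move the pages to a new address but keep the old range mapped. */
	char *new = mremap(old, len, len, MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
	if (new == MAP_FAILED) { perror("mremap"); exit(1); }

	assert(new[0] == (char)0x5a);	/* the data followed the move */
	assert(old[0] == 0);		/* old range now faults in zero pages */
	printf("old=%p (empty), new=%p (data)\n", (void *)old, (void *)new);
	return 0;
}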
750 struct vm_area_struct *vma;
753 vma = vma_lookup(mm, addr);
754 if (!vma)
765 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
771 (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
775 if (old_len > vma->vm_end - addr)
779 return vma;
782 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
783 pgoff += vma->vm_pgoff;
787 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
790 if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len))
793 if (!may_expand_vm(mm, vma->vm_flags,
797 return vma;
807 struct vm_area_struct *vma;
826 * state of the vma's after it gets -ENOMEM.
829 * Worst-scenario case is when both vma's (new_addr and old_addr) get
851 vma = vma_to_resize(addr, old_len, new_len, flags);
852 if (IS_ERR(vma)) {
853 ret = PTR_ERR(vma);
859 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
867 if (vma->vm_flags & VM_MAYSHARE)
870 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
871 ((addr - vma->vm_start) >> PAGE_SHIFT),
880 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
887 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
889 unsigned long end = vma->vm_end + delta;
891 if (end < vma->vm_end) /* overflow */
893 if (find_vma_intersection(vma->vm_mm, vma->vm_end, end))
895 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
913 struct vm_area_struct *vma;
964 vma = vma_lookup(mm, addr);
965 if (!vma) {
970 if (is_vm_hugetlb_page(vma)) {
971 struct hstate *h __maybe_unused = hstate_vma(vma);
1023 vma = vma_to_resize(addr, old_len, new_len, flags);
1024 if (IS_ERR(vma)) {
1025 ret = PTR_ERR(vma);
1031 if (old_len == vma->vm_end - addr) {
1033 if (vma_expandable(vma, new_len - old_len)) {
1037 pgoff_t extension_pgoff = vma->vm_pgoff +
1038 ((extension_start - vma->vm_start) >> PAGE_SHIFT);
1041 if (vma->vm_flags & VM_ACCOUNT) {
1050 * are adding to the already existing vma, vma_merge()
1052 * vma (expand operation itself) and possibly also with
1053 * the next vma if it becomes adjacent to the expanded
1054 * vma and otherwise compatible.
1056 vma = vma_merge(&vmi, mm, vma, extension_start,
1057 extension_end, vma->vm_flags, vma->anon_vma,
1058 vma->vm_file, extension_pgoff, vma_policy(vma),
1059 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
1060 if (!vma) {
1066 vm_stat_account(mm, vma->vm_flags, pages);
1067 if (vma->vm_flags & VM_LOCKED) {
1084 if (vma->vm_flags & VM_MAYSHARE)
1087 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
1088 vma->vm_pgoff +
1089 ((addr - vma->vm_start) >> PAGE_SHIFT),
1096 ret = move_vma(vma, addr, old_len, new_len, new_addr,
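
The syscall path at the end of the listing (913 onwards) first tries to grow a mapping in place, using vma_expandable() and the vma_merge() call around 1031-1067, and only falls back to get_unmapped_area() plus move_vma() when that fails. A userspace sketch of the common grow case, again assuming Linux with glibc; whether the mapping actually moves depends on what happens to be mapped after it:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t old_len = 1UL << 20;	/* 1 MiB */
	size_t new_len = 4UL << 20;	/* 4 MiB */

	char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) { perror("mmap"); exit(1); }
	memset(p, 0x42, old_len);

	/*
	 * Grow the mapping. If the range after it is free the vma is
	 * expanded in place; otherwise MREMAP_MAYMOVE lets the kernel
	 * relocate it via move_vma()/move_page_tables().
	 */
	char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
	if (q == MAP_FAILED) { perror("mremap"); exit(1); }

	printf("%s, first old byte: 0x%02x\n",
	       q == p ? "expanded in place" : "moved", (unsigned char)q[0]);
	return 0;
}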