Lines matching refs: vma (identifier search over the Linux kernel's mm/madvise.c; each entry shows the source line number followed by the matching line)

46  * Any behaviour which results in changes to the vma->vm_flags needs to
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
96 mmap_assert_locked(vma->vm_mm);
98 return vma->anon_name;
102 static int replace_anon_vma_name(struct vm_area_struct *vma,
105 struct anon_vma_name *orig_name = anon_vma_name(vma);
108 vma->anon_name = NULL;
116 vma->anon_name = anon_vma_name_reuse(anon_name);
122 static int replace_anon_vma_name(struct vm_area_struct *vma,
132 * Update the vm_flags on region of a vma, splitting it or merging it as
135 * anon_name belongs to a valid vma because this function might free that vma.
137 static int madvise_update_vma(struct vm_area_struct *vma,
142 struct mm_struct *mm = vma->vm_mm;
147 if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
148 *prev = vma;
152 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
154 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
155 vma->vm_userfaultfd_ctx, anon_name);
157 vma = *prev;
161 *prev = vma;
163 if (start != vma->vm_start) {
164 error = split_vma(&vmi, vma, start, 1);
169 if (end != vma->vm_end) {
170 error = split_vma(&vmi, vma, end, 0);
177 vma_start_write(vma);
178 vm_flags_reset(vma, new_flags);
179 if (!vma->vm_file || vma_is_anon_shmem(vma)) {
180 error = replace_anon_vma_name(vma, anon_name);
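
The split-or-merge behaviour documented at line 132 is observable from userspace. Below is a minimal sketch (assuming Linux with procfs mounted): applying a flag-changing advice such as MADV_DONTFORK to only the middle page of a three-page anonymous mapping forces madvise_update_vma() to call split_vma() twice, which /proc/self/maps then shows as three separate regions.

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static void dump_maps(const char *tag)
    {
        FILE *f = fopen("/proc/self/maps", "r");
        char line[256];

        printf("== %s ==\n", tag);
        while (f && fgets(line, sizeof(line), f))
            fputs(line, stdout);
        if (f)
            fclose(f);
    }

    int main(void)
    {
        size_t page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        dump_maps("before");    /* look for one 3-page anonymous vma */

        /* Changing vm_flags on the middle page only: the vma cannot
         * stay whole, so it is split and three vmas result. */
        if (madvise(p + page, page, MADV_DONTFORK))
            perror("madvise(MADV_DONTFORK)");

        dump_maps("after");     /* same range, now three vmas */
        return 0;
    }
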
192 struct vm_area_struct *vma = walk->private;
204 ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
220 vma, addr, &splug);
238 static void shmem_swapin_range(struct vm_area_struct *vma,
242 XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
243 pgoff_t end_index = linear_page_index(vma, end) - 1;
259 addr = vma->vm_start +
260 ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT);
265 vma, addr, &splug);
279 static long madvise_willneed(struct vm_area_struct *vma,
283 struct mm_struct *mm = vma->vm_mm;
284 struct file *file = vma->vm_file;
287 *prev = vma;
290 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
296 shmem_swapin_range(vma, start, end, file->f_mapping);
312 * explicitly grab a reference because the vma (and hence the
313 * vma's reference to the file) can go away as soon as we drop
318 offset = (loff_t)(start - vma->vm_start)
319 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
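
madvise_willneed() above is what services the MADV_WILLNEED hint. A minimal userspace sketch of triggering it on a file mapping (the file path is whatever the caller supplies; error handling is trimmed to the essentials):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        struct stat st;
        int fd;
        char *p;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0 || fstat(fd, &st) < 0) {
            perror(argv[1]);
            return 1;
        }
        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* Start readahead for the whole file now, so later accesses
         * are more likely to hit the page cache. */
        if (madvise(p, st.st_size, MADV_WILLNEED))
            perror("madvise(MADV_WILLNEED)");

        /* ... use p[0 .. st.st_size) ... */
        munmap(p, st.st_size);
        close(fd);
        return 0;
    }
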
327 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
329 if (!vma->vm_file)
338 file_inode(vma->vm_file)) ||
339 file_permission(vma->vm_file, MAY_WRITE) == 0;
350 struct vm_area_struct *vma = walk->vma;
360 pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
361 !can_do_file_pageout(vma);
369 ptl = pmd_trans_huge_lock(pmd, vma);
407 pmdp_invalidate(vma, addr, pmd);
437 start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
451 folio = vm_normal_folio(vma, addr, ptent);
545 struct vm_area_struct *vma,
553 tlb_start_vma(tlb, vma);
554 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
555 tlb_end_vma(tlb, vma);
558 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
560 return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
563 static long madvise_cold(struct vm_area_struct *vma,
567 struct mm_struct *mm = vma->vm_mm;
570 *prev = vma;
571 if (!can_madv_lru_vma(vma))
576 madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
583 struct vm_area_struct *vma,
591 tlb_start_vma(tlb, vma);
592 walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
593 tlb_end_vma(tlb, vma);
596 static long madvise_pageout(struct vm_area_struct *vma,
600 struct mm_struct *mm = vma->vm_mm;
603 *prev = vma;
604 if (!can_madv_lru_vma(vma))
613 if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
614 (vma->vm_flags & VM_MAYSHARE)))
619 madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
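
A sketch of the two behaviours served by madvise_cold() and madvise_pageout(). Both exist since Linux 5.4, so older UAPI headers may lack the constants; the fallback values below are the ones from linux/mman.h. The checks at lines 613-614 gate pageout of file mappings; anonymous memory, as used here, is always eligible.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    #ifndef MADV_COLD
    #define MADV_COLD     20    /* value from the Linux UAPI headers */
    #endif
    #ifndef MADV_PAGEOUT
    #define MADV_PAGEOUT  21
    #endif

    int main(void)
    {
        size_t len = 64 << 20;  /* 64 MiB scratch area */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(buf, 0xab, len);         /* fault everything in */

        /* Done with the data for now: deactivate it so it is
         * reclaimed first under memory pressure ... */
        if (madvise(buf, len, MADV_COLD))
            perror("madvise(MADV_COLD)");

        /* ... or reclaim it right away (swap out / drop clean pages)
         * while keeping the mapping and its contents valid. */
        if (madvise(buf, len, MADV_PAGEOUT))
            perror("madvise(MADV_PAGEOUT)");

        return 0;
    }
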
631 struct vm_area_struct *vma = walk->vma;
640 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
674 folio = vm_normal_folio(vma, addr, ptent);
768 static int madvise_free_single_vma(struct vm_area_struct *vma,
771 struct mm_struct *mm = vma->vm_mm;
775 /* MADV_FREE works for only anon vma at the moment */
776 if (!vma_is_anonymous(vma))
779 range.start = max(vma->vm_start, start_addr);
780 if (range.start >= vma->vm_end)
782 range.end = min(vma->vm_end, end_addr);
783 if (range.end <= vma->vm_start)
793 tlb_start_vma(&tlb, vma);
794 walk_page_range(vma->vm_mm, range.start, range.end,
796 tlb_end_vma(&tlb, vma);
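
madvise_free_single_vma() implements MADV_FREE, which per the comment at line 775 handles only anonymous vmas. A minimal sketch of the userspace semantics: the pages become disposable, the kernel may reclaim them lazily without swapping, and a later write to a page cancels its free state.

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 16 << 20;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(buf, 1, len);

        /* Mark the pages as disposable.  Until the kernel actually
         * reclaims them, reads still see the old contents. */
        if (madvise(buf, len, MADV_FREE))
            perror("madvise(MADV_FREE)");

        buf[0] = 2;     /* this page is live again; untouched pages
                         * may come back zero-filled after reclaim */
        return 0;
    }
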
822 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
825 zap_page_range_single(vma, start, end - start, NULL);
829 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
834 if (!is_vm_hugetlb_page(vma)) {
840 return !(vma->vm_flags & forbidden);
845 if (start & ~huge_page_mask(hstate_vma(vma)))
854 *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
859 static long madvise_dontneed_free(struct vm_area_struct *vma,
864 struct mm_struct *mm = vma->vm_mm;
866 *prev = vma;
867 if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
873 if (!userfaultfd_remove(vma, start, end)) {
877 vma = vma_lookup(mm, start);
878 if (!vma)
881 * Potential end adjustment for hugetlb vma is OK as
882 * the check below keeps end within vma.
884 if (!madvise_dontneed_free_valid_vma(vma, start, &end,
887 if (end > vma->vm_end) {
889 * Don't fail if end > vma->vm_end. If the old
890 * vma was split while the mmap_lock was
894 * adjacent next vma that we'll walk
897 * end-vma->vm_end range, but the manager can
900 end = vma->vm_end;
906 return madvise_dontneed_single_vma(vma, start, end);
908 return madvise_free_single_vma(vma, start, end);
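
A sketch contrasting the two cases dispatched here: MADV_DONTNEED (via madvise_dontneed_single_vma() and zap_page_range_single() at line 825) drops the pages immediately, so the very next read of anonymous memory observes zero-filled pages, whereas MADV_FREE above is lazy.

    #include <assert.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096 * 4;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        buf[0] = 42;
        if (madvise(buf, len, MADV_DONTNEED)) {
            perror("madvise(MADV_DONTNEED)");
            return 1;
        }
        assert(buf[0] == 0);    /* contents are gone, mapping remains */
        puts("page was dropped and refaulted as zero");
        return 0;
    }
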
913 static long madvise_populate(struct vm_area_struct *vma,
919 struct mm_struct *mm = vma->vm_mm;
924 *prev = vma;
931 if (!vma || start >= vma->vm_end) {
932 vma = vma_lookup(mm, start);
933 if (!vma)
937 tmp_end = min_t(unsigned long, end, vma->vm_end);
939 pages = faultin_vma_page_range(vma, start, tmp_end, write,
945 vma = NULL;
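
madvise_populate() backs MADV_POPULATE_READ and MADV_POPULATE_WRITE (Linux 5.14+). A minimal sketch; the fallback constants are the UAPI values for headers that predate them:

    #include <stdio.h>
    #include <sys/mman.h>

    #ifndef MADV_POPULATE_READ
    #define MADV_POPULATE_READ   22    /* values from the Linux UAPI headers */
    #endif
    #ifndef MADV_POPULATE_WRITE
    #define MADV_POPULATE_WRITE  23
    #endif

    int main(void)
    {
        size_t len = 8 << 20;
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* Like MAP_POPULATE, but usable at any time after mmap().
         * POPULATE_WRITE also breaks COW, so later writes do not
         * fault either. */
        if (madvise(buf, len, MADV_POPULATE_WRITE))
            perror("madvise(MADV_POPULATE_WRITE)");

        buf[0] = 1;     /* no page fault expected here */
        return 0;
    }
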
974 static long madvise_remove(struct vm_area_struct *vma,
981 struct mm_struct *mm = vma->vm_mm;
985 if (vma->vm_flags & VM_LOCKED)
988 f = vma->vm_file;
994 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
997 offset = (loff_t)(start - vma->vm_start)
998 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
1002 * explicitly grab a reference because the vma (and hence the
1003 * vma's reference to the file) can go away as soon as we drop
1007 if (userfaultfd_remove(vma, start, end)) {
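
madvise_remove() punches a hole in the backing store. Per the check at line 994 the mapping must be both VM_SHARED and VM_WRITE, and the filesystem must support hole punching; a memfd (tmpfs) does, which this sketch uses so it is self-contained:

    #define _GNU_SOURCE             /* for memfd_create() */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 4096 * 16;
        int fd = memfd_create("hole-demo", 0);
        char *p;

        if (fd < 0 || ftruncate(fd, len)) {
            perror("memfd");
            return 1;
        }
        p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(p, 0xff, len);

        /* Free the backing store of the first four pages; they read
         * back as zeroes and no longer consume memory. */
        if (madvise(p, 4096 * 4, MADV_REMOVE))
            perror("madvise(MADV_REMOVE)");

        printf("first byte after hole punch: %d\n", p[0]);   /* 0 */
        return 0;
    }
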
1020 * Apply an madvise behavior to a region of a vma. madvise_update_vma
1024 static int madvise_vma_behavior(struct vm_area_struct *vma,
1031 unsigned long new_flags = vma->vm_flags;
1035 return madvise_remove(vma, prev, start, end);
1037 return madvise_willneed(vma, prev, start, end);
1039 return madvise_cold(vma, prev, start, end);
1041 return madvise_pageout(vma, prev, start, end);
1045 return madvise_dontneed_free(vma, prev, start, end, behavior);
1048 return madvise_populate(vma, prev, start, end, behavior);
1062 if (vma->vm_flags & VM_IO)
1068 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1079 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
1085 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1091 error = hugepage_madvise(vma, &new_flags, behavior);
1096 return madvise_collapse(vma, prev, start, end);
1099 anon_name = anon_vma_name(vma);
1101 error = madvise_update_vma(vma, prev, start, end, new_flags,
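
Two of the flag-changing behaviours dispatched by madvise_vma_behavior() above are MADV_MERGEABLE (ksm_madvise(), line 1085) and MADV_HUGEPAGE (hugepage_madvise(), line 1091); both only adjust new_flags, and madvise_update_vma() at line 1101 applies the result. A sketch, assuming a kernel with CONFIG_KSM and CONFIG_TRANSPARENT_HUGEPAGE (otherwise the calls fail with EINVAL, which perror() reports):

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4 << 20;   /* room for a PMD-sized THP on x86-64 */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        if (madvise(p, len, MADV_HUGEPAGE))     /* sets VM_HUGEPAGE */
            perror("madvise(MADV_HUGEPAGE)");
        if (madvise(p, len, MADV_MERGEABLE))    /* sets VM_MERGEABLE for KSM */
            perror("madvise(MADV_MERGEABLE)");
        return 0;
    }
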
1223 * between the current vma and the original range. Any unmapped regions in the
1231 int (*visit)(struct vm_area_struct *vma,
1235 struct vm_area_struct *vma;
1245 vma = find_vma_prev(mm, start, &prev);
1246 if (vma && start > vma->vm_start)
1247 prev = vma;
1253 if (!vma)
1256 /* Here start < (end|vma->vm_end). */
1257 if (start < vma->vm_start) {
1259 start = vma->vm_start;
1264 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1265 tmp = vma->vm_end;
1269 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1270 error = visit(vma, &prev, start, tmp, arg);
1279 vma = find_vma(mm, prev->vm_end);
1281 vma = find_vma(mm, start);
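
The walker above applies the visitor to every vma overlapping [start, end) but records unmapped gaps, which surface to userspace as ENOMEM; the advice is still applied to the mapped parts. A minimal sketch of that contract:

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        munmap(p + page, page);         /* poke a hole in the middle */

        /* The first and third pages are still processed; the gap is
         * reported once the whole range has been walked. */
        if (madvise(p, 3 * page, MADV_DONTNEED) && errno == ENOMEM)
            puts("ENOMEM: range crossed an unmapped gap, as expected");
        return 0;
    }
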
1288 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1296 if (vma->vm_file && !vma_is_anon_shmem(vma))
1299 error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
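
The userspace entry point for the anon_vma_name machinery (lines 94-122 and madvise_vma_anon_name() above) is prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), which reaches madvise_update_vma() at line 1299; the label then appears in /proc/self/maps as "[anon:heap-arena]". A sketch, assuming CONFIG_ANON_VMA_NAME (Linux 5.17+); the fallback constants are the values from linux/prctl.h, and "heap-arena" is just an example label:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_VMA
    #define PR_SET_VMA            0x53564d41    /* from linux/prctl.h */
    #define PR_SET_VMA_ANON_NAME  0
    #endif

    int main(void)
    {
        size_t len = 1 << 20;
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
                  (unsigned long)p, len, (unsigned long)"heap-arena"))
            perror("prctl(PR_SET_VMA_ANON_NAME)");
        return 0;
    }
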