Lines matching refs:vma (mm/mremap.c)

59 static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
84 static void take_rmap_locks(struct vm_area_struct *vma)
86 if (vma->vm_file)
87 i_mmap_lock_write(vma->vm_file->f_mapping);
88 if (vma->anon_vma)
89 anon_vma_lock_write(vma->anon_vma);
92 static void drop_rmap_locks(struct vm_area_struct *vma)
94 if (vma->anon_vma)
95 anon_vma_unlock_write(vma->anon_vma);
96 if (vma->vm_file)
97 i_mmap_unlock_write(vma->vm_file->f_mapping);
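The two helpers above (lines 84-97) fix the rmap lock ordering: the file rmap lock (i_mmap) is taken before the anon_vma lock, and they are dropped in the reverse order. A minimal sketch of how a caller is expected to bracket a page-table move when rmap walkers must not observe the range half-moved; move_range_bracketed() is a hypothetical wrapper for illustration, not kernel code:

	static void move_range_bracketed(struct vm_area_struct *vma,
					 bool need_rmap_locks)
	{
		if (need_rmap_locks)
			take_rmap_locks(vma);	/* i_mmap first, then anon_vma */

		/* ... clear entries at the old address, install them at the new ... */

		if (need_rmap_locks)
			drop_rmap_locks(vma);	/* anon_vma first, then i_mmap */
	}
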
115 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
120 struct mm_struct *mm = vma->vm_mm;
135 * - During exec() shift_arg_pages(), we use a specially tagged vma
138 * - During mremap(), new_vma is often known to be placed after vma
145 take_rmap_locks(vma);
156 flush_tlb_batched_pending(vma->vm_mm);
185 flush_tlb_range(vma, old_end - len, old_end);
191 drop_rmap_locks(vma);
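Between take_rmap_locks() at line 145 and drop_rmap_locks() at line 191, move_ptes() walks the range one PTE at a time. A rough sketch of that inner loop, assuming old_pte/new_pte already point at the first entry of each range and both page-table locks are held (soft-dirty handling and the deferred TLB-flush decision around lines 156/185 are left out):

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;
		/* Detach the entry from the old address... */
		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/* ...let the arch adjust it, then re-install it at the new one. */
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		set_pte_at(mm, new_addr, new_pte, pte);
	}
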
195 static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
199 struct mm_struct *mm = vma->vm_mm;
232 old_ptl = pmd_lock(vma->vm_mm, old_pmd);
245 flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
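move_normal_pmd() (lines 195-245) is the fast path: rather than copying up to a pmd's worth of PTEs it detaches the whole page-table page from the old pmd slot and re-attaches it at the new one, under both pmd locks (line 232 shows the first lock being taken). A sketch of the central steps only, with the lock handling and sanity checks omitted:

	pmd_t pmd = *old_pmd;			/* entry pointing at the PTE page  */

	pmd_clear(old_pmd);			/* detach it from the old address  */
	set_pmd_at(mm, new_addr, new_pmd, pmd);	/* re-attach it at the new address */
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
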
254 unsigned long move_page_tables(struct vm_area_struct *vma,
267 flush_cache_range(vma, old_addr, old_end);
269 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
283 old_pmd = get_old_pmd(vma->vm_mm, old_addr);
286 new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
294 take_rmap_locks(vma);
295 moved = move_huge_pmd(vma, old_addr, new_addr,
298 drop_rmap_locks(vma);
302 split_huge_pmd(vma, old_pmd, old_addr);
313 take_rmap_locks(vma);
314 moved = move_normal_pmd(vma, old_addr, new_addr,
316 drop_rmap_locks(vma);
324 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
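move_page_tables() (lines 254-324) walks the range in pmd-sized steps and picks one of three strategies per step. A condensed sketch of that dispatch, assuming old_pmd/new_pmd were looked up as at lines 283-286; the argument lists of the two pmd-level helpers are elided because the listing only shows their leading arguments:

	if (pmd_trans_huge(*old_pmd)) {		/* (simplified condition) */
		/* THP: move the huge pmd entry itself, rmap locks if needed. */
		moved = move_huge_pmd(vma, old_addr, new_addr, /* ... */);
	} else if (extent == PMD_SIZE) {
		/* A whole aligned pmd worth of PTEs: re-parent the PTE page. */
		moved = move_normal_pmd(vma, old_addr, new_addr, /* ... */);
	} else {
		/* Otherwise split any huge pmd and fall back to per-PTE copy. */
		split_huge_pmd(vma, old_pmd, old_addr);
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}
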
333 static unsigned long move_vma(struct vm_area_struct *vma,
339 struct mm_struct *mm = vma->vm_mm;
341 unsigned long vm_flags = vma->vm_flags;
352 * which may split one vma into three before unmapping.
361 * pages recently unmapped. But leave vma->vm_flags as it was,
362 * so KSM can come around to merge on vma and new_vma afterwards.
364 err = ksm_madvise(vma, old_addr, old_addr + old_len,
369 new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
370 new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
375 moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
379 } else if (vma->vm_ops && vma->vm_ops->mremap) {
380 err = vma->vm_ops->mremap(new_vma);
389 move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
391 vma = new_vma;
403 vma->vm_flags &= ~VM_ACCOUNT;
404 excess = vma->vm_end - vma->vm_start - old_len;
405 if (old_addr > vma->vm_start &&
406 old_addr + old_len < vma->vm_end)
420 vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);
422 /* Tell pfnmap has moved from this vma */
423 if (unlikely(vma->vm_flags & VM_PFNMAP))
424 untrack_pfn_moved(vma);
429 vma->vm_flags |= VM_ACCOUNT;
442 if (split && new_vma == vma)
445 /* We always clear VM_LOCKED[ONFAULT] on the old vma */
446 vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
453 /* OOM: unable to split vma, just get accounts right */
465 /* Restore VM_ACCOUNT if one or two pieces of vma left */
467 vma->vm_flags |= VM_ACCOUNT;
469 vma->vm_next->vm_flags |= VM_ACCOUNT;
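The error path in move_vma() (lines 375-391) is worth spelling out: if move_page_tables() moved fewer bytes than requested, or the driver's vm_ops->mremap() callback rejects the new vma, everything already moved is moved straight back before the function gives up. A sketch that follows the listed lines, with the continuation arguments filled in as assumptions:

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len)
		err = -ENOMEM;
	else if (vma->vm_ops && vma->vm_ops->mremap)
		err = vma->vm_ops->mremap(new_vma);

	if (unlikely(err)) {
		/*
		 * Pages are still mapped at exactly one of the two addresses,
		 * so the reverse move cannot fail; afterwards the new range
		 * is the one that gets unmapped instead of the old one.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
	}
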
480 struct vm_area_struct *vma = find_vma(mm, addr);
483 if (!vma || vma->vm_start > addr)
494 if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
499 if (flags & MREMAP_DONTUNMAP && (!vma_is_anonymous(vma) ||
500 vma->vm_flags & VM_SHARED))
503 if (is_vm_hugetlb_page(vma))
507 if (old_len > vma->vm_end - addr)
511 return vma;
514 pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
515 pgoff += vma->vm_pgoff;
519 if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
522 if (vma->vm_flags & VM_LOCKED) {
531 if (!may_expand_vm(mm, vma->vm_flags,
535 if (vma->vm_flags & VM_ACCOUNT) {
542 return vma;
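vma_to_resize() (lines 480-542) is pure validation before any move or resize happens: look up the vma, reject hugetlb mappings, ranges that spill past vma->vm_end, and growing requests on VM_DONTEXPAND/VM_PFNMAP mappings, then pre-check the locked-memory, address-space and VM_ACCOUNT limits. A compressed sketch of the check order; the specific error codes are filled in from memory of the kernel source and should be treated as illustrative:

	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);	/* nothing mapped at addr */
	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);	/* hugetlb not handled here */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);	/* range crosses the vma end */
	if (new_len > old_len &&
	    (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)))
		return ERR_PTR(-EFAULT);	/* mapping may not grow */
	/* ... RLIMIT_MEMLOCK, may_expand_vm() and VM_ACCOUNT checks ... */
	return vma;
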
552 struct vm_area_struct *vma;
572 * state of the vma's after it gets -ENOMEM.
575 * Worst-scenario case is when both vma's (new_addr and old_addr) get
597 vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
598 if (IS_ERR(vma)) {
599 ret = PTR_ERR(vma);
605 !may_expand_vm(mm, vma->vm_flags, old_len >> PAGE_SHIFT)) {
613 if (vma->vm_flags & VM_MAYSHARE)
616 ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
617 ((addr - vma->vm_start) >> PAGE_SHIFT),
626 ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, flags, uf,
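In the MREMAP_FIXED/MREMAP_DONTUNMAP path (mremap_to(), lines 552-626) the destination is validated with the same get_unmapped_area() hook that mmap() uses before move_vma() does the actual work. A sketch of that call, assuming map_flags starts at 0 and using a hypothetical cleanup label; note how the new mapping's page offset is the vma's pgoff plus the page offset of addr inside the vma, so file-backed mappings keep pointing at the same file pages:

	unsigned long map_flags = 0;

	if (flags & MREMAP_FIXED)
		map_flags |= MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len,
				vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (IS_ERR_VALUE(ret))
		goto out;	/* hypothetical label: undo charging and fail */
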
639 static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
641 unsigned long end = vma->vm_end + delta;
642 if (end < vma->vm_end) /* overflow */
644 if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
646 if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
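vma_expandable() decides whether the in-place expansion path may even be tried: the grown end must not wrap around, must not run into the next vma, and get_unmapped_area() must be willing to hand back the same, now larger, slot. Reassembled from the listed lines, with the missing return statements and the tail of the get_unmapped_area() call filled in as assumptions:

	static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
	{
		unsigned long end = vma->vm_end + delta;

		if (end < vma->vm_end)			/* overflow */
			return 0;
		if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
			return 0;
		if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
				      0, MAP_FIXED) & ~PAGE_MASK)
			return 0;
		return 1;
	}
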
664 struct vm_area_struct *vma;
748 vma = vma_to_resize(addr, old_len, new_len, flags, &charged);
749 if (IS_ERR(vma)) {
750 ret = PTR_ERR(vma);
756 if (old_len == vma->vm_end - addr) {
758 if (vma_expandable(vma, new_len - old_len)) {
761 if (vma_adjust(vma, vma->vm_start, addr + new_len,
762 vma->vm_pgoff, NULL)) {
767 vm_stat_account(mm, vma->vm_flags, pages);
768 if (vma->vm_flags & VM_LOCKED) {
785 if (vma->vm_flags & VM_MAYSHARE)
788 new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
789 vma->vm_pgoff +
790 ((addr - vma->vm_start) >> PAGE_SHIFT),
797 ret = move_vma(vma, addr, old_len, new_len, new_addr,
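The syscall body (lines 664 onward) ties the pieces together: expand in place via vma_expandable()/vma_adjust() when the request grows the mapping at its tail, otherwise fall back to move_vma(). From userspace all of this is driven by a single call; a minimal, self-contained example that exercises the MREMAP_MAYMOVE path (pure illustration, not part of the listing):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	int main(void)
	{
		size_t old_len = 4096, new_len = 2 * 4096;

		/* Anonymous mapping we will grow with mremap(2). */
		char *p = mmap(NULL, old_len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (p == MAP_FAILED) {
			perror("mmap");
			return 1;
		}
		strcpy(p, "hello");

		/*
		 * MREMAP_MAYMOVE lets the kernel relocate the mapping (the
		 * move_vma() path above) if it cannot be expanded in place;
		 * the contents move with the pages.
		 */
		char *q = mremap(p, old_len, new_len, MREMAP_MAYMOVE);
		if (q == MAP_FAILED) {
			perror("mremap");
			return 1;
		}
		printf("old %p, new %p, data \"%s\"\n", (void *)p, (void *)q, q);

		munmap(q, new_len);
		return 0;
	}
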