Lines matching refs:vma (identifier cross-references for vma; the fragments below come from the Linux kernel's mm/mprotect.c, and the number at the start of each line is its line number in that source file)
43 bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
48 if (WARN_ON_ONCE(!(vma->vm_flags & VM_WRITE)))
56 if (vma_soft_dirty_enabled(vma) && !pte_soft_dirty(pte))
60 if (userfaultfd_pte_wp(vma, pte))
63 if (!(vma->vm_flags & VM_SHARED)) {
70 page = vm_normal_page(vma, addr, pte);
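
Taken together, the checks at lines 43-70 decide whether mprotect() may set the write bit on a PTE directly instead of leaving the upgrade to the page-fault path: the VMA must be writable, soft-dirty tracking and uffd-wp must not be bypassed, and a private (non-VM_SHARED) mapping additionally requires that the page is exclusively owned. A minimal userspace model of that predicate; every name here (vma_model, pte_model, the helper) is invented for illustration and is not kernel API:

/* Hedged userspace model of the checks at lines 43-70; all names are
 * invented for illustration, NOT the kernel API. */
#include <stdbool.h>
#include <stdio.h>

#define VM_WRITE	0x2
#define VM_SHARED	0x8

struct vma_model {
	unsigned long vm_flags;
	bool soft_dirty_tracking;	/* stands in for vma_soft_dirty_enabled() */
	bool uffd_wp;			/* stands in for userfaultfd_pte_wp() */
};

struct pte_model {
	bool soft_dirty;
	bool page_exclusive;	/* anon page owned by this mapping alone */
};

static bool can_upgrade_writable(const struct vma_model *vma,
				 const struct pte_model *pte)
{
	if (!(vma->vm_flags & VM_WRITE))
		return false;			/* VMA must allow writes at all */
	if (vma->soft_dirty_tracking && !pte->soft_dirty)
		return false;			/* a write must still set soft-dirty */
	if (vma->uffd_wp)
		return false;			/* uffd-wp must observe the fault */
	if (!(vma->vm_flags & VM_SHARED))
		return pte->page_exclusive;	/* private: no CoW sharing allowed */
	return true;
}

int main(void)
{
	struct vma_model vma = { .vm_flags = VM_WRITE };
	struct pte_model pte = { .page_exclusive = true };

	printf("can upgrade: %d\n", can_upgrade_writable(&vma, &pte));
	return 0;
}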
85 struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
97 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
102 if (prot_numa && !(vma->vm_flags & VM_SHARED) &&
103 atomic_read(&vma->vm_mm->mm_users) == 1)
106 flush_tlb_batched_pending(vma->vm_mm);
126 page = vm_normal_page(vma, addr, oldpte);
131 if (is_cow_mapping(vma->vm_flags) &&
165 oldpte = ptep_modify_prot_start(vma, addr, pte);
188 can_change_pte_writable(vma, addr, ptent))
189 ptent = pte_mkwrite(ptent, vma);
191 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
245 pte_clear(vma->vm_mm, addr, pte);
259 set_pte_at(vma->vm_mm, addr, pte, newpte);
273 if (userfaultfd_wp_use_markers(vma)) {
280 set_pte_at(vma->vm_mm, addr, pte,
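
change_pte_range() (lines 85-280) walks the PTEs under the page-table lock and, among other things, must preserve soft-dirty state (line 56 above shows the guard on the fast write-upgrade path). The sketch below pokes the same machinery from userspace: it clears the soft-dirty bits through /proc/self/clear_refs, dirties a page, and reads bit 55 of the page's pagemap entry. Requires CONFIG_MEM_SOFT_DIRTY; error checks are omitted for brevity.

/* Userspace view of the soft-dirty state that change_pte_range() must
 * preserve. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	p[0] = 1;				/* fault the page in */

	int cr = open("/proc/self/clear_refs", O_WRONLY);
	write(cr, "4", 1);			/* "4" clears soft-dirty bits */
	close(cr);

	p[0] = 2;				/* re-dirty the page */

	int pm = open("/proc/self/pagemap", O_RDONLY);
	uint64_t ent = 0;
	pread(pm, &ent, sizeof(ent), ((uintptr_t)p / psz) * sizeof(ent));
	close(pm);

	printf("soft-dirty bit: %d\n", (int)((ent >> 55) & 1));
	return 0;
}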
297 pgtable_split_needed(struct vm_area_struct *vma, unsigned long cp_flags)
304 return (cp_flags & MM_CP_UFFD_WP) && !vma_is_anonymous(vma);
312 pgtable_populate_needed(struct vm_area_struct *vma, unsigned long cp_flags)
319 return userfaultfd_wp_use_markers(vma);
328 #define change_pmd_prepare(vma, pmd, cp_flags) \
331 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
332 if (pte_alloc(vma->vm_mm, pmd)) \
343 #define change_prepare(vma, high, low, addr, cp_flags) \
346 if (unlikely(pgtable_populate_needed(vma, cp_flags))) { \
347 low##_t *p = low##_alloc(vma->vm_mm, high, addr); \
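
The change_pmd_prepare()/change_prepare() macros (lines 328-347) use token pasting so that one body can allocate whichever page-table level is named: low##_alloc expands to pmd_alloc, pud_alloc, and so on, and the allocation only happens when uffd-wp markers require empty page tables to be populated. A toy userspace version of the pasting pattern, using GNU statement expressions as the kernel macro does; pmd_alloc()/pud_alloc() below are simplified stand-ins (the toy leaks its allocations, where the kernel keeps the table):

#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy; } pmd_t;
typedef struct { int dummy; } pud_t;

static pmd_t *pmd_alloc(void) { return calloc(1, sizeof(pmd_t)); }
static pud_t *pud_alloc(void) { return calloc(1, sizeof(pud_t)); }

#define table_prepare(low)			\
	({					\
		low##_t *p = low##_alloc();	\
		p ? 0 : -12;	/* -ENOMEM */	\
	})

int main(void)
{
	printf("pmd prepare: %d\n", table_prepare(pmd));	/* -> pmd_alloc() */
	printf("pud prepare: %d\n", table_prepare(pud));	/* -> pud_alloc() */
	return 0;
}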
355 struct vm_area_struct *vma, pud_t *pud, unsigned long addr,
373 ret = change_pmd_prepare(vma, pmd, cp_flags);
386 vma->vm_mm, addr, end);
393 pgtable_split_needed(vma, cp_flags)) {
394 __split_huge_pmd(vma, pmd, addr, false, NULL);
400 ret = change_pmd_prepare(vma, pmd, cp_flags);
406 ret = change_huge_pmd(tlb, vma, pmd,
421 ret = change_pte_range(tlb, vma, pmd, addr, next, newprot,
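
In change_pmd_range() (lines 355-421), a transparent huge pmd is either changed in place by change_huge_pmd() or split first: the condition around line 393 forces __split_huge_pmd() when pgtable_split_needed() says uffd-wp markers need PTE granularity, or when the mprotect range covers only part of the huge page. The userspace sketch below triggers that partial-coverage case; alignment handling is simplified, THP must be enabled, and error checks are omitted.

/* Triggering the split path: fault in a (hopefully) huge page, then
 * protect a single 4KiB page inside it. */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE (2UL << 20)	/* 2 MiB pmd size on x86-64 */

int main(void)
{
	/* Over-allocate so a 2 MiB-aligned start can be carved out. */
	char *raw = mmap(NULL, 2 * HPAGE, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	char *huge = (char *)(((unsigned long)raw + HPAGE - 1) & ~(HPAGE - 1));

	madvise(huge, HPAGE, MADV_HUGEPAGE);
	memset(huge, 1, HPAGE);		/* fault in, ideally as one huge page */

	/* Partial coverage of the huge pmd forces a split. */
	mprotect(huge + 4096, 4096, PROT_READ);

	puts("inspect AnonHugePages in /proc/self/smaps (pause to compare)");
	return 0;
}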
439 struct vm_area_struct *vma, p4d_t *p4d, unsigned long addr,
449 ret = change_prepare(vma, pud, pmd, addr, cp_flags);
454 pages += change_pmd_range(tlb, vma, pud, addr, next, newprot,
462 struct vm_area_struct *vma, pgd_t *pgd, unsigned long addr,
472 ret = change_prepare(vma, p4d, pud, addr, cp_flags);
477 pages += change_pud_range(tlb, vma, p4d, addr, next, newprot,
485 struct vm_area_struct *vma, unsigned long addr,
488 struct mm_struct *mm = vma->vm_mm;
495 tlb_start_vma(tlb, vma);
498 ret = change_prepare(vma, pgd, p4d, addr, cp_flags);
505 pages += change_p4d_range(tlb, vma, pgd, addr, next, newprot,
509 tlb_end_vma(tlb, vma);
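
Lines 439-509 are the classic multi-level walk: each change_*_range() iterates its own table level, computes next as the smaller of the next entry boundary and the requested end, and recurses one level down, with change_protection_range() starting at the pgd inside tlb_start_vma()/tlb_end_vma(). A self-contained mock of the same clamped-boundary recursion; 9 bits per level and a 4-level walk are hard-coded, and all names are invented:

#include <stdio.h>

#define PAGE_SHIFT	12
#define LEVEL_BITS	9	/* 512 entries per table, as on x86-64 */

/* Next entry boundary at this level, clamped to end (cf. pgd_addr_end()). */
static unsigned long addr_end(unsigned long addr, unsigned long end,
			      unsigned int shift)
{
	unsigned long boundary = (addr + (1UL << shift)) & ~((1UL << shift) - 1);

	return boundary < end ? boundary : end;
}

static unsigned long change_range(unsigned long addr, unsigned long end,
				  unsigned int shift)
{
	unsigned long pages = 0, next;

	do {
		next = addr_end(addr, end, shift);
		if (shift == PAGE_SHIFT)
			pages++;	/* leaf level: one "pte" updated */
		else
			pages += change_range(addr, next, shift - LEVEL_BITS);
		addr = next;
	} while (addr != end);

	return pages;
}

int main(void)
{
	/* pgd(39) -> pud(30) -> pmd(21) -> pte(12), as in a 4-level kernel */
	unsigned long start = 0x7f0000000000UL, len = 4UL << 20;

	printf("ptes visited: %lu\n", change_range(start, start + len, 39));
	return 0;
}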
515 struct vm_area_struct *vma, unsigned long start,
518 pgprot_t newprot = vma->vm_page_prot;
527 * vma_set_page_prot() will adjust vma->vm_page_prot accordingly.
535 if (is_vm_hugetlb_page(vma))
536 pages = hugetlb_change_protection(vma, start, end, newprot,
539 pages = change_protection_range(tlb, vma, start, end, newprot,
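
change_protection() (lines 515-539) is the common entry point: it takes vma->vm_page_prot (as adjusted earlier by vma_set_page_prot()) and dispatches to hugetlb_change_protection() for hugetlb VMAs or change_protection_range() otherwise. The contract observable from userspace is simply that the new permissions are enforced on the next access; a small demo that drops PROT_WRITE and catches the resulting fault (error checks omitted):

#define _GNU_SOURCE
#include <signal.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static void on_segv(int sig)
{
	(void)sig;
	write(STDOUT_FILENO, "write faulted as expected\n", 26);
	_exit(0);
}

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	volatile char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	p[0] = 1;				/* writable: succeeds */

	signal(SIGSEGV, on_segv);
	mprotect((void *)p, psz, PROT_READ);	/* drop the write bit */
	p[0] = 2;				/* faults: PTE no longer writable */

	fprintf(stderr, "unexpected: write succeeded\n");
	return 1;
}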
577 struct vm_area_struct *vma, struct vm_area_struct **pprev,
580 struct mm_struct *mm = vma->vm_mm;
581 unsigned long oldflags = vma->vm_flags;
589 *pprev = vma;
599 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
630 * First try to merge with previous and/or next vma.
632 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
634 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
635 vma->vm_userfaultfd_ctx, anon_vma_name(vma));
637 vma = *pprev;
638 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY);
642 *pprev = vma;
644 if (start != vma->vm_start) {
645 error = split_vma(vmi, vma, start, 1);
650 if (end != vma->vm_end) {
651 error = split_vma(vmi, vma, end, 0);
661 vma_start_write(vma);
662 vm_flags_reset(vma, newflags);
663 if (vma_wants_manual_pte_write_upgrade(vma))
665 vma_set_page_prot(vma);
667 change_protection(tlb, vma, start, end, mm_cp_flags);
675 populate_vma_page_range(vma, start, end, NULL);
680 perf_event_mmap(vma);
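
mprotect_fixup() (lines 577-680) first tries to merge with the neighbouring VMAs and otherwise calls split_vma() at start and/or end, so the new flags apply to exactly [start, end); the flag update itself happens under vma_start_write()/vm_flags_reset() before change_protection() runs. The split is visible from userspace: protecting the middle page of a three-page mapping turns one VMA into three. A sketch (inspect /proc/<pid>/maps from another terminal while it pauses; error checks omitted):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	printf("one VMA: %p..%p (pid %d)\n", (void *)p,
	       (void *)(p + 3 * psz), getpid());
	mprotect(p + psz, psz, PROT_READ);	/* middle page only */
	printf("now three VMAs: rw- / r-- / rw-\n");
	pause();				/* go look at /proc/<pid>/maps */
	return 0;
}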
695 struct vm_area_struct *vma, *prev;
742 vma = vma_find(&vmi, end);
744 if (!vma)
748 if (vma->vm_start >= end)
750 start = vma->vm_start;
752 if (!(vma->vm_flags & VM_GROWSDOWN))
755 if (vma->vm_start > start)
758 end = vma->vm_end;
760 if (!(vma->vm_flags & VM_GROWSUP))
766 if (start > vma->vm_start)
767 prev = vma;
771 tmp = vma->vm_start;
772 for_each_vma_range(vmi, vma, end) {
777 if (vma->vm_start != tmp) {
783 if (rier && (vma->vm_flags & VM_MAYEXEC))
793 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey);
795 newflags |= (vma->vm_flags & ~mask_off_old_flags);
803 if (map_deny_write_exec(vma, newflags)) {
814 error = security_file_mprotect(vma, reqprot, prot);
818 tmp = vma->vm_end;
822 if (vma->vm_ops && vma->vm_ops->mprotect) {
823 error = vma->vm_ops->mprotect(vma, nstart, tmp, newflags);
828 error = mprotect_fixup(&vmi, &tlb, vma, &prev, nstart, tmp, newflags);
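
do_mprotect_pkey() (lines 695-828) is the syscall entry shared by mprotect(2) and pkey_mprotect(2): it walks every VMA in [start, end), folds the pkey into newflags via arch_override_mprotect_pkey(), runs the map_deny_write_exec and security_file_mprotect() checks, and applies each piece through vma->vm_ops->mprotect or mprotect_fixup(). A minimal pkey_mprotect() user, assuming hardware protection keys (x86 MPK) and glibc >= 2.27 for the wrappers; pkey_alloc() fails where keys are unsupported:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long psz = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psz, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
	if (pkey < 0) {
		perror("pkey_alloc");		/* no pkey support here */
		return 1;
	}

	/* Same path as mprotect(), plus the key association. */
	if (pkey_mprotect(p, psz, PROT_READ | PROT_WRITE, pkey))
		perror("pkey_mprotect");
	else
		printf("page %p tagged with pkey %d (writes blocked via PKRU)\n",
		       (void *)p, pkey);

	pkey_free(pkey);
	return 0;
}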