Lines Matching refs:vmf

108 static vm_fault_t do_fault(struct vm_fault *vmf);
109 static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
110 static bool vmf_pte_changed(struct vm_fault *vmf);
116 static bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)
118 if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
121 return pte_marker_uffd_wp(vmf->orig_pte);
2793 static inline int pte_unmap_same(struct vm_fault *vmf)
2798 spin_lock(vmf->ptl);
2799 same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
2800 spin_unlock(vmf->ptl);
2803 pte_unmap(vmf->pte);
2804 vmf->pte = NULL;
2815 struct vm_fault *vmf)
2820 struct vm_area_struct *vma = vmf->vma;
2822 unsigned long addr = vmf->address;
2845 vmf->pte = NULL;
2846 if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) {
2849 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2850 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
2855 if (vmf->pte)
2856 update_mmu_tlb(vma, addr, vmf->pte);
2861 entry = pte_mkyoung(vmf->orig_pte);
2862 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2863 update_mmu_cache_range(vmf, vma, addr, vmf->pte, 1);
2873 if (vmf->pte)
2877 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2878 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
2880 if (vmf->pte)
2881 update_mmu_tlb(vma, addr, vmf->pte);
2904 if (vmf->pte)
2905 pte_unmap_unlock(vmf->pte, vmf->ptl);
2932 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)
2935 unsigned int old_flags = vmf->flags;
2937 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2939 if (vmf->vma->vm_file &&
2940 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2943 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2945 vmf->flags = old_flags;
2965 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2967 struct vm_area_struct *vma = vmf->vma;
2969 struct folio *folio = page_folio(vmf->page);
2999 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
3018 static inline void wp_page_reuse(struct vm_fault *vmf)
3019 __releases(vmf->ptl)
3021 struct vm_area_struct *vma = vmf->vma;
3022 struct page *page = vmf->page;
3025 VM_BUG_ON(!(vmf->flags & FAULT_FLAG_WRITE));
3036 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3037 entry = pte_mkyoung(vmf->orig_pte);
3039 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
3040 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3041 pte_unmap_unlock(vmf->pte, vmf->ptl);
3062 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
3064 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3065 struct vm_area_struct *vma = vmf->vma;
3076 if (vmf->page)
3077 old_folio = page_folio(vmf->page);
3081 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
3082 new_folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
3087 vmf->address, false);
3091 ret = __wp_page_copy_user(&new_folio->page, vmf->page, vmf);
3107 kmsan_copy_page_meta(&new_folio->page, vmf->page);
3117 vmf->address & PAGE_MASK,
3118 (vmf->address & PAGE_MASK) + PAGE_SIZE);
3124 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
3125 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
3132 ksm_might_unmap_zero_page(mm, vmf->orig_pte);
3135 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
3139 if (pte_soft_dirty(vmf->orig_pte))
3141 if (pte_uffd_wp(vmf->orig_pte))
3154 ptep_clear_flush(vma, vmf->address, vmf->pte);
3155 folio_add_new_anon_rmap(new_folio, vma, vmf->address);
3160 uxpte_set_present(vma, vmf->address);
3170 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
3171 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
3195 page_remove_rmap(vmf->page, vma, false);
3201 pte_unmap_unlock(vmf->pte, vmf->ptl);
3202 } else if (vmf->pte) {
3203 update_mmu_tlb(vma, vmf->address, vmf->pte);
3204 pte_unmap_unlock(vmf->pte, vmf->ptl);
3233 * @vmf: structure describing the fault
3245 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3247 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3248 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3249 &vmf->ptl);
3250 if (!vmf->pte)
3256 if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) {
3257 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3258 pte_unmap_unlock(vmf->pte, vmf->ptl);
3261 wp_page_reuse(vmf);
3269 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3271 struct vm_area_struct *vma = vmf->vma;
3276 pte_unmap_unlock(vmf->pte, vmf->ptl);
3277 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3278 vma_end_read(vmf->vma);
3282 vmf->flags |= FAULT_FLAG_MKWRITE;
3283 ret = vma->vm_ops->pfn_mkwrite(vmf);
3286 return finish_mkwrite_fault(vmf);
3288 wp_page_reuse(vmf);
3292 static vm_fault_t wp_page_shared(struct vm_fault *vmf, struct folio *folio)
3293 __releases(vmf->ptl)
3295 struct vm_area_struct *vma = vmf->vma;
3303 pte_unmap_unlock(vmf->pte, vmf->ptl);
3304 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3306 vma_end_read(vmf->vma);
3310 tmp = do_page_mkwrite(vmf, folio);
3316 tmp = finish_mkwrite_fault(vmf);
3323 wp_page_reuse(vmf);
3326 ret |= fault_dirty_shared_page(vmf);
3354 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3355 __releases(vmf->ptl)
3357 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
3358 struct vm_area_struct *vma = vmf->vma;
3362 if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) {
3363 pte_unmap_unlock(vmf->pte, vmf->ptl);
3364 return handle_userfault(vmf, VM_UFFD_WP);
3371 if (unlikely(userfaultfd_wp(vmf->vma) &&
3372 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3373 flush_tlb_page(vmf->vma, vmf->address);
3376 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3378 if (vmf->page)
3379 folio = page_folio(vmf->page);
3393 if (!vmf->page)
3394 return wp_pfn_shared(vmf);
3395 return wp_page_shared(vmf, folio);
3407 if (PageAnonExclusive(vmf->page))
3440 page_move_anon_rmap(vmf->page, vma);
3444 pte_unmap_unlock(vmf->pte, vmf->ptl);
3447 wp_page_reuse(vmf);
3451 if ((vmf->flags & FAULT_FLAG_VMA_LOCK) && !vma->anon_vma) {
3452 pte_unmap_unlock(vmf->pte, vmf->ptl);
3453 vma_end_read(vmf->vma);
3463 pte_unmap_unlock(vmf->pte, vmf->ptl);
3468 return wp_page_copy(vmf);
3602 static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
3604 struct folio *folio = page_folio(vmf->page);
3605 struct vm_area_struct *vma = vmf->vma;
3620 ret = folio_lock_or_retry(folio, vmf);
3626 vma->vm_mm, vmf->address & PAGE_MASK,
3627 (vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
3630 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3631 &vmf->ptl);
3632 if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3633 restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte);
3635 if (vmf->pte)
3636 pte_unmap_unlock(vmf->pte, vmf->ptl);
3663 static vm_fault_t pte_marker_clear(struct vm_fault *vmf)
3665 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
3666 vmf->address, &vmf->ptl);
3667 if (!vmf->pte)
3677 if (pte_same(vmf->orig_pte, ptep_get(vmf->pte)))
3678 pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte);
3679 pte_unmap_unlock(vmf->pte, vmf->ptl);
3683 static vm_fault_t do_pte_missing(struct vm_fault *vmf)
3685 if (vma_is_anonymous(vmf->vma))
3686 return do_anonymous_page(vmf);
3688 return do_fault(vmf);
3695 static vm_fault_t pte_marker_handle_uffd_wp(struct vm_fault *vmf)
3701 if (unlikely(!userfaultfd_wp(vmf->vma)))
3702 return pte_marker_clear(vmf);
3704 return do_pte_missing(vmf);
3707 static vm_fault_t handle_pte_marker(struct vm_fault *vmf)
3709 swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);
3724 return pte_marker_handle_uffd_wp(vmf);
3738 vm_fault_t do_swap_page(struct vm_fault *vmf)
3740 struct vm_area_struct *vma = vmf->vma;
3752 if (!pte_unmap_same(vmf))
3755 entry = pte_to_swp_entry(vmf->orig_pte);
3758 migration_entry_wait(vma->vm_mm, vmf->pmd,
3759 vmf->address);
3761 vmf->page = pfn_swap_entry_to_page(entry);
3762 ret = remove_device_exclusive_entry(vmf);
3764 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
3774 vmf->page = pfn_swap_entry_to_page(entry);
3775 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3776 vmf->address, &vmf->ptl);
3777 if (unlikely(!vmf->pte ||
3778 !pte_same(ptep_get(vmf->pte),
3779 vmf->orig_pte)))
3786 get_page(vmf->page);
3787 pte_unmap_unlock(vmf->pte, vmf->ptl);
3788 ret = vmf->page->pgmap->ops->migrate_to_ram(vmf);
3789 put_page(vmf->page);
3793 ret = handle_pte_marker(vmf);
3795 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3806 folio = swap_cache_get_folio(entry, vma, vmf->address);
3830 vma, vmf->address, false);
3857 vmf);
3868 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3869 vmf->address, &vmf->ptl);
3870 if (likely(vmf->pte &&
3871 pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3889 ret |= folio_lock_or_retry(folio, vmf);
3910 page = ksm_might_need_to_copy(page, vma, vmf->address);
3926 if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache &&
3936 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3937 &vmf->ptl);
3938 if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte)))
3962 exclusive = pte_swp_exclusive(vmf->orig_pte);
4006 if (should_try_to_free_swap(folio, vma, vmf->flags))
4021 if (vmf->flags & FAULT_FLAG_WRITE) {
4023 vmf->flags &= ~FAULT_FLAG_WRITE;
4028 if (pte_swp_soft_dirty(vmf->orig_pte))
4030 if (pte_swp_uffd_wp(vmf->orig_pte))
4032 vmf->orig_pte = pte;
4036 page_add_new_anon_rmap(page, vma, vmf->address);
4039 page_add_anon_rmap(page, vma, vmf->address, rmap_flags);
4044 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
4045 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
4061 if (vmf->flags & FAULT_FLAG_WRITE) {
4062 ret |= do_wp_page(vmf);
4069 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4071 if (vmf->pte)
4072 pte_unmap_unlock(vmf->pte, vmf->ptl);
4081 if (vmf->pte)
4082 pte_unmap_unlock(vmf->pte, vmf->ptl);
4103 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
4105 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
4106 struct vm_area_struct *vma = vmf->vma;
4119 if (pte_alloc(vma->vm_mm, vmf->pmd))
4125 if (do_uxpte_page_fault(vmf, &entry))
4132 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
4134 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
4139 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4140 vmf->address, &vmf->ptl);
4141 if (!vmf->pte)
4143 if (vmf_pte_changed(vmf)) {
4144 update_mmu_tlb(vma, vmf->address, vmf->pte);
4152 pte_unmap_unlock(vmf->pte, vmf->ptl);
4153 return handle_userfault(vmf, VM_UFFD_MISSING);
4161 folio = vma_alloc_zeroed_movable_folio(vma, vmf->address);
4181 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
4182 &vmf->ptl);
4183 if (!vmf->pte)
4185 if (vmf_pte_changed(vmf)) {
4186 update_mmu_tlb(vma, vmf->address, vmf->pte);
4196 pte_unmap_unlock(vmf->pte, vmf->ptl);
4198 return handle_userfault(vmf, VM_UFFD_MISSING);
4202 folio_add_new_anon_rmap(folio, vma, vmf->address);
4211 uxpte_set_present(vma, vmf->address);
4215 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4218 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4220 if (vmf->pte)
4221 pte_unmap_unlock(vmf->pte, vmf->ptl);
4237 static vm_fault_t __do_fault(struct vm_fault *vmf)
4239 struct vm_area_struct *vma = vmf->vma;
4257 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
4258 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4259 if (!vmf->prealloc_pte)
4263 ret = vma->vm_ops->fault(vmf);
4268 if (unlikely(PageHWPoison(vmf->page))) {
4269 struct page *page = vmf->page;
4281 vmf->page = NULL;
4286 lock_page(vmf->page);
4288 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
4294 static void deposit_prealloc_pte(struct vm_fault *vmf)
4296 struct vm_area_struct *vma = vmf->vma;
4298 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
4304 vmf->prealloc_pte = NULL;
4307 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4309 struct vm_area_struct *vma = vmf->vma;
4310 bool write = vmf->flags & FAULT_FLAG_WRITE;
4311 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
4335 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
4336 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
4337 if (!vmf->prealloc_pte)
4341 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
4342 if (unlikely(!pmd_none(*vmf->pmd)))
4358 deposit_prealloc_pte(vmf);
4360 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
4362 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
4368 spin_unlock(vmf->ptl);
4372 vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
4380 * @vmf: Fault description.
4386 void set_pte_range(struct vm_fault *vmf, struct folio *folio,
4389 struct vm_area_struct *vma = vmf->vma;
4390 bool uffd_wp = vmf_orig_pte_uffd_wp(vmf);
4391 bool write = vmf->flags & FAULT_FLAG_WRITE;
4392 bool prefault = in_range(vmf->address, addr, nr * PAGE_SIZE);
4417 set_ptes(vma->vm_mm, addr, vmf->pte, entry, nr);
4420 update_mmu_cache_range(vmf, vma, addr, vmf->pte, nr);
4423 static bool vmf_pte_changed(struct vm_fault *vmf)
4425 if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID)
4426 return !pte_same(ptep_get(vmf->pte), vmf->orig_pte);
4428 return !pte_none(ptep_get(vmf->pte));
4434 * @vmf: structure describing the fault
4446 vm_fault_t finish_fault(struct vm_fault *vmf)
4448 struct vm_area_struct *vma = vmf->vma;
4453 if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED))
4454 page = vmf->cow_page;
4456 page = vmf->page;
4468 if (pmd_none(*vmf->pmd)) {
4470 ret = do_set_pmd(vmf, page);
4475 if (vmf->prealloc_pte)
4476 pmd_install(vma->vm_mm, vmf->pmd, &vmf->prealloc_pte);
4477 else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd)))
4481 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4482 vmf->address, &vmf->ptl);
4483 if (!vmf->pte)
4487 if (likely(!vmf_pte_changed(vmf))) {
4490 set_pte_range(vmf, folio, page, 1, vmf->address);
4493 update_mmu_tlb(vma, vmf->address, vmf->pte);
4497 pte_unmap_unlock(vmf->pte, vmf->ptl);
4560 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4563 pgoff_t pte_off = pte_index(vmf->address);
4564 /* The page offset of vmf->address within the VMA. */
4565 pgoff_t vma_off = vmf->pgoff - vmf->vma->vm_pgoff;
4575 pte_off + vma_pages(vmf->vma) - vma_off) - 1;
4577 if (pmd_none(*vmf->pmd)) {
4578 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4579 if (!vmf->prealloc_pte)
4584 ret = vmf->vma->vm_ops->map_pages(vmf,
4585 vmf->pgoff + from_pte - pte_off,
4586 vmf->pgoff + to_pte - pte_off);
4593 static inline bool should_fault_around(struct vm_fault *vmf)
4596 if (!vmf->vma->vm_ops->map_pages)
4599 if (uffd_disable_fault_around(vmf->vma))
4606 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4616 if (should_fault_around(vmf)) {
4617 ret = do_fault_around(vmf);
4622 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4623 vma_end_read(vmf->vma);
4627 ret = __do_fault(vmf);
4631 ret |= finish_fault(vmf);
4632 folio = page_folio(vmf->page);
4639 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4641 struct vm_area_struct *vma = vmf->vma;
4644 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4652 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4653 if (!vmf->cow_page)
4656 if (mem_cgroup_charge(page_folio(vmf->cow_page), vma->vm_mm,
4658 put_page(vmf->cow_page);
4661 folio_throttle_swaprate(page_folio(vmf->cow_page), GFP_KERNEL);
4663 ret = __do_fault(vmf);
4669 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4670 __SetPageUptodate(vmf->cow_page);
4672 ret |= finish_fault(vmf);
4673 unlock_page(vmf->page);
4674 put_page(vmf->page);
4679 put_page(vmf->cow_page);
4683 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4685 struct vm_area_struct *vma = vmf->vma;
4689 if (vmf->flags & FAULT_FLAG_VMA_LOCK) {
4694 ret = __do_fault(vmf);
4698 folio = page_folio(vmf->page);
4706 tmp = do_page_mkwrite(vmf, folio);
4714 ret |= finish_fault(vmf);
4722 ret |= fault_dirty_shared_page(vmf);
4734 static vm_fault_t do_fault(struct vm_fault *vmf)
4736 struct vm_area_struct *vma = vmf->vma;
4744 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
4745 vmf->address, &vmf->ptl);
4746 if (unlikely(!vmf->pte))
4756 if (unlikely(pte_none(ptep_get(vmf->pte))))
4761 pte_unmap_unlock(vmf->pte, vmf->ptl);
4763 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
4764 ret = do_read_fault(vmf);
4766 ret = do_cow_fault(vmf);
4768 ret = do_shared_fault(vmf);
4771 if (vmf->prealloc_pte) {
4772 pte_free(vm_mm, vmf->prealloc_pte);
4773 vmf->prealloc_pte = NULL;
4795 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4797 struct vm_area_struct *vma = vmf->vma;
4811 spin_lock(vmf->ptl);
4812 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
4813 pte_unmap_unlock(vmf->pte, vmf->ptl);
4818 old_pte = ptep_get(vmf->pte);
4827 can_change_pte_writable(vma, vmf->address, pte))
4830 page = vm_normal_page(vma, vmf->address, pte);
4866 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4872 pte_unmap_unlock(vmf->pte, vmf->ptl);
4881 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
4882 vmf->address, &vmf->ptl);
4883 if (unlikely(!vmf->pte))
4885 if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
4886 pte_unmap_unlock(vmf->pte, vmf->ptl);
4901 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4906 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4907 update_mmu_cache_range(vmf, vma, vmf->address, vmf->pte, 1);
4908 pte_unmap_unlock(vmf->pte, vmf->ptl);
4912 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4914 struct vm_area_struct *vma = vmf->vma;
4916 return do_huge_pmd_anonymous_page(vmf);
4918 return vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4923 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf)
4925 struct vm_area_struct *vma = vmf->vma;
4926 const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
4931 userfaultfd_huge_pmd_wp(vma, vmf->orig_pmd))
4932 return handle_userfault(vmf, VM_UFFD_WP);
4933 return do_huge_pmd_wp_page(vmf);
4938 ret = vma->vm_ops->huge_fault(vmf, PMD_ORDER);
4945 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
4950 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4954 struct vm_area_struct *vma = vmf->vma;
4959 return vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4964 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4968 struct vm_area_struct *vma = vmf->vma;
4976 ret = vma->vm_ops->huge_fault(vmf, PUD_ORDER);
4983 __split_huge_pud(vma, vmf->pud, vmf->address);
5003 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
5007 if (unlikely(pmd_none(*vmf->pmd))) {
5014 vmf->pte = NULL;
5015 vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID;
5023 vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd,
5024 vmf->address, &vmf->ptl);
5025 if (unlikely(!vmf->pte))
5027 vmf->orig_pte = ptep_get_lockless(vmf->pte);
5028 vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID;
5030 if (pte_none(vmf->orig_pte)) {
5031 pte_unmap(vmf->pte);
5032 vmf->pte = NULL;
5036 if (!vmf->pte)
5037 return do_pte_missing(vmf);
5039 if (!pte_present(vmf->orig_pte))
5040 return do_swap_page(vmf);
5042 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
5043 return do_numa_page(vmf);
5045 spin_lock(vmf->ptl);
5046 entry = vmf->orig_pte;
5047 if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) {
5048 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
5051 if (vmf->flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
5053 return do_wp_page(vmf);
5054 else if (likely(vmf->flags & FAULT_FLAG_WRITE))
5058 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
5059 vmf->flags & FAULT_FLAG_WRITE)) {
5060 update_mmu_cache_range(vmf, vmf->vma, vmf->address,
5061 vmf->pte, 1);
5064 if (vmf->flags & FAULT_FLAG_TRIED)
5072 if (vmf->flags & FAULT_FLAG_WRITE)
5073 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address,
5074 vmf->pte);
5077 pte_unmap_unlock(vmf->pte, vmf->ptl);
5090 struct vm_fault vmf = {
5109 vmf.pud = pud_alloc(mm, p4d, address);
5110 if (!vmf.pud)
5113 if (pud_none(*vmf.pud) &&
5115 ret = create_huge_pud(&vmf);
5119 pud_t orig_pud = *vmf.pud;
5129 ret = wp_huge_pud(&vmf, orig_pud);
5133 huge_pud_set_accessed(&vmf, orig_pud);
5139 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
5140 if (!vmf.pmd)
5144 if (pud_trans_unstable(vmf.pud))
5147 if (pmd_none(*vmf.pmd) &&
5149 ret = create_huge_pmd(&vmf);
5153 vmf.orig_pmd = pmdp_get_lockless(vmf.pmd);
5155 if (unlikely(is_swap_pmd(vmf.orig_pmd))) {
5157 !is_pmd_migration_entry(vmf.orig_pmd));
5158 if (is_pmd_migration_entry(vmf.orig_pmd))
5159 pmd_migration_entry_wait(mm, vmf.pmd);
5162 if (pmd_trans_huge(vmf.orig_pmd) || pmd_devmap(vmf.orig_pmd)) {
5163 if (pmd_protnone(vmf.orig_pmd) && vma_is_accessible(vma))
5164 return do_huge_pmd_numa_page(&vmf);
5167 !pmd_write(vmf.orig_pmd)) {
5168 ret = wp_huge_pmd(&vmf);
5172 huge_pmd_set_accessed(&vmf);
5178 return handle_pte_fault(&vmf);
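
Nearly every handler listed above repeats the same revalidation idiom (see e.g. lines 2849-2856, 3124-3125, 3936-3938 and 4481-4497): remap and lock the PTE, compare it against the value sampled earlier into vmf->orig_pte, and back out if it changed underneath the fault. A minimal sketch of that idiom, distilled from the lines above, is shown below; the helper name revalidate_orig_pte is hypothetical, and the snippet assumes kernel-internal headers rather than being a standalone, buildable unit.

/*
 * Hypothetical helper illustrating the recheck idiom used by the
 * handlers above: map and lock the PTE for vmf->address, then verify
 * it still matches the value previously sampled into vmf->orig_pte.
 * Returns true with vmf->pte mapped and vmf->ptl held on success;
 * returns false with nothing held if the page table vanished or the
 * entry changed (a concurrent fault or zap already handled it).
 */
static bool revalidate_orig_pte(struct vm_fault *vmf)
{
	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);
	if (unlikely(!vmf->pte))
		return false;
	if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) {
		/* Stale fault: refresh the local TLB entry and give up. */
		update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
		pte_unmap_unlock(vmf->pte, vmf->ptl);
		vmf->pte = NULL;
		return false;
	}
	return true;
}

On success the caller proceeds exactly as the listed handlers do (e.g. wp_page_reuse() or set_pte_range()) and eventually drops the lock with pte_unmap_unlock(vmf->pte, vmf->ptl).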