Lines matching refs: vmf

2629 struct vm_fault *vmf)
2635 struct vm_area_struct *vma = vmf->vma;
2637 unsigned long addr = vmf->address;
2657 if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
2660 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2662 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2667 update_mmu_tlb(vma, addr, vmf->pte);
2672 entry = pte_mkyoung(vmf->orig_pte);
2673 if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
2674 update_mmu_cache(vma, addr, vmf->pte);
2688 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
2690 if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2692 update_mmu_tlb(vma, addr, vmf->pte);
2716 pte_unmap_unlock(vmf->pte, vmf->ptl);
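
The block at 2629-2716 belongs to the copy helper that wp_page_copy() invokes at 2893 as cow_user_page(). Every branch in it follows the idiom that recurs throughout this listing: take vmf->ptl via pte_offset_map_lock(), compare the live *vmf->pte against the vmf->orig_pte snapshot with pte_same(), and back off (after update_mmu_tlb()) if another CPU changed the PTE in the meantime. Below is a minimal userspace model of that snapshot-and-recheck idiom, with pthread_mutex_t and unsigned long standing in for the kernel's page-table lock and pte_t; all names are illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pte_t;                     /* stand-in for the kernel pte_t  */

struct fault_ctx {                               /* stand-in for struct vm_fault   */
    pte_t *pte;                                  /* mapped PTE slot                */
    pte_t orig_pte;                              /* snapshot taken earlier         */
    pthread_mutex_t *ptl;                        /* stand-in for the PTE lock      */
};

/* Returns true if the snapshot is still valid; the lock is then left held,
 * mirroring how cow_user_page() keeps vmf->ptl until pte_unmap_unlock(). */
static bool revalidate_pte(struct fault_ctx *vmf)
{
    bool same;

    pthread_mutex_lock(vmf->ptl);                /* pte_offset_map_lock() analogue */
    same = (*vmf->pte == vmf->orig_pte);         /* pte_same() analogue            */
    if (!same)
        pthread_mutex_unlock(vmf->ptl);          /* lost the race: caller retries  */
    return same;
}

int main(void)
{
    pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
    pte_t slot = 42;
    struct fault_ctx vmf = { .pte = &slot, .orig_pte = 42, .ptl = &ptl };

    if (revalidate_pte(&vmf)) {
        puts("PTE unchanged: safe to update it under the lock");
        pthread_mutex_unlock(&ptl);
    }
    return 0;
}
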
2743 static vm_fault_t do_page_mkwrite(struct vm_fault *vmf)
2746 struct page *page = vmf->page;
2747 unsigned int old_flags = vmf->flags;
2749 vmf->flags = FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE;
2751 if (vmf->vma->vm_file &&
2752 IS_SWAPFILE(vmf->vma->vm_file->f_mapping->host))
2755 ret = vmf->vma->vm_ops->page_mkwrite(vmf);
2757 vmf->flags = old_flags;
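
do_page_mkwrite() (2743-2757) temporarily rewrites vmf->flags to FAULT_FLAG_WRITE|FAULT_FLAG_MKWRITE so the filesystem's ->page_mkwrite() callback sees only those bits, bails out early for swapfile-backed hosts, and restores the caller's flags afterwards. A small sketch of that save/override/restore pattern, using a made-up fake_fault struct in place of struct vm_fault:

#include <stdio.h>

#define FAULT_FLAG_WRITE   0x01
#define FAULT_FLAG_MKWRITE 0x02

struct fake_fault {
    unsigned int flags;
    int (*page_mkwrite)(struct fake_fault *);    /* vm_ops->page_mkwrite stand-in */
};

/* Model of the override/restore pattern in do_page_mkwrite(). */
static int model_do_page_mkwrite(struct fake_fault *f)
{
    unsigned int old_flags = f->flags;
    int ret;

    f->flags = FAULT_FLAG_WRITE | FAULT_FLAG_MKWRITE;
    ret = f->page_mkwrite(f);                    /* callback sees only mkwrite flags */
    f->flags = old_flags;                        /* caller's flags restored          */
    return ret;
}

static int dummy_mkwrite(struct fake_fault *f)
{
    printf("callback saw flags 0x%x\n", f->flags);
    return 0;
}

int main(void)
{
    struct fake_fault f = { .flags = 0xffu, .page_mkwrite = dummy_mkwrite };

    model_do_page_mkwrite(&f);
    printf("flags restored to 0x%x\n", f.flags);
    return 0;
}
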
2777 static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)
2779 struct vm_area_struct *vma = vmf->vma;
2781 struct page *page = vmf->page;
2811 fpin = maybe_unlock_mmap_for_io(vmf, NULL);
2830 static inline void wp_page_reuse(struct vm_fault *vmf)
2831 __releases(vmf->ptl)
2833 struct vm_area_struct *vma = vmf->vma;
2834 struct page *page = vmf->page;
2844 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2845 entry = pte_mkyoung(vmf->orig_pte);
2847 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2848 update_mmu_cache(vma, vmf->address, vmf->pte);
2849 pte_unmap_unlock(vmf->pte, vmf->ptl);
2869 static vm_fault_t wp_page_copy(struct vm_fault *vmf)
2871 struct vm_area_struct *vma = vmf->vma;
2873 struct page *old_page = vmf->page;
2882 if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {
2884 vmf->address);
2889 vmf->address);
2893 if (!cow_user_page(new_page, old_page, vmf)) {
2914 vmf->address & PAGE_MASK,
2915 (vmf->address & PAGE_MASK) + PAGE_SIZE);
2921 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2922 if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {
2932 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2942 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2943 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2947 uxpte_set_present(vma, vmf->address);
2955 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2956 update_mmu_cache(vma, vmf->address, vmf->pte);
2957 xpm_integrity_update_hook(vma, vmf->flags, new_page);
2988 update_mmu_tlb(vma, vmf->address, vmf->pte);
2994 pte_unmap_unlock(vmf->pte, vmf->ptl);
3026 * @vmf: structure describing the fault
3038 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf)
3040 WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED));
3041 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3042 &vmf->ptl);
3047 if (!pte_same(*vmf->pte, vmf->orig_pte)) {
3048 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3049 pte_unmap_unlock(vmf->pte, vmf->ptl);
3053 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags,
3054 vmf->address, vmf->page))) {
3055 pte_unmap_unlock(vmf->pte, vmf->ptl);
3059 wp_page_reuse(vmf);
3067 static vm_fault_t wp_pfn_shared(struct vm_fault *vmf)
3069 struct vm_area_struct *vma = vmf->vma;
3074 pte_unmap_unlock(vmf->pte, vmf->ptl);
3075 vmf->flags |= FAULT_FLAG_MKWRITE;
3076 ret = vma->vm_ops->pfn_mkwrite(vmf);
3079 return finish_mkwrite_fault(vmf);
3081 wp_page_reuse(vmf);
3085 static vm_fault_t wp_page_shared(struct vm_fault *vmf)
3086 __releases(vmf->ptl)
3088 struct vm_area_struct *vma = vmf->vma;
3091 get_page(vmf->page);
3096 pte_unmap_unlock(vmf->pte, vmf->ptl);
3097 tmp = do_page_mkwrite(vmf);
3100 put_page(vmf->page);
3103 tmp = finish_mkwrite_fault(vmf);
3105 unlock_page(vmf->page);
3106 put_page(vmf->page);
3110 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address,
3111 vmf->page))){
3112 pte_unmap_unlock(vmf->pte, vmf->ptl);
3113 put_page(vmf->page);
3117 wp_page_reuse(vmf);
3118 lock_page(vmf->page);
3120 ret |= fault_dirty_shared_page(vmf);
3121 put_page(vmf->page);
3144 static vm_fault_t do_wp_page(struct vm_fault *vmf)
3145 __releases(vmf->ptl)
3147 struct vm_area_struct *vma = vmf->vma;
3149 if (userfaultfd_pte_wp(vma, *vmf->pte)) {
3150 pte_unmap_unlock(vmf->pte, vmf->ptl);
3151 return handle_userfault(vmf, VM_UFFD_WP);
3158 if (unlikely(userfaultfd_wp(vmf->vma) &&
3159 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3160 flush_tlb_page(vmf->vma, vmf->address);
3162 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3163 if (!vmf->page) {
3173 return wp_pfn_shared(vmf);
3175 pte_unmap_unlock(vmf->pte, vmf->ptl);
3176 return wp_page_copy(vmf);
3183 if (PageAnon(vmf->page)) {
3184 struct page *page = vmf->page;
3202 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address,
3203 vmf->page))){
3204 pte_unmap_unlock(vmf->pte, vmf->ptl);
3208 wp_page_reuse(vmf);
3212 return wp_page_shared(vmf);
3218 get_page(vmf->page);
3220 pte_unmap_unlock(vmf->pte, vmf->ptl);
3221 return wp_page_copy(vmf);
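
do_wp_page() (3144-3221) is the write-protect fault dispatcher visible above: userfaultfd-armed PTEs go to handle_userfault(), PFN mappings with no struct page go to wp_pfn_shared() or get copied, exclusively-owned anonymous pages are reused in place, shared writable file pages go through wp_page_shared(), and everything else breaks COW via wp_page_copy(). The sketch below models only that decision; the vendor xpm_integrity_validate_hook() calls in the listing are left out, and the struct and enum names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Simplified inputs to the write-protect decision, loosely mirroring what
 * do_wp_page() reads out of vmf and the vma (made-up structure). */
struct wp_inputs {
    bool uffd_wp;           /* userfaultfd write-protect armed          */
    bool normal_page;       /* vm_normal_page() found a struct page     */
    bool shared_writable;   /* vma has both VM_WRITE and VM_SHARED      */
    bool anon_reusable;     /* PageAnon() and we are the sole user      */
};

enum wp_action { WP_USERFAULT, WP_PFN_SHARED, WP_PAGE_SHARED, WP_REUSE, WP_COPY };

static enum wp_action classify_wp_fault(const struct wp_inputs *in)
{
    if (in->uffd_wp)
        return WP_USERFAULT;                    /* handle_userfault(VM_UFFD_WP)  */
    if (!in->normal_page)                       /* VM_PFNMAP / special mapping   */
        return in->shared_writable ? WP_PFN_SHARED : WP_COPY;
    if (in->anon_reusable)
        return WP_REUSE;                        /* wp_page_reuse(): keep page    */
    if (in->shared_writable)
        return WP_PAGE_SHARED;                  /* notify filesystem, then reuse */
    return WP_COPY;                             /* wp_page_copy(): break COW     */
}

int main(void)
{
    struct wp_inputs in = { .normal_page = true };   /* private file page */

    printf("private file page -> action %d (copy)\n", classify_wp_fault(&in));
    return 0;
}
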
3358 vm_fault_t do_swap_page(struct vm_fault *vmf)
3360 struct vm_area_struct *vma = vmf->vma;
3369 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3372 entry = pte_to_swp_entry(vmf->orig_pte);
3375 migration_entry_wait(vma->vm_mm, vmf->pmd,
3376 vmf->address);
3378 vmf->page = device_private_entry_to_page(entry);
3379 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3380 vmf->address, &vmf->ptl);
3381 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
3382 spin_unlock(vmf->ptl);
3390 get_page(vmf->page);
3391 pte_unmap_unlock(vmf->pte, vmf->ptl);
3392 vmf->page->pgmap->ops->migrate_to_ram(vmf);
3393 put_page(vmf->page);
3397 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3405 page = lookup_swap_cache(entry, vma, vmf->address);
3415 vmf->address);
3442 vmf);
3451 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3452 vmf->address, &vmf->ptl);
3453 if (likely(pte_same(*vmf->pte, vmf->orig_pte)))
3473 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3491 page = ksm_might_need_to_copy(page, vma, vmf->address);
3503 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3504 &vmf->ptl);
3505 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte)))
3522 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags,
3523 vmf->address, page))){
3531 if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
3533 vmf->flags &= ~FAULT_FLAG_WRITE;
3538 if (pte_swp_soft_dirty(vmf->orig_pte))
3540 if (pte_swp_uffd_wp(vmf->orig_pte)) {
3544 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3545 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3546 vmf->orig_pte = pte;
3550 page_add_new_anon_rmap(page, vma, vmf->address, false);
3553 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3574 if (vmf->flags & FAULT_FLAG_WRITE) {
3575 ret |= do_wp_page(vmf);
3582 update_mmu_cache(vma, vmf->address, vmf->pte);
3584 pte_unmap_unlock(vmf->pte, vmf->ptl);
3588 pte_unmap_unlock(vmf->pte, vmf->ptl);
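
do_swap_page() (3358-3588) starts by classifying the entry decoded from vmf->orig_pte: migration entries are simply waited on, device-private entries are handed back to the driver via pgmap->ops->migrate_to_ram(), corrupt entries hit print_bad_pte(), and only real swap entries take the swap-cache/readahead path before the PTE is re-validated under vmf->ptl and rewritten. A hedged sketch of just that classification step (the enum is made up; the kernel derives the kind from pte_to_swp_entry() and its helpers):

#include <stdio.h>

/* Rough classification of the entry found in a non-present PTE (made up;
 * the kernel gets this from pte_to_swp_entry() and related helpers). */
enum swp_kind { SWP_MIGRATION, SWP_DEVICE_PRIVATE, SWP_BAD, SWP_REAL_SWAP };

static const char *swap_fault_path(enum swp_kind kind)
{
    switch (kind) {
    case SWP_MIGRATION:
        return "migration_entry_wait(), then retry the fault";
    case SWP_DEVICE_PRIVATE:
        return "pgmap->ops->migrate_to_ram() for device-private memory";
    case SWP_BAD:
        return "print_bad_pte() and VM_FAULT_SIGBUS";
    case SWP_REAL_SWAP:
    default:
        return "swap-cache lookup or readahead, lock the page, re-check "
               "the PTE under vmf->ptl, then install the new mapping";
    }
}

int main(void)
{
    printf("%s\n", swap_fault_path(SWP_REAL_SWAP));
    return 0;
}
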
3605 static vm_fault_t do_anonymous_page(struct vm_fault *vmf)
3607 struct vm_area_struct *vma = vmf->vma;
3626 if (pte_alloc(vma->vm_mm, vmf->pmd))
3630 if (unlikely(pmd_trans_unstable(vmf->pmd)))
3635 if (do_uxpte_page_fault(vmf, &entry))
3638 if(xpm_integrity_check_hook(vma, vmf->flags, vmf->address,
3646 if (!(vmf->flags & FAULT_FLAG_WRITE) &&
3648 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3651 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3652 vmf->address, &vmf->ptl);
3653 if (!pte_none(*vmf->pte)) {
3654 update_mmu_tlb(vma, vmf->address, vmf->pte);
3662 pte_unmap_unlock(vmf->pte, vmf->ptl);
3663 return handle_userfault(vmf, VM_UFFD_MISSING);
3671 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3691 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3692 &vmf->ptl);
3693 if (!pte_none(*vmf->pte)) {
3694 update_mmu_cache(vma, vmf->address, vmf->pte);
3704 pte_unmap_unlock(vmf->pte, vmf->ptl);
3706 return handle_userfault(vmf, VM_UFFD_MISSING);
3710 page_add_new_anon_rmap(page, vma, vmf->address, false);
3717 uxpte_set_present(vma, vmf->address);
3720 xpm_integrity_update_hook(vma, vmf->flags, page);
3723 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3726 update_mmu_cache(vma, vmf->address, vmf->pte);
3728 pte_unmap_unlock(vmf->pte, vmf->ptl);
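
do_anonymous_page() (3605-3728) has the two outcomes visible above: a read fault on a private anonymous mapping installs the shared zero page via pte_mkspecial(pfn_pte(my_zero_pfn(...), ...)), while a write fault allocates a zeroed movable page, adds it to the anon rmap and maps it young, dirty and writable. A simplified userspace model of that choice follows; calloc() stands in for alloc_zeroed_user_highpage_movable(), and the uxpte/xpm hooks shown in the listing are skipped.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096          /* assumed 4 KiB pages */

/* Minimal model of do_anonymous_page()'s two outcomes (names are made up). */
struct anon_fault {
    bool write;                       /* FAULT_FLAG_WRITE                  */
    unsigned char *new_page;          /* filled in only for the write case */
};

static const unsigned char zero_page[MODEL_PAGE_SIZE];   /* my_zero_pfn() stand-in */

static const unsigned char *anon_fault_page(struct anon_fault *f)
{
    if (!f->write)
        return zero_page;             /* read: shared zero page, mapped read-only */

    f->new_page = calloc(1, MODEL_PAGE_SIZE);  /* alloc_zeroed_user_highpage_movable() */
    return f->new_page;               /* write: private page, mapped dirty + RW    */
}

int main(void)
{
    struct anon_fault rd = { .write = false }, wr = { .write = true };

    printf("read fault maps the zero page: %d\n", anon_fault_page(&rd) == zero_page);
    printf("write fault gets its own page: %d\n", anon_fault_page(&wr) != zero_page);
    free(wr.new_page);
    return 0;
}
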
3744 static vm_fault_t __do_fault(struct vm_fault *vmf)
3746 struct vm_area_struct *vma = vmf->vma;
3764 if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
3765 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3766 if (!vmf->prealloc_pte)
3771 ret = vma->vm_ops->fault(vmf);
3776 if (unlikely(PageHWPoison(vmf->page))) {
3777 struct page *page = vmf->page;
3789 vmf->page = NULL;
3794 lock_page(vmf->page);
3796 VM_BUG_ON_PAGE(!PageLocked(vmf->page), vmf->page);
3812 static vm_fault_t pte_alloc_one_map(struct vm_fault *vmf)
3814 struct vm_area_struct *vma = vmf->vma;
3816 if (!pmd_none(*vmf->pmd))
3818 if (vmf->prealloc_pte) {
3819 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3820 if (unlikely(!pmd_none(*vmf->pmd))) {
3821 spin_unlock(vmf->ptl);
3826 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3827 spin_unlock(vmf->ptl);
3828 vmf->prealloc_pte = NULL;
3829 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
3844 if (pmd_devmap_trans_unstable(vmf->pmd))
3848 * At this point we know that our vmf->pmd points to a page of ptes
3851 * we zap the ptes pointed to by our vmf->pmd, the vmf->ptl will still
3852 * be valid and we will re-check to make sure the vmf->pte isn't
3853 * pte_none() under vmf->ptl protection when we return to
3856 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3857 &vmf->ptl);
3862 static void deposit_prealloc_pte(struct vm_fault *vmf)
3864 struct vm_area_struct *vma = vmf->vma;
3866 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3872 vmf->prealloc_pte = NULL;
3875 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3877 struct vm_area_struct *vma = vmf->vma;
3878 bool write = vmf->flags & FAULT_FLAG_WRITE;
3879 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3895 if (arch_needs_pgtable_deposit() && !vmf->prealloc_pte) {
3896 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3897 if (!vmf->prealloc_pte)
3902 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3903 if (unlikely(!pmd_none(*vmf->pmd)))
3919 deposit_prealloc_pte(vmf);
3921 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3923 update_mmu_cache_pmd(vma, haddr, vmf->pmd);
3929 spin_unlock(vmf->ptl);
3933 static vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page)
3944 * @vmf: fault environment
3947 * Caller must take care of unlocking vmf->ptl, if vmf->pte is non-NULL on
3955 vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page)
3957 struct vm_area_struct *vma = vmf->vma;
3958 bool write = vmf->flags & FAULT_FLAG_WRITE;
3962 if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
3963 ret = do_set_pmd(vmf, page);
3968 if (!vmf->pte) {
3969 ret = pte_alloc_one_map(vmf);
3975 if (unlikely(!pte_none(*vmf->pte))) {
3976 update_mmu_tlb(vma, vmf->address, vmf->pte);
3981 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags,
3982 vmf->address, page)))
3993 page_add_new_anon_rmap(page, vma, vmf->address, false);
3999 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4002 update_mmu_cache(vma, vmf->address, vmf->pte);
4011 * @vmf: structure describing the fault
4023 vm_fault_t finish_fault(struct vm_fault *vmf)
4029 if ((vmf->flags & FAULT_FLAG_WRITE) &&
4030 !(vmf->vma->vm_flags & VM_SHARED))
4031 page = vmf->cow_page;
4033 page = vmf->page;
4039 if (!(vmf->vma->vm_flags & VM_SHARED))
4040 ret = check_stable_address_space(vmf->vma->vm_mm);
4042 ret = alloc_set_pte(vmf, page);
4043 if (vmf->pte)
4044 pte_unmap_unlock(vmf->pte, vmf->ptl);
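
finish_fault() (4023-4044) picks which page alloc_set_pte() should install: the private copy in vmf->cow_page when a write hits a non-shared mapping, otherwise the page the filesystem returned in vmf->page. A minimal model of that selection, with an illustrative struct instead of struct vm_fault:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the few vm_fault fields finish_fault() looks at. */
struct finish_ctx {
    bool write;                /* vmf->flags & FAULT_FLAG_WRITE */
    bool vm_shared;            /* vma->vm_flags & VM_SHARED     */
    const char *page;          /* vmf->page                     */
    const char *cow_page;      /* vmf->cow_page                 */
};

static const char *page_to_map(const struct finish_ctx *c)
{
    if (c->write && !c->vm_shared)
        return c->cow_page;    /* private write: map the COW copy  */
    return c->page;            /* read or shared write: cache page */
}

int main(void)
{
    struct finish_ctx c = { .write = true, .vm_shared = false,
                            .page = "pagecache page", .cow_page = "cow copy" };

    printf("mapping the %s\n", page_to_map(&c));
    return 0;
}
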
4108 static vm_fault_t do_fault_around(struct vm_fault *vmf)
4110 unsigned long address = vmf->address, nr_pages, mask;
4111 pgoff_t start_pgoff = vmf->pgoff;
4119 vmf->address = max(address & mask, vmf->vma->vm_start);
4120 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4128 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4130 end_pgoff = min3(end_pgoff, vma_pages(vmf->vma) + vmf->vma->vm_pgoff - 1,
4133 if (pmd_none(*vmf->pmd)) {
4134 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4135 if (!vmf->prealloc_pte)
4140 vmf->vma->vm_ops->map_pages(vmf, start_pgoff, end_pgoff);
4143 if (pmd_trans_huge(*vmf->pmd)) {
4149 if (!vmf->pte)
4153 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
4154 if (!pte_none(*vmf->pte))
4156 pte_unmap_unlock(vmf->pte, vmf->ptl);
4158 vmf->address = address;
4159 vmf->pte = NULL;
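
do_fault_around() (4108-4159) computes a window of file pages to map speculatively around the faulting address: align vmf->address down to the fault_around_bytes block, clamp it to the vma start, then clamp the end offset to whichever comes first of the end of the page table, the end of the vma, or the byte budget. The model below reproduces that arithmetic in plain C, assuming 4 KiB pages, 512 PTEs per page table and the mainline 64 KiB default budget; these are assumptions, not values taken from the listing.

#include <stdio.h>

#define PAGE_SHIFT    12UL
#define PAGE_SIZE     (1UL << PAGE_SHIFT)
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PTRS_PER_PTE  512UL                 /* x86-64 value, assumed here */

static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
{
    unsigned long m = a < b ? a : b;
    return m < c ? m : c;
}

/* Model of the window arithmetic in do_fault_around(). */
static void fault_around_window(unsigned long address, unsigned long vm_start,
                                unsigned long vm_pgoff, unsigned long vma_pages,
                                unsigned long pgoff, unsigned long around_bytes,
                                unsigned long *start_pgoff, unsigned long *end_pgoff)
{
    unsigned long nr_pages = around_bytes >> PAGE_SHIFT;
    unsigned long mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
    unsigned long start_addr = address & mask;
    unsigned long off;

    if (start_addr < vm_start)
        start_addr = vm_start;                        /* clamp to the vma start */
    off = ((address - start_addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

    *start_pgoff = pgoff - off;
    *end_pgoff = *start_pgoff
               - ((start_addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
               + PTRS_PER_PTE - 1;                    /* end of this page table */
    *end_pgoff = min3ul(*end_pgoff, vma_pages + vm_pgoff - 1,
                        *start_pgoff + nr_pages - 1);
}

int main(void)
{
    unsigned long s, e;

    /* 64 KiB fault-around, fault at 0x40003000 in a 256-page vma that starts
     * at 0x40000000 and maps file offset 0 (so the faulting pgoff is 3). */
    fault_around_window(0x40003000UL, 0x40000000UL, 0, 256, 3, 65536, &s, &e);
    printf("map file pages %lu..%lu around the fault\n", s, e);
    return 0;
}
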
4163 static vm_fault_t do_read_fault(struct vm_fault *vmf)
4165 struct vm_area_struct *vma = vmf->vma;
4174 ret = do_fault_around(vmf);
4179 ret = __do_fault(vmf);
4183 ret |= finish_fault(vmf);
4184 unlock_page(vmf->page);
4186 put_page(vmf->page);
4190 static vm_fault_t do_cow_fault(struct vm_fault *vmf)
4192 struct vm_area_struct *vma = vmf->vma;
4198 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4199 if (!vmf->cow_page)
4202 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4203 put_page(vmf->cow_page);
4206 cgroup_throttle_swaprate(vmf->cow_page, GFP_KERNEL);
4208 ret = __do_fault(vmf);
4214 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4215 __SetPageUptodate(vmf->cow_page);
4217 ret |= finish_fault(vmf);
4218 unlock_page(vmf->page);
4219 put_page(vmf->page);
4224 put_page(vmf->cow_page);
4228 static vm_fault_t do_shared_fault(struct vm_fault *vmf)
4230 struct vm_area_struct *vma = vmf->vma;
4233 ret = __do_fault(vmf);
4242 unlock_page(vmf->page);
4243 tmp = do_page_mkwrite(vmf);
4246 put_page(vmf->page);
4251 ret |= finish_fault(vmf);
4254 unlock_page(vmf->page);
4255 put_page(vmf->page);
4259 ret |= fault_dirty_shared_page(vmf);
4271 static vm_fault_t do_fault(struct vm_fault *vmf)
4273 struct vm_area_struct *vma = vmf->vma;
4285 if (unlikely(!pmd_present(*vmf->pmd)))
4288 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4289 vmf->pmd,
4290 vmf->address,
4291 &vmf->ptl);
4299 if (unlikely(pte_none(*vmf->pte)))
4304 pte_unmap_unlock(vmf->pte, vmf->ptl);
4306 } else if (!(vmf->flags & FAULT_FLAG_WRITE))
4307 ret = do_read_fault(vmf);
4309 ret = do_cow_fault(vmf);
4311 ret = do_shared_fault(vmf);
4314 if (vmf->prealloc_pte) {
4315 pte_free(vm_mm, vmf->prealloc_pte);
4316 vmf->prealloc_pte = NULL;
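
do_fault() (4271-4316) is a thin dispatcher: mappings without a ->fault handler get the SIGBUS/NOPAGE treatment at 4285-4304, reads go to do_read_fault(), private writes to do_cow_fault(), shared writes to do_shared_fault(), and an unused vmf->prealloc_pte is freed on the way out. A compact model of that routing (illustrative names only):

#include <stdbool.h>
#include <stdio.h>

enum fault_kind { FAULT_SIGBUS_OR_NOPAGE, FAULT_READ, FAULT_COW, FAULT_SHARED };

/* Model of the dispatch at the top of do_fault() (illustrative struct). */
struct file_fault {
    bool has_fault_op;   /* vma->vm_ops->fault != NULL */
    bool write;          /* FAULT_FLAG_WRITE           */
    bool vm_shared;      /* vma->vm_flags & VM_SHARED  */
};

static enum fault_kind classify_file_fault(const struct file_fault *f)
{
    if (!f->has_fault_op)
        return FAULT_SIGBUS_OR_NOPAGE;  /* 4285-4304: no ->fault handler */
    if (!f->write)
        return FAULT_READ;              /* do_read_fault()               */
    if (!f->vm_shared)
        return FAULT_COW;               /* do_cow_fault()                */
    return FAULT_SHARED;                /* do_shared_fault()             */
}

int main(void)
{
    struct file_fault f = { .has_fault_op = true, .write = true, .vm_shared = false };

    printf("private write fault -> %d (COW)\n", classify_file_fault(&f));
    return 0;
}
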
4336 static vm_fault_t do_numa_page(struct vm_fault *vmf)
4338 struct vm_area_struct *vma = vmf->vma;
4345 bool was_writable = pte_savedwrite(vmf->orig_pte);
4353 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4354 spin_lock(vmf->ptl);
4355 if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) {
4356 pte_unmap_unlock(vmf->pte, vmf->ptl);
4364 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4369 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4370 update_mmu_cache(vma, vmf->address, vmf->pte);
4372 page = vm_normal_page(vma, vmf->address, pte);
4374 pte_unmap_unlock(vmf->pte, vmf->ptl);
4380 pte_unmap_unlock(vmf->pte, vmf->ptl);
4404 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4406 pte_unmap_unlock(vmf->pte, vmf->ptl);
4426 static inline vm_fault_t create_huge_pmd(struct vm_fault *vmf)
4428 if (vma_is_anonymous(vmf->vma))
4429 return do_huge_pmd_anonymous_page(vmf);
4430 if (vmf->vma->vm_ops->huge_fault)
4431 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4436 static inline vm_fault_t wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd)
4438 if (vma_is_anonymous(vmf->vma)) {
4439 if (userfaultfd_huge_pmd_wp(vmf->vma, orig_pmd))
4440 return handle_userfault(vmf, VM_UFFD_WP);
4441 return do_huge_pmd_wp_page(vmf, orig_pmd);
4443 if (vmf->vma->vm_ops->huge_fault) {
4444 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PMD);
4451 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4456 static vm_fault_t create_huge_pud(struct vm_fault *vmf)
4461 if (vma_is_anonymous(vmf->vma))
4463 if (vmf->vma->vm_ops->huge_fault)
4464 return vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4469 static vm_fault_t wp_huge_pud(struct vm_fault *vmf, pud_t orig_pud)
4474 if (vma_is_anonymous(vmf->vma))
4476 if (vmf->vma->vm_ops->huge_fault) {
4477 vm_fault_t ret = vmf->vma->vm_ops->huge_fault(vmf, PE_SIZE_PUD);
4484 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4504 static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
4508 if (unlikely(pmd_none(*vmf->pmd))) {
4515 vmf->pte = NULL;
4518 if (pmd_devmap_trans_unstable(vmf->pmd))
4526 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4527 vmf->orig_pte = *vmf->pte;
4538 if (pte_none(vmf->orig_pte)) {
4539 pte_unmap(vmf->pte);
4540 vmf->pte = NULL;
4544 if (!vmf->pte) {
4545 if (vma_is_anonymous(vmf->vma))
4546 return do_anonymous_page(vmf);
4548 return do_fault(vmf);
4551 if (!pte_present(vmf->orig_pte))
4552 return do_swap_page(vmf);
4554 if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma))
4555 return do_numa_page(vmf);
4557 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4558 spin_lock(vmf->ptl);
4559 entry = vmf->orig_pte;
4560 if (unlikely(!pte_same(*vmf->pte, entry))) {
4561 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4564 if (vmf->flags & FAULT_FLAG_WRITE) {
4566 return do_wp_page(vmf);
4570 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4571 vmf->flags & FAULT_FLAG_WRITE)) {
4572 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4575 if (vmf->flags & FAULT_FLAG_TRIED)
4583 if (vmf->flags & FAULT_FLAG_WRITE)
4584 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4587 pte_unmap_unlock(vmf->pte, vmf->ptl);
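
handle_pte_fault() (4504-4587) routes on the vmf->orig_pte snapshot: an empty PTE goes to do_anonymous_page() or do_fault() depending on vma_is_anonymous(), a non-present PTE to do_swap_page(), a PROT_NONE PTE in an accessible vma to do_numa_page(), and a write fault on a read-only present PTE to do_wp_page(); anything left is just an access-bit/TLB fix-up under vmf->ptl. A sketch of that routing with made-up boolean fields instead of real pte bits:

#include <stdbool.h>
#include <stdio.h>

enum pte_path { PTE_ANON, PTE_FILE, PTE_SWAP, PTE_NUMA, PTE_WRITE_PROTECT, PTE_ACCESS_ONLY };

/* Model of handle_pte_fault()'s dispatch on the snapshot in vmf->orig_pte. */
struct pte_state {
    bool none;        /* pte_none(vmf->orig_pte)             */
    bool present;     /* pte_present()                       */
    bool protnone;    /* pte_protnone() on an accessible vma */
    bool writable;    /* pte_write()                         */
    bool vma_anon;    /* vma_is_anonymous(vmf->vma)          */
    bool write_fault; /* vmf->flags & FAULT_FLAG_WRITE       */
};

static enum pte_path classify_pte_fault(const struct pte_state *s)
{
    if (s->none)
        return s->vma_anon ? PTE_ANON : PTE_FILE;  /* do_anonymous_page / do_fault */
    if (!s->present)
        return PTE_SWAP;                           /* do_swap_page                 */
    if (s->protnone)
        return PTE_NUMA;                           /* do_numa_page                 */
    if (s->write_fault && !s->writable)
        return PTE_WRITE_PROTECT;                  /* do_wp_page under vmf->ptl    */
    return PTE_ACCESS_ONLY;                        /* mark young / fix spurious TLB */
}

int main(void)
{
    struct pte_state s = { .none = true, .vma_anon = true };

    printf("empty pte in anonymous vma -> %d (do_anonymous_page)\n",
           classify_pte_fault(&s));
    return 0;
}
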
4600 struct vm_fault vmf = {
4618 vmf.pud = pud_alloc(mm, p4d, address);
4619 if (!vmf.pud)
4622 if (pud_none(*vmf.pud) && __transparent_hugepage_enabled(vma)) {
4623 ret = create_huge_pud(&vmf);
4627 pud_t orig_pud = *vmf.pud;
4635 ret = wp_huge_pud(&vmf, orig_pud);
4639 huge_pud_set_accessed(&vmf, orig_pud);
4645 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4646 if (!vmf.pmd)
4650 if (pud_trans_unstable(vmf.pud))
4653 if (pmd_none(*vmf.pmd) && __transparent_hugepage_enabled(vma)) {
4654 ret = create_huge_pmd(&vmf);
4658 pmd_t orig_pmd = *vmf.pmd;
4665 pmd_migration_entry_wait(mm, vmf.pmd);
4670 return do_huge_pmd_numa_page(&vmf, orig_pmd);
4673 ret = wp_huge_pmd(&vmf, orig_pmd);
4677 huge_pmd_set_accessed(&vmf, orig_pmd);
4683 return handle_pte_fault(&vmf);
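
__handle_mm_fault() (4600-4683), where the vmf structure above is filled in, walks the pud and pmd levels before reaching handle_pte_fault(): each level may create or service a huge mapping (create_huge_pud()/wp_huge_pud(), create_huge_pmd()/wp_huge_pmd(), the pmd migration wait and do_huge_pmd_numa_page()). The sketch below models only which level claims the fault and deliberately ignores the VM_FAULT_FALLBACK paths that drop back down to the next level; all names are illustrative.

#include <stdbool.h>
#include <stdio.h>

enum walk_level { HANDLE_AT_PUD, HANDLE_AT_PMD, HANDLE_AT_PTE };

/* Simplified walk state: illustrative booleans standing in for the pud/pmd
 * checks made in __handle_mm_fault(); fallback between levels is not modelled. */
struct walk_state {
    bool pud_none, pud_huge;   /* *vmf.pud state                      */
    bool pmd_none, pmd_huge;   /* *vmf.pmd state                      */
    bool thp_enabled;          /* __transparent_hugepage_enabled(vma) */
};

static enum walk_level fault_level(const struct walk_state *w)
{
    if ((w->pud_none && w->thp_enabled) || w->pud_huge)
        return HANDLE_AT_PUD;   /* create_huge_pud() / wp_huge_pud()  */
    if ((w->pmd_none && w->thp_enabled) || w->pmd_huge)
        return HANDLE_AT_PMD;   /* create_huge_pmd() / wp_huge_pmd()  */
    return HANDLE_AT_PTE;       /* handle_pte_fault()                 */
}

int main(void)
{
    struct walk_state w = { .pmd_huge = true };

    printf("huge pmd already mapped -> level %d (pmd)\n", fault_level(&w));
    return 0;
}
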