Lines Matching refs:vm_mm (short sketches of the recurring usage patterns follow the listing)
506 pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
804 struct mm_struct *src_mm = src_vma->vm_mm;
860 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
873 struct mm_struct *src_mm = src_vma->vm_mm;
912 set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);
940 struct mm_struct *dst_mm = dst_vma->vm_mm;
941 struct mm_struct *src_mm = src_vma->vm_mm;
1047 struct mm_struct *dst_mm = dst_vma->vm_mm;
1048 struct mm_struct *src_mm = src_vma->vm_mm;
1084 struct mm_struct *dst_mm = dst_vma->vm_mm;
1085 struct mm_struct *src_mm = src_vma->vm_mm;
1121 struct mm_struct *dst_mm = dst_vma->vm_mm;
1147 struct mm_struct *dst_mm = dst_vma->vm_mm;
1148 struct mm_struct *src_mm = src_vma->vm_mm;
1475 pgd = pgd_offset(vma->vm_mm, addr);
1553 mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
1576 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1578 tlb_gather_mmu(&tlb, vma->vm_mm, start, range.end);
1579 update_hiwater_rss(vma->vm_mm);
1603 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
1605 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1606 update_hiwater_rss(vma->vm_mm);
1698 struct mm_struct *mm = vma->vm_mm;
1739 struct mm_struct *const mm = vma->vm_mm;
1812 BUG_ON(mmap_read_trylock(vma->vm_mm));
1870 BUG_ON(mmap_read_trylock(vma->vm_mm));
1962 struct mm_struct *mm = vma->vm_mm;
2316 struct mm_struct *mm = vma->vm_mm;
2636 struct mm_struct *mm = vma->vm_mm;
2872 struct mm_struct *mm = vma->vm_mm;
3041 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3159 mm_tlb_flush_pending(vmf->vma->vm_mm)))
3369 if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
3375 migration_entry_wait(vma->vm_mm, vmf->pmd,
3379 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3425 err = mem_cgroup_charge(page, vma->vm_mm,
3451 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3462 count_memcg_event_mm(vma->vm_mm, PGMAJFAULT);
3473 locked = lock_page_or_retry(page, vma->vm_mm, vmf->flags);
3503 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3528 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3529 dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
3544 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3545 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3626 if (pte_alloc(vma->vm_mm, vmf->pmd))
3647 !mm_forbids_zeropage(vma->vm_mm)) {
3651 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd,
3657 ret = check_stable_address_space(vma->vm_mm);
3675 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL))
3691 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3698 ret = check_stable_address_space(vma->vm_mm);
3709 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3723 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3765 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3819 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3825 mm_inc_nr_ptes(vma->vm_mm);
3826 pmd_populate(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3829 } else if (unlikely(pte_alloc(vma->vm_mm, vmf->pmd))) {
3856 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3866 pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, vmf->prealloc_pte);
3871 mm_inc_nr_ptes(vma->vm_mm);
3896 vmf->prealloc_pte = pte_alloc_one(vma->vm_mm);
3902 vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
3913 add_mm_counter(vma->vm_mm, mm_counter_file(page), HPAGE_PMD_NR);
3921 set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
3992 inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
3996 inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
3999 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4040 ret = check_stable_address_space(vmf->vma->vm_mm);
4134 vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
4202 if (mem_cgroup_charge(vmf->cow_page, vma->vm_mm, GFP_KERNEL)) {
4274 struct mm_struct *vm_mm = vma->vm_mm;
4288 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm,
4315 pte_free(vm_mm, vmf->prealloc_pte);
4353 vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd);
4557 vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd);
4608 struct mm_struct *mm = vma->vm_mm;
4761 count_memcg_event_mm(vma->vm_mm, PGFAULT);
4779 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4991 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5012 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
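
The block at lines 1553-1606 shows the teardown idiom the zap/unmap paths wrap around a page-table walk: secondary MMUs are notified, TLB flushes are batched in an mmu_gather, and the RSS high-water mark is snapshotted, all keyed off vma->vm_mm. The sketch below is a simplification of that bracketing, not the code at those lines; the function name is invented, the body of the walk is elided, and the four-argument tlb_gather_mmu() / three-argument tlb_finish_mmu() match the pre-5.13 signatures the listing itself uses.

#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <asm/tlb.h>

/* Hypothetical helper: bracket a range teardown the way the zap paths do. */
static void zap_range_sketch(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	/* Describe the range being cleared for secondary MMUs (KVM, IOMMU, ...). */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, start, end);
	tlb_gather_mmu(&tlb, mm, start, end);	/* batch TLB flushing and page freeing */
	update_hiwater_rss(mm);			/* record peak RSS before pages go away */

	mmu_notifier_invalidate_range_start(&range);
	/* ... walk pgd/p4d/pud/pmd/pte over [start, end) and clear entries ... */
	mmu_notifier_invalidate_range_end(&range);

	tlb_finish_mmu(&tlb, start, end);	/* flush and free what was batched */
}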
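
Most of the hits between lines 3041 and 3999 are the fault paths repeating one idiom: resolve the owning mm through vma->vm_mm (or vmf->vma->vm_mm), take the PTE lock for the faulting address with pte_offset_map_lock(), and only then read or write the entry with set_pte_at(), adjusting the RSS counters on the same mm. Below is a minimal sketch of that idiom with an invented helper name and return value; it assumes the pmd is already populated and stable, as it is at that point in the fault paths.

#include <linux/errno.h>
#include <linux/mm.h>

/* Hypothetical helper: install @newpte at @addr if the slot is still empty. */
static int install_pte_sketch(struct vm_area_struct *vma, pmd_t *pmd,
			      unsigned long addr, pte_t newpte)
{
	struct mm_struct *mm = vma->vm_mm;	/* the one mm this VMA belongs to */
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_none(*pte)) {			/* raced: someone else filled it in */
		pte_unmap_unlock(pte, ptl);
		return -EBUSY;
	}
	set_pte_at(mm, addr, pte, newpte);	/* mm and addr must match the pte being set */
	/* The real fault paths also bump RSS here, e.g. MM_ANONPAGES at lines 3528/3709. */
	pte_unmap_unlock(pte, ptl);
	return 0;
}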
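
Lines 3765-3871 and 3896 show the preallocated-PTE-table pattern: a PTE page is allocated up front with pte_alloc_one(vma->vm_mm), then published under the PMD lock only if the PMD is still empty, with mm_inc_nr_ptes() accounting it; if the race is lost the table is handed back with pte_free() (line 4315). A sketch under those assumptions, again with an invented name; the real helpers additionally order initialisation of the new table before making it visible through the PMD.

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/pgalloc.h>

/* Hypothetical helper: make sure @pmd points at a PTE table. */
static int pte_table_populate_sketch(struct vm_area_struct *vma, pmd_t *pmd)
{
	struct mm_struct *mm = vma->vm_mm;
	pgtable_t new = pte_alloc_one(mm);	/* allocate outside the lock */
	spinlock_t *ptl;

	if (!new)
		return -ENOMEM;

	ptl = pmd_lock(mm, pmd);
	if (likely(pmd_none(*pmd))) {
		mm_inc_nr_ptes(mm);		/* account the new PTE page to this mm */
		pmd_populate(mm, pmd, new);
		new = NULL;
	}
	spin_unlock(ptl);

	if (new)				/* lost the race: another thread populated it */
		pte_free(mm, new);
	return 0;
}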
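
The last two hits (lines 4991 and 5012) call follow_pte(vma->vm_mm, ...), which looks up the PTE for an address and, on success, returns with the PTE lock held, so the caller must pte_unmap_unlock() when done. The sketch below shows that caller pattern; the helper name and the pfn printout are illustrative only, the mmap lock is assumed to be held for read, and follow_pte() is intended for IO/PFN-style special mappings rather than as a general page-table accessor.

#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical helper: report the pfn currently mapped at @address. */
static int peek_pfn_sketch(struct vm_area_struct *vma, unsigned long address)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;			/* not mapped, or mapped at a higher level */

	pr_info("addr %#lx -> pfn %#lx\n", address, pte_pfn(*ptep));
	pte_unmap_unlock(ptep, ptl);		/* follow_pte() returned with the PTE locked */
	return 0;
}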