Lines matching defs:address
115 * Randomize the address space (stacks, mmaps, brk, etc.).
345 * of the address space and the top of it (using -1 for the
348 * the address space, but end 0 and ceiling 0 refer to the top
579 * This restricts such mappings to be a linear translation from virtual address
1004 * will allocate page according to address). This
1531 * @tlb: address of the caller's struct mmu_gather
1533 * @start_addr: virtual address at which to start unmapping
1534 * @end_addr: virtual address at which to end unmapping
1540 * The VMA list must be sorted in ascending virtual address order.
1542 * unmap_vmas() assumes that the caller will flush the whole unmapped address
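The kernel-doc fragments above (lines 1531-1542) describe unmap_vmas(); the caller is expected to wrap it in an mmu_gather batch and flush the whole unmapped range afterwards. Below is a minimal sketch of that caller-side pattern, assuming the 5.10-era signatures visible in this listing (tlb_gather_mmu()/tlb_finish_mmu() still taking a start/end range); the helper name is illustrative, not from the source.

#include <linux/mm.h>
#include <linux/swap.h>
#include <asm/tlb.h>

/* Sketch only: the caller-side pattern implied by the unmap_vmas() kernel-doc. */
static void unmap_range_sketch(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;

	lru_add_drain();
	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);	/* start batching TLB work */
	update_hiwater_rss(vma->vm_mm);
	unmap_vmas(&tlb, vma, start, end);		/* VMA list sorted by address */
	tlb_finish_mmu(&tlb, start, end);		/* flush the whole unmapped range */
}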
1564 * @start: starting address of pages to zap
1590 * @address: starting address of pages to zap
1596 static void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
1604 address, address + size);
1605 tlb_gather_mmu(&tlb, vma->vm_mm, address, range.end);
1608 unmap_single_vma(&tlb, vma, address, range.end, details);
1610 tlb_finish_mmu(&tlb, address, range.end);
1616 * @address: starting address of pages to zap
1621 * The entire address range must be fully contained within the vma.
1624 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1627 if (address < vma->vm_start || address + size > vma->vm_end ||
1631 zap_page_range_single(vma, address, size, NULL);
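zap_vma_ptes() (line 1624) is the exported wrapper around zap_page_range_single() that drivers use to drop PTEs they installed earlier in a VM_IO/VM_PFNMAP mapping; as line 1621 notes, the range must be fully contained within the VMA. A hedged sketch of such a caller follows; the helper name is hypothetical.

#include <linux/mm.h>

/*
 * Illustrative only: a driver that earlier populated part of a user mapping
 * (e.g. with vmf_insert_pfn()) tears those PTEs down again, for instance on
 * device reset.  Zapping the whole VMA trivially satisfies the requirement
 * that the range lie within it.
 */
static void mydrv_zap_user_mapping(struct vm_area_struct *vma)	/* hypothetical */
{
	zap_vma_ptes(vma, vma->vm_start, vma->vm_end - vma->vm_start);
}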
1791 * @addr: target start user address of these pages
1836 * @addr: target user address of this page
2015 * @addr: target user address of this page
2064 * @addr: target user address of this page
2151 * @addr: target user address of this page
2301 * @addr: target page aligned user address to start at
2302 * @pfn: page frame number of kernel physical memory address
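The @addr/@pfn fragments at lines 2301-2302 come from the remap_pfn_range() kernel-doc. The classic caller is a driver's .mmap handler mapping device memory into userspace; a sketch follows, where MYDRV_PHYS_BASE and mydrv_mmap are made-up names, not anything from the source.

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDRV_PHYS_BASE	0xfe000000UL	/* hypothetical device memory base */

/* Sketch of the usual remap_pfn_range() call from a driver .mmap handler. */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long pfn = MYDRV_PHYS_BASE >> PAGE_SHIFT;	/* page frame number */

	/* @addr must be page aligned; vm_start always is */
	return remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
}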
2637 unsigned long addr = vmf->address;
2647 * just copying from the original user address. If that
2738 * Notify the address space that the page is about to become writable so that
2844 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2847 if (ptep_set_access_flags(vma, vmf->address, vmf->pte, entry, 1))
2848 update_mmu_cache(vma, vmf->address, vmf->pte);
2884 vmf->address);
2889 vmf->address);
2897 * the same address and we will handle the fault
2914 vmf->address & PAGE_MASK,
2915 (vmf->address & PAGE_MASK) + PAGE_SIZE);
2921 vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl);
2932 flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
2942 ptep_clear_flush_notify(vma, vmf->address, vmf->pte);
2943 page_add_new_anon_rmap(new_page, vma, vmf->address, false);
2947 uxpte_set_present(vma, vmf->address);
2955 set_pte_at_notify(mm, vmf->address, vmf->pte, entry);
2956 update_mmu_cache(vma, vmf->address, vmf->pte);
2988 update_mmu_tlb(vma, vmf->address, vmf->pte);
3041 vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address,
3048 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
3054 vmf->address, vmf->page))) {
3110 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address,
3128 * to a shared page. It is done by copying the page to a new address
3160 flush_tlb_page(vmf->vma, vmf->address);
3162 vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
3202 if (unlikely(xpm_integrity_validate_hook(vmf->vma, vmf->flags, vmf->address,
3288 * @mapping: The address space containing pages to be unmapped.
3293 * Unmap the pages in this address space from any userspace process which
3320 * @mapping: the address space containing mmaps to be unmapped.
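Lines 3288-3320 are from the unmap_mapping_pages()/unmap_mapping_range() kernel-doc. A typical caller is a filesystem dropping every user mapping of a byte range, for example around truncation; a minimal sketch, assuming the long-standing signature with byte offsets and an even_cows flag (the helper name is illustrative):

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Sketch: unmap a byte range of a file from every process mapping it.
 * even_cows=1 also zaps private COW copies of the affected pages.
 */
static void drop_user_mappings(struct inode *inode, loff_t holebegin,
			       loff_t holelen)
{
	unmap_mapping_range(inode->i_mapping, holebegin, holelen, 1);
}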
3376 vmf->address);
3380 vmf->address, &vmf->ptl);
3397 print_bad_pte(vma, vmf->address, vmf->orig_pte, NULL);
3405 page = lookup_swap_cache(entry, vma, vmf->address);
3415 vmf->address);
3452 vmf->address, &vmf->ptl);
3491 page = ksm_might_need_to_copy(page, vma, vmf->address);
3503 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3523 vmf->address, page))){
3544 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, pte);
3545 arch_do_swap_page(vma->vm_mm, vma, vmf->address, pte, vmf->orig_pte);
3550 page_add_new_anon_rmap(page, vma, vmf->address, false);
3553 do_page_add_anon_rmap(page, vma, vmf->address, exclusive);
3582 update_mmu_cache(vma, vmf->address, vmf->pte);
3638 if(xpm_integrity_check_hook(vma, vmf->flags, vmf->address,
3648 entry = pte_mkspecial(pfn_pte(my_zero_pfn(vmf->address),
3652 vmf->address, &vmf->ptl);
3654 update_mmu_tlb(vma, vmf->address, vmf->pte);
3671 page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
3691 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3694 update_mmu_cache(vma, vmf->address, vmf->pte);
3710 page_add_new_anon_rmap(page, vma, vmf->address, false);
3717 uxpte_set_present(vma, vmf->address);
3723 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
3726 update_mmu_cache(vma, vmf->address, vmf->pte);
3856 vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
3879 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
3976 update_mmu_tlb(vma, vmf->address, vmf->pte);
3982 vmf->address, page)))
3993 page_add_new_anon_rmap(page, vma, vmf->address, false);
3999 set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
4002 update_mmu_cache(vma, vmf->address, vmf->pte);
4085 * do_fault_around() tries to map few pages around the fault address. The hope
4094 * the page table corresponding to the fault address.
4103 * The virtual address of the area that we map is naturally aligned to
4110 unsigned long address = vmf->address, nr_pages, mask;
4119 vmf->address = max(address & mask, vmf->vma->vm_start);
4120 off = ((address - vmf->address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
4128 ((vmf->address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) +
4153 vmf->pte -= (vmf->address >> PAGE_SHIFT) - (address >> PAGE_SHIFT);
4158 vmf->address = address;
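Lines 4085-4158 are from do_fault_around(), which maps a naturally aligned window of page-table entries around the fault address. Below is a small userspace worked example of that alignment arithmetic, assuming 4 KiB pages, 512 PTEs per table and the default fault_around_bytes of 65536; the kernel additionally clamps the window start to vma->vm_start and to the containing page table, which is omitted here.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PTRS_PER_PTE	512UL

int main(void)
{
	unsigned long fault_around_bytes = 65536;	/* assumed default: 16 pages */
	unsigned long address = 0x7f0000012000UL;	/* page-aligned fault address */
	unsigned long nr_pages = fault_around_bytes >> PAGE_SHIFT;
	unsigned long mask = ~(nr_pages * PAGE_SIZE - 1) & PAGE_MASK;
	unsigned long start = address & mask;
	unsigned long off = ((address - start) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);

	/* prints start=0x7f0000010000 off=2: the fault lands 2 pages into a 16-page window */
	printf("start=%#lx off=%lu\n", start, off);
	return 0;
}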
4198 vmf->cow_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
4214 copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
4238 * Check if the backing address space wants to know that the page is
4290 vmf->address,
4364 old_pte = ptep_modify_prot_start(vma, vmf->address, vmf->pte);
4369 ptep_modify_prot_commit(vma, vmf->address, vmf->pte, old_pte, pte);
4370 update_mmu_cache(vma, vmf->address, vmf->pte);
4372 page = vm_normal_page(vma, vmf->address, pte);
4396 * Flag if the page is shared between multiple address spaces. This
4404 target_nid = numa_migrate_prep(page, vma, vmf->address, page_nid,
4451 __split_huge_pmd(vmf->vma, vmf->pmd, vmf->address, false, NULL);
4484 __split_huge_pud(vmf->vma, vmf->pud, vmf->address);
4526 vmf->pte = pte_offset_map(vmf->pmd, vmf->address);
4561 update_mmu_tlb(vmf->vma, vmf->address, vmf->pte);
4570 if (ptep_set_access_flags(vmf->vma, vmf->address, vmf->pte, entry,
4572 update_mmu_cache(vmf->vma, vmf->address, vmf->pte);
4584 flush_tlb_fix_spurious_fault(vmf->vma, vmf->address);
4598 unsigned long address, unsigned int flags)
4602 .address = address & PAGE_MASK,
4604 .pgoff = linear_page_index(vma, address),
4613 pgd = pgd_offset(mm, address);
4614 p4d = p4d_alloc(mm, pgd, address);
4618 vmf.pud = pud_alloc(mm, p4d, address);
4645 vmf.pmd = pmd_alloc(mm, vmf.pud, address);
4692 * @address: the faulted address.
4702 unsigned long address, unsigned int flags,
4710 * - Unsuccessful faults (e.g. when the address wasn't valid). That
4742 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
4744 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
4753 vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
4779 ret = hugetlb_fault(vma->vm_mm, vma, address, flags);
4781 ret = __handle_mm_fault(vma, address, flags);
4795 mm_account_fault(regs, address, flags, ret);
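handle_mm_fault() (line 4753) is the entry point that architecture page-fault handlers call once the faulting VMA has been found; lines 4692-4744 show the accounting it performs against the passed-in pt_regs. A sketch of the usual arch-side call, assuming the 5.10-era signature that takes regs; retry, OOM and signal delivery are trimmed, and the helper name is illustrative.

#include <linux/mm.h>
#include <linux/sched/signal.h>

/*
 * Sketch: the core of an architecture fault handler, after the VMA has been
 * looked up under mmap_lock.  Retry/OOM/SIGBUS handling is omitted.
 */
static vm_fault_t fault_one_address(struct vm_area_struct *vma,
				    unsigned long address, bool write,
				    struct pt_regs *regs)
{
	unsigned int flags = FAULT_FLAG_DEFAULT;
	vm_fault_t fault;

	if (write)
		flags |= FAULT_FLAG_WRITE;

	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault_signal_pending(fault, regs))
		return fault;	/* interrupted by a fatal signal */

	return fault;		/* caller checks VM_FAULT_ERROR / VM_FAULT_RETRY */
}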
4806 int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
4808 p4d_t *new = p4d_alloc_one(mm, address);
4829 int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
4831 pud_t *new = pud_alloc_one(mm, address);
4853 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
4856 pmd_t *new = pmd_alloc_one(mm, address);
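__p4d_alloc()/__pud_alloc()/__pmd_alloc() (lines 4806-4856) are the slow paths behind the p4d_alloc()/pud_alloc()/pmd_alloc() helpers that __handle_mm_fault() uses at lines 4613-4645 to build the page-table spine on demand. A condensed sketch of that walk-and-allocate pattern (the function name is illustrative):

#include <linux/mm.h>

/*
 * Sketch: allocate the page-table levels covering one address, top down.
 * A NULL return means an allocation failed (the fault path reports this
 * as VM_FAULT_OOM).
 */
static pmd_t *walk_alloc_pmd(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);	/* the top level always exists */
	p4d_t *p4d = p4d_alloc(mm, pgd, address);
	pud_t *pud;

	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, address);
	if (!pud)
		return NULL;
	return pmd_alloc(mm, pud, address);
}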
4873 int follow_invalidate_pte(struct mm_struct *mm, unsigned long address,
4883 pgd = pgd_offset(mm, address);
4887 p4d = p4d_offset(pgd, address);
4891 pud = pud_offset(p4d, address);
4895 pmd = pmd_offset(pud, address);
4904 NULL, mm, address & PMD_MASK,
4905 (address & PMD_MASK) + PMD_SIZE);
4923 address & PAGE_MASK,
4924 (address & PAGE_MASK) + PAGE_SIZE);
4927 ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
4941 * follow_pte - look up PTE at a user virtual address
4942 * @mm: the mm_struct of the target address space
4943 * @address: user virtual address
4961 int follow_pte(struct mm_struct *mm, unsigned long address,
4964 return follow_invalidate_pte(mm, address, NULL, ptepp, NULL, ptlp);
4969 * follow_pfn - look up PFN at a user virtual address
4971 * @address: user virtual address
4981 int follow_pfn(struct vm_area_struct *vma, unsigned long address,
4991 ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
5002 unsigned long address, unsigned int flags,
5012 if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
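follow_pte()/follow_pfn() (lines 4941-4991) resolve the PTE or PFN behind a user virtual address and are only meaningful for VM_IO/VM_PFNMAP mappings; follow_pte() returns with the page-table lock held. The sketch below re-expresses the follow_pfn()-style lookup implied by those fragments (the helper name is illustrative):

#include <linux/mm.h>

/*
 * Sketch: look up the PFN backing a user virtual address in a VM_IO or
 * VM_PFNMAP mapping.  follow_pte() hands back the PTE with its lock held,
 * so the PFN is extracted before unlocking.
 */
static int lookup_user_pfn(struct vm_area_struct *vma, unsigned long address,
			   unsigned long *pfn)
{
	spinlock_t *ptl;
	pte_t *ptep;
	int ret;

	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;	/* only pfn-based mappings are supported */

	ret = follow_pte(vma->vm_mm, address, &ptep, &ptl);
	if (ret)
		return ret;

	*pfn = pte_pfn(*ptep);
	pte_unmap_unlock(ptep, ptl);
	return 0;
}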
5056 * Access another process' address space as given in mm. If non-NULL, use the
5123 * access_remote_vm - access another process' address space
5124 * @mm: the mm_struct of the target address space
5125 * @addr: start address to access
5141 * Access another process' address space.
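access_remote_vm() (lines 5123-5141) copies data to or from another process's address space, as the ptrace and /proc/<pid>/mem paths do. A hedged sketch of a read-side caller follows; real callers obtain the mm via mm_access() so ptrace permission checks apply, which is skipped here, and the helper name is illustrative.

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Sketch: read bytes from another task's address space.  Returns the number
 * of bytes copied (possibly short or zero for unmapped ranges), or -ESRCH
 * if the task has no mm.  Permission checking (mm_access()) is omitted.
 */
static int peek_remote(struct task_struct *task, unsigned long addr,
		       void *buf, int len)
{
	struct mm_struct *mm = get_task_mm(task);
	int copied;

	if (!mm)
		return -ESRCH;

	copied = access_remote_vm(mm, addr, buf, len, FOLL_FORCE);
	mmput(mm);
	return copied;
}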