Lines Matching defs:address
796 * Convert the address within this vma to the page offset within
800 struct vm_area_struct *vma, unsigned long address)
802 return ((address - vma->vm_start) >> huge_page_shift(h)) +
807 unsigned long address)
809 return vma_hugecache_offset(hstate_vma(vma), vma, address);
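The matches at 796-809 are vma_hugecache_offset() and its wrapper, which convert a user virtual address into a file offset expressed in huge-page units. A minimal sketch of that arithmetic, with plain-integer parameters standing in for the real struct hstate and struct vm_area_struct arguments (an assumption for illustration, not the kernel signature):

    /*
     * Sketch only: hpage_shift stands in for huge_page_shift(h).  In the
     * kernel, vma->vm_pgoff is kept in PAGE_SIZE units, so it is scaled
     * down by huge_page_order(h) before being added.
     */
    static unsigned long hugecache_offset(unsigned long address,
                                          unsigned long vm_start,
                                          unsigned long vm_pgoff,
                                          unsigned int hpage_shift,
                                          unsigned int hpage_order)
    {
            /* whole huge pages from the start of the VMA ... */
            return ((address - vm_start) >> hpage_shift)
                    /* ... plus the VMA's starting offset in the file */
                    + (vm_pgoff >> hpage_order);
    }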
949 * address space within the inode. This original address space
951 * address space embedded within the inode.
1010 * This address is already reserved by another process (chg == 0),
1138 unsigned long address, int avoid_reserve,
1161 nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1632 * Find and lock address space (mapping) in write mode.
2005 unsigned long address)
2014 node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
2303 struct vm_area_struct *vma, unsigned long address,
2307 long rc = vma_needs_reservation(h, vma, address);
2323 rc = vma_add_reservation(h, vma, address);
2331 vma_end_reservation(h, vma, address);
3776 unsigned long address, pte_t *ptep)
3781 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3782 update_mmu_cache(vma, address, ptep);
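The fragment at 3776-3782 is the tail of the helper that makes an existing hugetlb PTE dirty and writable after a successful CoW (set_huge_ptep_writable() in mainline; the name and the first lines of the body are inferred here, since only the last two lines are matched):

    /* Hedged reconstruction, not a verbatim copy of the matched function. */
    static void set_huge_ptep_writable(struct vm_area_struct *vma,
                                       unsigned long address, pte_t *ptep)
    {
            pte_t entry;

            /* Mark the current huge PTE dirty and writable ... */
            entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
            /* ... and only update the MMU cache if the entry actually changed. */
            if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
                    update_mmu_cache(vma, address, ptep);
    }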
3927 unsigned long address;
3955 address = start;
3956 for (; address < end; address += sz) {
3957 ptep = huge_pte_offset(mm, address, sz);
3962 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
3964 tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
3980 huge_pte_clear(mm, address, ptep, sz);
4004 pte = huge_ptep_get_and_clear(mm, address, ptep);
4005 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
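Lines 3927-4005 are the address walk inside __unmap_hugepage_range(). A condensed, hedged skeleton of that loop; page-table locking, the non-present and migration/poison entry cases, and rmap/accounting work are left out:

    for (address = start; address < end; address += sz) {
            ptep = huge_pte_offset(mm, address, sz);
            if (!ptep)
                    continue;

            /*
             * A shared PMD is detached rather than cleared entry by entry,
             * so the whole PUD-sized range it covered must be flushed.
             */
            if (huge_pmd_unshare(mm, vma, &address, ptep)) {
                    tlb_flush_pmd_range(tlb, address & PUD_MASK, PUD_SIZE);
                    continue;
            }

            /* Clear the entry and queue the huge TLB flush for it. */
            pte = huge_ptep_get_and_clear(mm, address, ptep);
            tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
    }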
4090 struct page *page, unsigned long address)
4101 address = address & huge_page_mask(h);
4102 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
4133 unmap_hugepage_range(iter_vma, address,
4134 address + huge_page_size(h), page);
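In the unmap_ref_private() fragment at 4090-4134, the faulting address is first rounded down to a huge-page boundary, but the pgoff used for the file interval-tree walk stays in PAGE_SIZE units, unlike the huge-page-unit index computed by vma_hugecache_offset() above. A small sketch of just that index computation, with hypothetical scalar parameters:

    /*
     * Sketch: hpage_mask stands in for huge_page_mask(h).  pgoff stays in
     * PAGE_SIZE units because vma_interval_tree lookups use base pages.
     */
    static unsigned long base_page_index(unsigned long address,
                                         unsigned long vm_start,
                                         unsigned long vm_pgoff,
                                         unsigned long hpage_mask)
    {
            address &= hpage_mask;          /* round down to the huge page */
            return ((address - vm_start) >> PAGE_SHIFT) + vm_pgoff;
    }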
4146 unsigned long address, pte_t *ptep,
4154 unsigned long haddr = address & huge_page_mask(h);
4173 * page is used to determine if the reserve at this address was
4176 * of the full address range.
4249 copy_user_huge_page(new_page, old_page, address, vma,
4289 /* Return the pagecache page at a given address within a VMA */
4291 struct vm_area_struct *vma, unsigned long address)
4297 idx = vma_hugecache_offset(h, vma, address);
4303 * Return whether there is a pagecache page to back the given address within the VMA.
4307 struct vm_area_struct *vma, unsigned long address)
4314 idx = vma_hugecache_offset(h, vma, address);
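The two helpers matched at 4289-4314 resolve the address to a hugetlb page-cache index and then probe the mapping: one returns the page locked, the other only tests for presence. A hedged sketch of the pair, with the bodies reconstructed around the matched vma_hugecache_offset() calls:

    static struct page *hugetlbfs_pagecache_page(struct hstate *h,
                    struct vm_area_struct *vma, unsigned long address)
    {
            struct address_space *mapping = vma->vm_file->f_mapping;
            pgoff_t idx = vma_hugecache_offset(h, vma, address);

            /* Returns the page locked, or NULL if nothing is cached there. */
            return find_lock_page(mapping, idx);
    }

    static bool hugetlbfs_pagecache_present(struct hstate *h,
                    struct vm_area_struct *vma, unsigned long address)
    {
            struct address_space *mapping = vma->vm_file->f_mapping;
            pgoff_t idx = vma_hugecache_offset(h, vma, address);
            struct page *page = find_get_page(mapping, idx);

            if (page)
                    put_page(page);
            return page != NULL;
    }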
4348 unsigned long address, pte_t *ptep, unsigned int flags)
4357 unsigned long haddr = address & huge_page_mask(h);
4390 .address = haddr,
4436 clear_huge_page(page, address, pages_per_huge_page(h));
4459 * don't have a hwpoisoned swap entry for the errored virtual address.
4501 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4554 unsigned long address, unsigned int flags)
4566 unsigned long haddr = address & huge_page_mask(h);
4618 return hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4675 ret = hugetlb_cow(mm, vma, address, ptep,
5039 unsigned long address, unsigned long end, pgprot_t newprot)
5042 unsigned long start = address;
5059 BUG_ON(address >= end);
5064 for (; address < end; address += huge_page_size(h)) {
5066 ptep = huge_pte_offset(mm, address, huge_page_size(h));
5070 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
5089 set_huge_swap_pte_at(mm, address, ptep,
5099 old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
5102 huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
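Lines 5039-5102 come from hugetlb_change_protection(), which walks the range in huge_page_size(h) steps and rewrites each present entry with the new protection. A hedged skeleton of the per-entry step; locking, swap/migration entries, arch_make_huge_pte(), and the pages-changed accounting are omitted:

    for (; address < end; address += huge_page_size(h)) {
            ptep = huge_pte_offset(mm, address, huge_page_size(h));
            if (!ptep)
                    continue;

            /* An unshared PMD already dropped the mapping for this range. */
            if (huge_pmd_unshare(mm, vma, &address, ptep))
                    continue;

            old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
            pte = huge_pte_modify(old_pte, newprot);
            huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
    }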
5404 * table entries associated with the address space. This is important as we
5489 * This update of the passed address optimizes loops sequentially
5492 * Update address to the 'last page' in the cleared area so that
5550 * entry at address @addr
5553 * address @addr, or NULL if a !p*d_present() entry is encountered and the
5592 follow_huge_addr(struct mm_struct *mm, unsigned long address,
5600 unsigned long address, hugepd_t hpd, int flags, int pdshift)
5607 follow_huge_pmd_pte(struct vm_area_struct *vma, unsigned long address, int flags)
5621 ptep = huge_pte_offset(mm, address, huge_page_size(h));
5629 ((address & ~huge_page_mask(h)) >> PAGE_SHIFT);
5659 follow_huge_pud(struct mm_struct *mm, unsigned long address,
5665 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
5669 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
5674 return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
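The follow_huge_*() matches at 5592-5674 all end in the same idiom: take the head page of the huge mapping entry and index to the base page that covers the address. A small illustration of that offset arithmetic, using a hypothetical helper name:

    /*
     * Sketch of the subpage math seen in follow_huge_pud()/follow_huge_pgd():
     * level_mask is PUD_MASK or PGDIR_MASK, head is pte_page() of the entry.
     */
    static struct page *huge_subpage(struct page *head, unsigned long address,
                                     unsigned long level_mask)
    {
            /* offset within the huge mapping, counted in base pages */
            return head + ((address & ~level_mask) >> PAGE_SHIFT);
    }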