Lines matching defs:addr (mm/hugetlb.c)

2476 		struct vm_area_struct *vma, unsigned long addr)
2484 nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2682 * vma_needs_reservation is called to determine if the huge page at addr
2715 struct vm_area_struct *vma, unsigned long addr,
2727 idx = vma_hugecache_offset(h, vma, addr);
2795 struct vm_area_struct *vma, unsigned long addr)
2797 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2801 struct vm_area_struct *vma, unsigned long addr)
2803 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2807 struct vm_area_struct *vma, unsigned long addr)
2809 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2813 struct vm_area_struct *vma, unsigned long addr)
2815 return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2819 struct vm_area_struct *vma, unsigned long addr)
2821 return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
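
The five helpers at lines 2795-2821 are thin wrappers that forward to __vma_reservation_common() with a different mode constant; only vma_end_reservation() discards the return value (line 2809). A minimal user-space sketch of that dispatch pattern, with the kernel's reservation-map bookkeeping replaced by a single counter purely for illustration:

/* Sketch of the mode-enum dispatch used by the vma_*_reservation()
 * wrappers above; everything below is a stand-in, not kernel code. */
#include <stdio.h>

enum vma_resv_mode {
    VMA_NEEDS_RESV,
    VMA_COMMIT_RESV,
    VMA_END_RESV,
    VMA_ADD_RESV,
    VMA_DEL_RESV,
};

static long outstanding_resv;    /* stand-in for the per-VMA reservation map */

static long vma_reservation_common(unsigned long addr, enum vma_resv_mode mode)
{
    (void)addr;    /* the sketch keys everything off one counter */
    switch (mode) {
    case VMA_NEEDS_RESV:
        return outstanding_resv == 0;    /* 1: a reservation is still needed */
    case VMA_COMMIT_RESV:
    case VMA_ADD_RESV:
        return ++outstanding_resv;
    case VMA_END_RESV:
    case VMA_DEL_RESV:
        return --outstanding_resv;
    }
    return 0;
}

static long vma_needs_reservation(unsigned long addr)
{
    return vma_reservation_common(addr, VMA_NEEDS_RESV);
}

static long vma_commit_reservation(unsigned long addr)
{
    return vma_reservation_common(addr, VMA_COMMIT_RESV);
}

static void vma_end_reservation(unsigned long addr)
{
    /* Return value intentionally ignored, as at line 2809. */
    (void)vma_reservation_common(addr, VMA_END_RESV);
}

int main(void)
{
    unsigned long addr = 0x200000;

    if (vma_needs_reservation(addr))
        printf("huge page at %#lx still needs a reservation\n", addr);
    vma_commit_reservation(addr);
    vma_end_reservation(addr);
    return 0;
}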
3045 unsigned long addr, int avoid_reserve)
3062 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
3076 vma_end_reservation(h, vma, addr);
3112 folio = dequeue_hugetlb_folio_vma(h, vma, addr, avoid_reserve, gbl_chg);
3115 folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
3141 map_commit = vma_commit_reservation(h, vma, addr);
3171 vma_end_reservation(h, vma, addr);
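
Lines 3045-3171 show the ordering alloc_hugetlb_folio() follows around those wrappers: ask whether a reservation is needed, try the pre-allocated pool (dequeue_hugetlb_folio_vma), fall back to a fresh allocation (alloc_buddy_hugetlb_folio_with_mpol), then commit the reservation, calling vma_end_reservation() on the failure paths instead. A control-flow sketch in which every helper is a simplified stand-in:

/* Not kernel code: only the check/allocate/commit-or-back-out ordering
 * visible at lines 3062-3171 is modelled. */
#include <stddef.h>
#include <stdio.h>

struct folio { int dummy; };

static long vma_needs_reservation(unsigned long addr) { return 1; }
static void vma_commit_reservation(unsigned long addr) { }
static void vma_end_reservation(unsigned long addr) { }
static struct folio *dequeue_folio(unsigned long addr) { return NULL; }
static struct folio *alloc_buddy_folio(unsigned long addr)
{
    static struct folio f;
    return &f;
}

static struct folio *alloc_hugetlb_folio_sketch(unsigned long addr)
{
    long map_chg = vma_needs_reservation(addr);
    struct folio *folio;

    if (map_chg < 0)
        return NULL;                     /* reservation map error */

    folio = dequeue_folio(addr);         /* reserved/free pool first */
    if (!folio)
        folio = alloc_buddy_folio(addr); /* fall back to fresh pages */
    if (!folio) {
        vma_end_reservation(addr);       /* back out the pending entry */
        return NULL;
    }

    vma_commit_reservation(addr);        /* reservation is now consumed */
    return folio;
}

int main(void)
{
    printf("got folio: %s\n",
           alloc_hugetlb_folio_sketch(0x200000) ? "yes" : "no");
    return 0;
}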
4907 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4909 if (addr & ~(huge_page_mask(hstate_vma(vma))))
4915 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4917 if (addr & ~PUD_MASK) {
4923 unsigned long floor = addr & PUD_MASK;
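
hugetlb_vm_op_split() at lines 4907-4923 rejects split points that are not huge-page aligned and, when the point is not PUD-aligned, unshares PMDs across the whole PUD_SIZE interval containing it. The alignment arithmetic in user space, assuming x86-64-style 2 MiB huge pages under 1 GiB PUDs (other architectures differ):

#include <stdio.h>

#define HPAGE_SIZE  (2UL << 20)          /* assumed huge page size */
#define HPAGE_MASK  (~(HPAGE_SIZE - 1))
#define PUD_SIZE    (1UL << 30)          /* assumed PUD coverage   */
#define PUD_MASK    (~(PUD_SIZE - 1))

int main(void)
{
    unsigned long addr = 0x40400000;     /* 1 GiB + 4 MiB, for example */

    if (addr & ~HPAGE_MASK) {
        printf("-EINVAL: %#lx is not huge-page aligned\n", addr);
        return 1;
    }

    if (addr & ~PUD_MASK) {
        /* Split point lies inside a PUD: shared PMDs in the
         * surrounding PUD_SIZE interval must be unshared first. */
        unsigned long floor = addr & PUD_MASK;
        unsigned long ceil  = floor + PUD_SIZE;

        printf("unshare PMDs in [%#lx, %#lx) around %#lx\n",
               floor, ceil, addr);
    }
    return 0;
}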
5021 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
5027 hugepage_add_new_anon_rmap(new_folio, vma, addr);
5030 set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
5041 unsigned long addr;
5068 for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
5070 src_pte = hugetlb_walk(src_vma, addr, sz);
5072 addr |= last_addr_mask;
5075 dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
5090 addr |= last_addr_mask;
5107 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5122 set_huge_pte_at(src, addr, src_pte, entry, sz);
5126 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5132 set_huge_pte_at(dst, addr, dst_pte,
5159 new_folio = alloc_hugetlb_folio(dst_vma, addr, 1);
5167 addr, dst_vma);
5180 restore_reserve_on_error(h, dst_vma, addr,
5186 hugetlb_install_folio(dst_vma, dst_pte, addr,
5201 huge_ptep_set_wrprotect(src, addr, src_pte);
5208 set_huge_pte_at(dst, addr, dst_pte, entry, sz);
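
The copy loop at lines 5068-5208 advances in huge-page steps; when hugetlb_walk() finds no page table (lines 5070-5072) or the destination PMD turns out to be shared (line 5090), addr |= last_addr_mask bumps addr to the last huge page covered by that table so the loop's addr += sz lands at the start of the next one. A sketch of that skip with assumed sizes (2 MiB pages under 1 GiB PUDs); in the kernel the mask comes from hugetlb_mask_last_page():

#include <stdio.h>

int main(void)
{
    const unsigned long sz = 2UL << 20;            /* huge page size (assumed) */
    const unsigned long pud_size = 1UL << 30;      /* PUD coverage (assumed)   */
    const unsigned long last_addr_mask = pud_size - sz;

    unsigned long addr, end = 0x80000000;

    /* Pretend every walk fails, as if the range had no page tables at all. */
    for (addr = 0x40000000; addr < end; addr += sz) {
        addr |= last_addr_mask;    /* jump to the last page in this PUD */
        printf("skipped to %#lx, next iteration starts at %#lx\n",
               addr, addr + sz);
    }
    return 0;
}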
5805 unsigned long addr,
5812 .real_address = addr,
6914 unsigned long addr, pgoff_t idx)
6931 if (pmd_index(addr) != pmd_index(saddr) ||
6940 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6942 unsigned long start = addr & PUD_MASK;
6998 unsigned long addr, pud_t *pud)
7001 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
7013 saddr = page_table_shareable(svma, vma, addr, idx);
7037 pte = (pte_t *)pmd_alloc(mm, pud, addr);
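
huge_pmd_share() at lines 6998-7037 converts the faulting address into a file page index (line 7001), and page_table_shareable() (lines 6914-6931) requires a sibling VMA to map that same index at the same slot within its PMD table before the table can be reused; otherwise a PMD is allocated normally (line 7037). An arithmetic-only sketch of that check, assuming 4 KiB base pages and 2 MiB PMD coverage, with struct vma_sketch standing in for struct vm_area_struct (the kernel also checks flags and range bounds, omitted here):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define PMD_SIZE    (2UL << 20)

struct vma_sketch {
    unsigned long vm_start;
    unsigned long vm_pgoff;    /* file offset of vm_start, in pages */
};

static unsigned long pmd_index(unsigned long addr)
{
    return (addr / PMD_SIZE) & 511;    /* slot within a 512-entry PMD table */
}

int main(void)
{
    struct vma_sketch vma  = { .vm_start = 0x40000000,     .vm_pgoff = 0 };
    struct vma_sketch svma = { .vm_start = 0x7f0000000000, .vm_pgoff = 0 };
    unsigned long addr = 0x40600000;    /* fault address inside vma */

    /* File page index of addr, as at line 7001. */
    unsigned long idx = ((addr - vma.vm_start) >> PAGE_SHIFT) + vma.vm_pgoff;

    /* Address at which the sibling VMA maps that same index. */
    unsigned long saddr = svma.vm_start + ((idx - svma.vm_pgoff) << PAGE_SHIFT);

    /* Same slot in both PMD tables -> the table page can be shared. */
    bool shareable = pmd_index(addr) == pmd_index(saddr);

    printf("idx=%lu saddr=%#lx shareable=%d\n", idx, saddr, shareable);
    return 0;
}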
7055 unsigned long addr, pte_t *ptep)
7057 pgd_t *pgd = pgd_offset(mm, addr);
7058 p4d_t *p4d = p4d_offset(pgd, addr);
7059 pud_t *pud = pud_offset(p4d, addr);
7076 unsigned long addr, pud_t *pud)
7082 unsigned long addr, pte_t *ptep)
7092 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7100 unsigned long addr, unsigned long sz)
7107 pgd = pgd_offset(mm, addr);
7108 p4d = p4d_alloc(mm, pgd, addr);
7111 pud = pud_alloc(mm, p4d, addr);
7117 if (want_pmd_share(vma, addr) && pud_none(*pud))
7118 pte = huge_pmd_share(mm, vma, addr, pud);
7120 pte = (pte_t *)pmd_alloc(mm, pud, addr);
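
huge_pte_alloc() at lines 7100-7120 allocates down the pgd/p4d/pud levels, then either reuses a sibling's PMD page via huge_pmd_share() when want_pmd_share() agrees and the PUD is still empty (lines 7117-7118), or allocates a private PMD (line 7120). A control-flow sketch in which every function is a stand-in and only the branch visible in the listing is modelled:

#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pte_t;

static bool want_pmd_share(unsigned long addr) { return true; }
static bool pud_none(void) { return true; }

static pte_t *huge_pmd_share(unsigned long addr)
{
    static pte_t shared;
    return &shared;    /* pretend a sibling's PMD page was found */
}

static pte_t *pmd_alloc(unsigned long addr)
{
    static pte_t fresh;
    return &fresh;     /* otherwise allocate a private PMD page */
}

static pte_t *huge_pte_alloc_sketch(unsigned long addr)
{
    /* pgd/p4d/pud allocation (lines 7107-7111) elided. */
    if (want_pmd_share(addr) && pud_none())
        return huge_pmd_share(addr);
    return pmd_alloc(addr);
}

int main(void)
{
    printf("pte %s\n", huge_pte_alloc_sketch(0x40000000) ? "found" : "missing");
    return 0;
}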
7135 * entry at address @addr
7138 * address @addr, or NULL if a !p*d_present() entry is encountered and the
7143 unsigned long addr, unsigned long sz)
7150 pgd = pgd_offset(mm, addr);
7153 p4d = p4d_offset(pgd, addr);
7157 pud = pud_offset(p4d, addr);
7165 pmd = pmd_offset(pud, addr);
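
huge_pte_offset() at lines 7143-7165 is the lookup counterpart: it walks pgd, p4d, pud and pmd for addr and, per the comment at lines 7135-7138, returns NULL as soon as a non-present entry is met. A sketch of the index extraction each level performs, assuming a 4-level x86-64 layout (p4d folded into pgd); the early-NULL behaviour is only noted, not modelled:

#include <stdio.h>

int main(void)
{
    unsigned long addr = 0x7f1234600000;

    /* Each level consumes 9 bits of the virtual address (assumed layout). */
    unsigned long pgd_idx = (addr >> 39) & 511;
    unsigned long pud_idx = (addr >> 30) & 511;
    unsigned long pmd_idx = (addr >> 21) & 511;

    printf("addr %#lx -> pgd[%lu] pud[%lu] pmd[%lu]\n",
           addr, pgd_idx, pud_idx, pmd_idx);
    /* The kernel walk would stop and return NULL at the first level
     * whose entry is not present. */
    return 0;
}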