Lines Matching defs:address
554 * Do not try to align to THP boundary if allocation at the address
586 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
605 clear_huge_page(page, vmf->address, HPAGE_PMD_NR);
716 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
835 unsigned long addr = vmf->address & PMD_MASK;
926 unsigned long addr = vmf->address & PUD_MASK;
1241 haddr = vmf->address & HPAGE_PUD_MASK;
1243 update_mmu_cache_pud(vmf->vma, vmf->address, vmf->pud);
1263 haddr = vmf->address & HPAGE_PMD_MASK;
1265 update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
1275 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1317 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1326 __split_huge_pmd(vma, vmf->pmd, vmf->address, false, NULL);
1416 unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
1535 vmf->pmd, pmd, vmf->address, page, target_nid);
1551 update_mmu_cache_pmd(vma, vmf->address, vmf->pmd);
1968 unsigned long address)
1974 address & HPAGE_PUD_MASK,
1975 (address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
2101 * for the same virtual address to be loaded simultaneously. So instead
2210 unsigned long address, bool freeze, struct page *page)
2218 address & HPAGE_PMD_MASK,
2219 (address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
2288 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
2296 pgd = pgd_offset(vma->vm_mm, address);
2300 p4d = p4d_offset(pgd, address);
2304 pud = pud_offset(p4d, address);
2308 pmd = pmd_offset(pud, address);
2310 __split_huge_pmd(vma, pmd, address, freeze, page);
2319 * If the new start address isn't hpage aligned and it could
2329 * If the new end address isn't hpage aligned and it could
2962 unsigned long address = pvmw->address;
2970 flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
2971 pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
2978 set_pmd_at(mm, address, pvmw->pmd, pmdswp);
2987 unsigned long address = pvmw->address;
2988 unsigned long mmun_start = address & HPAGE_PMD_MASK;
3013 update_mmu_cache_pmd(vma, address, pvmw->pmd);
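
Nearly every match above uses the same idiom: the faulting address in vmf->address is rounded down to its huge-page boundary with HPAGE_PMD_MASK (or HPAGE_PUD_MASK / PMD_MASK / PUD_MASK), and the resulting haddr together with haddr + HPAGE_PMD_SIZE bounds the range handed to cache flushes and mmu_notifier invalidation. The sketch below is a minimal user-space illustration of only that mask arithmetic, not kernel code; the 2 MiB PMD and 1 GiB PUD huge-page sizes are an x86-64 assumption, and the constants are re-derived locally rather than taken from kernel headers.

/*
 * Illustrative sketch of the huge-page alignment arithmetic seen in the
 * matches above (haddr = vmf->address & HPAGE_PMD_MASK).  Sizes assume
 * x86-64: 2 MiB PMD-sized and 1 GiB PUD-sized huge pages.
 */
#include <stdio.h>

#define HPAGE_PMD_SIZE (1UL << 21)              /* 2 MiB, assumed */
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PUD_SIZE (1UL << 30)              /* 1 GiB, assumed */
#define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1))

int main(void)
{
	unsigned long address = 0x7f3a12345678UL;  /* arbitrary fault address */

	/* Round down to the start of the enclosing huge page. */
	unsigned long haddr_pmd = address & HPAGE_PMD_MASK;
	unsigned long haddr_pud = address & HPAGE_PUD_MASK;

	/*
	 * The [haddr, haddr + HPAGE_*_SIZE) range is what the kernel lines
	 * above pass to flush_cache_range() and mmu_notifier invalidation.
	 */
	printf("PMD huge page: [0x%lx, 0x%lx)\n",
	       haddr_pmd, haddr_pmd + HPAGE_PMD_SIZE);
	printf("PUD huge page: [0x%lx, 0x%lx)\n",
	       haddr_pud, haddr_pud + HPAGE_PUD_SIZE);
	return 0;
}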