Cross-references for the symbol PUD_SIZE across the bundled Linux kernel
trees (linux-5.10 and linux-6.6). Each entry lists the file followed by
the matching source lines and their enclosing functions.

/kernel/linux/linux-6.6/arch/arm64/mm/hugetlbpage.c
    63  case PUD_SIZE:                                      in __hugetlb_valid_size()
   130  case PUD_SIZE:                                      in num_contig_ptes()
   291  if (sz == PUD_SIZE) {                               in huge_pte_alloc()
   332  if (sz != PUD_SIZE && pud_none(pud))                in huge_pte_offset()
   362  case PUD_SIZE:                                      in hugetlb_mask_last_page()
   363  return PGDIR_SIZE - PUD_SIZE;                       in hugetlb_mask_last_page()
   366  return PUD_SIZE - CONT_PMD_SIZE;                    in hugetlb_mask_last_page()
   368  return PUD_SIZE - PMD_SIZE;                         in hugetlb_mask_last_page()
   387  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
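The hugetlb_mask_last_page() hits above return, for each huge-page size,
the offset of the last huge-page slot inside one entry of the next table
level; generic hugetlb code ORs this mask into an address to skip to that
last slot when a walk finds an empty entry. A standalone sketch of the
arithmetic, assuming the common arm64 4K-page geometry (the size values
below are illustrative assumptions, not taken from the listing):

    #include <stdio.h>

    /* Assumed arm64 4K-page, 4-level geometry (illustration only). */
    #define PMD_SIZE      (1UL << 21)        /* 2 MiB   */
    #define CONT_PMD_SIZE (16 * PMD_SIZE)    /* 32 MiB  */
    #define PUD_SIZE      (1UL << 30)        /* 1 GiB   */
    #define PGDIR_SIZE    (1UL << 39)        /* 512 GiB */

    /* Mirrors the switch in the matches: "span of the enclosing region
     * minus one huge page", i.e. the offset of the last slot. */
    static unsigned long mask_last_page(unsigned long hp_size)
    {
        switch (hp_size) {
        case PUD_SIZE:      return PGDIR_SIZE - PUD_SIZE;    /* 0x7fc0000000 */
        case CONT_PMD_SIZE: return PUD_SIZE - CONT_PMD_SIZE; /* 0x3e000000   */
        case PMD_SIZE:      return PUD_SIZE - PMD_SIZE;      /* 0x3fe00000   */
        default:            return 0;
        }
    }

    int main(void)
    {
        printf("PUD mask:      0x%lx\n", mask_last_page(PUD_SIZE));
        printf("CONT_PMD mask: 0x%lx\n", mask_last_page(CONT_PMD_SIZE));
        printf("PMD mask:      0x%lx\n", mask_last_page(PMD_SIZE));
        return 0;
    }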
/kernel/linux/linux-6.6/arch/riscv/mm/kasan_init.c
    96  if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {  in kasan_populate_pud()
    97  phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);  in kasan_populate_pud()
   100  memset(__va(phys_addr), KASAN_SHADOW_INIT, PUD_SIZE);  in kasan_populate_pud()
   178  if (IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {  in kasan_early_clear_pud()
   249  if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&  in kasan_early_populate_pud()
   250  (next - vaddr) >= PUD_SIZE) {                        in kasan_early_populate_pud()
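All three kasan_*_pud() sites gate on the same eligibility test before
working on a whole PUD-sized shadow block. A minimal paraphrase of the
condition (kernel context assumed; can_populate_pud is a hypothetical
helper name, not present in the file):

    /* A PUD-sized shadow block may be installed only when the slot is
     * still empty, the virtual address sits on a PUD_SIZE boundary, and
     * at least one full PUD_SIZE span remains before 'next'. */
    static bool can_populate_pud(pud_t *pudp, unsigned long vaddr,
                                 unsigned long next)
    {
        return pud_none(*pudp) &&
               IS_ALIGNED(vaddr, PUD_SIZE) &&
               (next - vaddr) >= PUD_SIZE;
    }

When the test passes, lines 97 and 100 allocate PUD_SIZE bytes aligned to
PUD_SIZE and fill the whole block with KASAN_SHADOW_INIT in one step.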
/kernel/linux/linux-6.6/arch/riscv/mm/hugetlbpage.c
    51  if (sz == PUD_SIZE) {                                in huge_pte_alloc()
   104  if (sz == PUD_SIZE)                                  in huge_pte_offset()
   134  case PUD_SIZE:                                       in hugetlb_mask_last_page()
   135  return P4D_SIZE - PUD_SIZE;                          in hugetlb_mask_last_page()
   138  return PUD_SIZE - PMD_SIZE;                          in hugetlb_mask_last_page()
   236  else if (sz >= PUD_SIZE)                             in set_huge_pte_at()
   416  else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)  in __hugetlb_valid_size()
/kernel/linux/linux-6.6/arch/riscv/mm/init.c
   563  if (sz == PUD_SIZE) {                                in create_pud_mapping()
   676  if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)  in best_map_size()
   677  return PUD_SIZE;                                     in best_map_size()
   793  PUD_SIZE, PAGE_TABLE);                               in set_satp_mode()
  1052  nr_pos = (PUD_SIZE - kernel_size) / PMD_SIZE;        in setup_vm()
  1123  * to map PUD_SIZE aligned on PUD_SIZE: if the relocation offset  in setup_vm()
  1124  * makes the kernel cross over a PUD_SIZE boundary    in setup_vm()
  [all...]
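best_map_size() at lines 676-677 picks the largest granularity that both
addresses and the remaining length can support. A sketch of the full
cascade (the PMD and PAGE_SIZE fallbacks are assumed from the usual shape
of this function; only the PUD branch appears in the matches):

    /* Use a PUD mapping only when the physical address, the virtual
     * address, and the remaining size all allow it; otherwise fall back
     * to PMD and finally to base pages. Note that !(x & (SZ - 1)) is
     * the open-coded form of IS_ALIGNED(x, SZ). */
    static unsigned long best_map_size(unsigned long pa, unsigned long va,
                                       unsigned long size)
    {
        if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
            return PUD_SIZE;
        if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
            return PMD_SIZE;
        return PAGE_SIZE;
    }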
/kernel/linux/linux-5.10/include/asm-generic/pgtable-nopud.h
    20  #define PUD_SIZE (1UL << PUD_SHIFT)                  (macro definition)
    21  #define PUD_MASK (~(PUD_SIZE-1))
/kernel/linux/linux-6.6/include/asm-generic/pgtable-nopud.h
    20  #define PUD_SIZE (1UL << PUD_SHIFT)                  (macro definition)
    21  #define PUD_MASK (~(PUD_SIZE-1))
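Both trees define the pair identically: PUD_SIZE is the span covered by
one PUD entry, and PUD_MASK clears the offset bits below it (in this
nopud header the PUD level is folded away, so PUD_SHIFT simply tracks the
level above). A standalone illustration, assuming PUD_SHIFT = 30 as in
the common 4K-page, 4-level layout:

    #include <stdio.h>

    #define PUD_SHIFT 30                    /* assumed for illustration */
    #define PUD_SIZE  (1UL << PUD_SHIFT)    /* 0x40000000 = 1 GiB */
    #define PUD_MASK  (~(PUD_SIZE - 1))     /* 0xffffffffc0000000 */

    int main(void)
    {
        unsigned long addr = 0x7f1234567000UL;

        printf("region start: 0x%lx\n", addr & PUD_MASK);   /* round down */
        printf("offset:       0x%lx\n", addr & ~PUD_MASK);  /* low 30 bits */
        printf("aligned:      %d\n", (addr & ~PUD_MASK) == 0);
        return 0;
    }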
/kernel/linux/linux-5.10/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
    36  #define PUD_SIZE (1UL << PUD_SHIFT)                  (macro definition)
    37  #define PUD_MASK (~(PUD_SIZE-1))
/kernel/linux/linux-5.10/arch/x86/include/asm/pgtable_64_types.h
   100  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)           (macro definition)
   101  #define PUD_MASK (~(PUD_SIZE - 1))
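The x86 variant differs from the generic one only in using _AC(1, UL),
which keeps the constant usable from both C and assembler. The expansion
below follows include/uapi/linux/const.h; PUD_SHIFT = 30 is assumed for
the example:

    /* In assembly a "UL" suffix is invalid, so _AC() drops it; in C it
     * is token-pasted onto the literal, giving 1UL << PUD_SHIFT. */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)
    #endif

    #define PUD_SHIFT 30
    #define PUD_SIZE  (_AC(1, UL) << PUD_SHIFT)  /* (1UL) << 30 in C */
    #define PUD_MASK  (~(PUD_SIZE - 1))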
/kernel/linux/linux-6.6/arch/powerpc/include/asm/nohash/64/pgtable-4k.h
    36  #define PUD_SIZE (1UL << PUD_SHIFT)                  (macro definition)
    37  #define PUD_MASK (~(PUD_SIZE-1))
/kernel/linux/linux-5.10/arch/arm64/mm/hugetlbpage.c
    66  case PUD_SIZE:                                       in arch_hugetlb_migration_supported()
   130  case PUD_SIZE:                                       in num_contig_ptes()
   270  if (sz == PUD_SIZE) {                                in huge_pte_alloc()
   319  if (sz != PUD_SIZE && pud_none(pud))                 in huge_pte_offset()
   352  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
   509  case PUD_SIZE:                                       in arch_hugetlb_valid_size()
/kernel/linux/linux-5.10/arch/s390/mm/kasan_init.c
   151  address = (address + PUD_SIZE) & PUD_MASK;           in kasan_early_vmemmap_populate()
   158  IS_ALIGNED(address, PUD_SIZE) &&                     in kasan_early_vmemmap_populate()
   159  end - address >= PUD_SIZE) {                         in kasan_early_vmemmap_populate()
   162  address = (address + PUD_SIZE) & PUD_MASK;           in kasan_early_vmemmap_populate()
   318  BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));  in kasan_early_init()
   319  BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));    in kasan_early_init()
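Lines 151 and 162 advance with "(address + PUD_SIZE) & PUD_MASK", which
jumps to the start of the next PUD region. Unlike a round-up, an already
aligned address still moves a full region forward, which is what the loop
wants once the current entry has been handled. A standalone check (the
shift is an assumption; s390 itself uses larger 2 GiB region-third
entries, 1 GiB is used here only to keep the numbers short):

    #include <stdio.h>

    #define PUD_SIZE (1UL << 30)         /* assumed 1 GiB regions */
    #define PUD_MASK (~(PUD_SIZE - 1))

    int main(void)
    {
        unsigned long a = 0x40001000UL;  /* mid-region      */
        unsigned long b = 0x80000000UL;  /* already aligned */

        printf("0x%lx -> 0x%lx\n", a, (a + PUD_SIZE) & PUD_MASK); /* 0x80000000 */
        printf("0x%lx -> 0x%lx\n", b, (b + PUD_SIZE) & PUD_MASK); /* 0xc0000000 */
        return 0;
    }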
/kernel/linux/linux-5.10/arch/s390/mm/vmem.c
   291  const unsigned long end = start + PUD_SIZE;          in try_free_pmd_table()
   328  if (IS_ALIGNED(addr, PUD_SIZE) &&                    in modify_pud_table()
   329  IS_ALIGNED(next, PUD_SIZE)) {                        in modify_pud_table()
   336  if (IS_ALIGNED(addr, PUD_SIZE) &&                    in modify_pud_table()
   337  IS_ALIGNED(next, PUD_SIZE) &&                        in modify_pud_table()
/kernel/linux/linux-5.10/arch/s390/mm/hugetlbpage.c
   140  size = PUD_SIZE;                                     in clear_huge_pte_skeys()
   205  if (sz == PUD_SIZE)                                  in huge_pte_alloc()
   261  else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)      in arch_hugetlb_valid_size()
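The arch_hugetlb_valid_size() hit ties PUD-sized huge pages to the EDAT2
hardware facility. A sketch of the full predicate (the EDAT1/PMD branch
is assumed from the usual shape of this function; only the EDAT2 line
appears in the matches):

    /* A huge page size is valid only if the machine advertises the
     * matching Enhanced-DAT facility. */
    static bool s390_huge_size_valid(unsigned long size)
    {
        if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)   /* segment-sized */
            return true;
        if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)   /* region-sized  */
            return true;
        return false;
    }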
/kernel/linux/linux-5.10/mm/ioremap.c
   137  if ((end - addr) != PUD_SIZE)                        in ioremap_try_huge_pud()
   140  if (!IS_ALIGNED(addr, PUD_SIZE))                     in ioremap_try_huge_pud()
   143  if (!IS_ALIGNED(phys_addr, PUD_SIZE))                in ioremap_try_huge_pud()
/kernel/linux/linux-5.10/drivers/dax/device.c
   154  unsigned int fault_size = PUD_SIZE;                  in __dev_dax_pud_fault()
   160  if (dev_dax->align > PUD_SIZE) {                     in __dev_dax_pud_fault()
   173  (pud_addr + PUD_SIZE) > vmf->vma->vm_end)            in __dev_dax_pud_fault()
   177  phys = dax_pgoff_to_phys(dev_dax, pgoff, PUD_SIZE);  in __dev_dax_pud_fault()
   220  fault_size = PUD_SIZE;                               in dev_dax_huge_fault()
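__dev_dax_pud_fault() only attempts a PUD-sized mapping when it can be
satisfied. A condensed paraphrase of the guards visible at lines 160 and
173 (pud_fault_fits is a hypothetical helper; the real function falls
back to smaller fault handling instead of returning a bool):

    /* Refuse the PUD path when the device requires an alignment larger
     * than PUD_SIZE, or when the PUD-aligned region around the faulting
     * address would spill past the end of the VMA. */
    static bool pud_fault_fits(struct vm_fault *vmf, unsigned long align)
    {
        unsigned long pud_addr = vmf->address & PUD_MASK;

        if (align > PUD_SIZE)
            return false;
        if (pud_addr + PUD_SIZE > vmf->vma->vm_end)
            return false;
        return true;
    }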
/kernel/linux/linux-6.6/arch/x86/include/asm/pgtable_64_types.h
   101  #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)           (macro definition)
   102  #define PUD_MASK (~(PUD_SIZE - 1))
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/radix_pgtable.c
    88  if (map_page_size == PUD_SIZE) {                     in early_map_kernel_page()
   154  if (map_page_size == PUD_SIZE) {                     in __map_kernel_page()
   290  if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&  in create_physical_mapping()
   292  mapping_size = PUD_SIZE;                             in create_physical_mapping()
   864  if (!IS_ALIGNED(addr, PUD_SIZE) ||                   in remove_pud_table()
   865  !IS_ALIGNED(next, PUD_SIZE)) {                       in remove_pud_table()
  1172  flush_tlb_kernel_range(addr, addr + PUD_SIZE);       in pud_free_pmd_page()
/kernel/linux/linux-6.6/arch/arc/include/asm/pgtable-levels.h
    75  #define PUD_SIZE BIT(PUD_SHIFT)                      (macro definition)
    76  #define PUD_MASK (~(PUD_SIZE - 1))
/kernel/linux/linux-5.10/arch/riscv/mm/hugetlbpage.c
    19  else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)  in arch_hugetlb_valid_size()
/kernel/linux/linux-6.6/arch/s390/mm/hugetlbpage.c
   133  size = PUD_SIZE;                                     in clear_huge_pte_skeys()
   204  if (sz == PUD_SIZE)                                  in huge_pte_alloc()
   250  else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)      in arch_hugetlb_valid_size()
/kernel/linux/linux-5.10/arch/x86/mm/init.c
   373  unsigned long start = round_down(mr[i].start, PUD_SIZE);  in adjust_range_page_size_mask()
   374  unsigned long end = round_up(mr[i].end, PUD_SIZE);   in adjust_range_page_size_mask()
   445  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));         in split_mem_range()
   458  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));       in split_mem_range()
   459  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
   608  * We can not set step_size to be PUD_SIZE (1G) yet.  in get_new_step_size()
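split_mem_range() works in page frame numbers, so the 1 GiB boundary has
to be expressed in pages too: PFN_DOWN(PUD_SIZE) is PUD_SIZE >> PAGE_SHIFT
= 262144 frames. A standalone rendering of the rounding (macros are
re-declared here for illustration; the round_up/round_down forms assume a
power-of-two step, as in the kernel):

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PUD_SIZE    (1UL << 30)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    #define round_down(x, y) ((x) & ~((y) - 1))
    #define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

    int main(void)
    {
        unsigned long step = PFN_DOWN(PUD_SIZE);  /* 262144 frames = 1 GiB */

        /* Snap a PFN range inward to the 1 GiB grid so the middle part
         * of the range can be mapped with gigabyte pages. */
        printf("round_up(300000)   = %lu\n", round_up(300000UL, step));   /* 524288 */
        printf("round_down(600000) = %lu\n", round_down(600000UL, step)); /* 524288 */
        return 0;
    }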
/kernel/linux/linux-5.10/arch/x86/mm/kasan_init_64.c
    84  ((end - addr) == PUD_SIZE) &&                        in kasan_populate_pud()
    85  IS_ALIGNED(addr, PUD_SIZE)) {                        in kasan_populate_pud()
    86  p = early_alloc(PUD_SIZE, nid, false);               in kasan_populate_pud()
    90  memblock_free(__pa(p), PUD_SIZE);                    in kasan_populate_pud()
/kernel/linux/linux-6.6/arch/x86/mm/init.c
   368  unsigned long start = round_down(mr[i].start, PUD_SIZE);  in adjust_range_page_size_mask()
   369  unsigned long end = round_up(mr[i].end, PUD_SIZE);   in adjust_range_page_size_mask()
   440  end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));         in split_mem_range()
   453  start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));       in split_mem_range()
   454  end_pfn = round_down(limit_pfn, PFN_DOWN(PUD_SIZE)); in split_mem_range()
   603  * We can not set step_size to be PUD_SIZE (1G) yet.  in get_new_step_size()
/kernel/linux/linux-6.6/arch/s390/boot/vmem.c
   149  IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {  in kasan_pud_populate_zero_shadow()
   274  IS_ALIGNED(addr, PUD_SIZE) && (end - addr) >= PUD_SIZE;  in can_large_pud()
/kernel/linux/linux-5.10/mm/kasan/init.c
   154  if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {  in zero_pud_populate()
   411  if (IS_ALIGNED(addr, PUD_SIZE) &&                    in kasan_remove_pud_table()
   412  IS_ALIGNED(next, PUD_SIZE)) {                        in kasan_remove_pud_table()
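kasan_remove_pud_table() applies the teardown-side mirror of the populate
check: an entry can be dropped wholesale only when the range covers it
exactly. A minimal paraphrase (covers_whole_pud is a hypothetical helper
name):

    /* A whole PUD entry may be cleared only when both ends of the range
     * sit on PUD_SIZE boundaries; a partial range must descend and be
     * handled at PMD or PTE granularity instead. */
    static bool covers_whole_pud(unsigned long addr, unsigned long next)
    {
        return IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(next, PUD_SIZE);
    }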