/kernel/linux/linux-5.10/arch/s390/mm/
vmem.c
    79   * from unused_pmd_start to next PMD_SIZE boundary.
    88   ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);  in vmemmap_flush_unused_pmd()
    111  if (likely(IS_ALIGNED(unused_pmd_start, PMD_SIZE)))  in vmemmap_use_sub_pmd()
    121  void *page = __va(ALIGN_DOWN(start, PMD_SIZE));  in vmemmap_use_new_sub_pmd()
    129  if (!IS_ALIGNED(start, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
    136  if (!IS_ALIGNED(end, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
    143  void *page = __va(ALIGN_DOWN(start, PMD_SIZE));  in vmemmap_unuse_sub_pmd()
    147  return !memchr_inv(page, PAGE_UNUSED, PMD_SIZE);  in vmemmap_unuse_sub_pmd()
    227  if (IS_ALIGNED(addr, PMD_SIZE) &&  in modify_pmd_table()
    228  IS_ALIGNED(next, PMD_SIZE)) {  in modify_pmd_table()
    [all...]
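The s390 hits sketch the vmemmap "sub-PMD reuse" scheme: a 2 MiB vmemmap page is handed out in smaller pieces, unused parts are filled with a PAGE_UNUSED marker byte, and the PMD can only be torn down once memchr_inv() finds nothing but that marker across the whole PMD_SIZE range (line 147). Below is a minimal userspace sketch of that final check; the 0xFD marker value and the 2 MiB geometry follow the kernel sources, while pmd_fully_unused() and the local memchr_inv() are illustrative stand-ins.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PMD_SHIFT   21
    #define PMD_SIZE    (1UL << PMD_SHIFT)   /* 2 MiB, as on s390/x86-64 */
    #define PAGE_SIZE   4096UL
    #define PAGE_UNUSED 0xFD                 /* marker byte for unused sub-ranges */

    /* memchr_inv() analogue: first byte in s..s+n differing from c, or NULL. */
    static void *memchr_inv(const void *s, int c, size_t n)
    {
        const unsigned char *p = s;

        while (n--) {
            if (*p != (unsigned char)c)
                return (void *)p;
            p++;
        }
        return NULL;
    }

    /* The PMD may be torn down once every byte still carries the marker. */
    static bool pmd_fully_unused(const void *pmd_region)
    {
        return !memchr_inv(pmd_region, PAGE_UNUSED, PMD_SIZE);
    }

    int main(void)
    {
        unsigned char *region = malloc(PMD_SIZE);  /* stands in for one vmemmap PMD */

        if (!region)
            return 1;
        memset(region, PAGE_UNUSED, PMD_SIZE);     /* nothing in use yet */
        printf("empty : %d\n", pmd_fully_unused(region));  /* 1 */

        memset(region, 0, PAGE_SIZE);              /* one 4 KiB sub-page goes live */
        printf("in use: %d\n", pmd_fully_unused(region));  /* 0 */

        free(region);
        return 0;
    }

The scheme trades a linear scan at teardown for not having to keep per-subpage bookkeeping while the PMD is live.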
/kernel/linux/linux-6.6/arch/riscv/mm/
init.c
    209  * Make sure we align the reservation on PMD_SIZE since we will  in setup_bootmem()
    214  vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;  in setup_bootmem()
    460  if (sz == PMD_SIZE) {  in create_pmd_mapping()
    679  if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)  in best_map_size()
    680  return PMD_SIZE;  in best_map_size()
    797  PMD_SIZE, PAGE_KERNEL_EXEC);  in set_satp_mode()
    799  set_satp_mode_pmd + PMD_SIZE,  in set_satp_mode()
    800  set_satp_mode_pmd + PMD_SIZE,  in set_satp_mode()
    [all...]
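The best_map_size() hit at line 679 is the PMD-level upgrade policy in a single expression: a 2 MiB mapping is used only when the physical address, the virtual address, and the remaining size all permit it. A self-contained sketch of just that decision (Sv39-style constants assumed; the kernel's real function also considers larger page-table levels):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PMD_SHIFT  21
    #define PMD_SIZE   (1UL << PMD_SHIFT)

    /* Mirrors the quoted condition: both addresses and the size must allow 2 MiB. */
    static unsigned long best_map_size(unsigned long pa, unsigned long va,
                                       unsigned long size)
    {
        if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
            return PMD_SIZE;
        return PAGE_SIZE;
    }

    int main(void)
    {
        /* aligned pa+va, 4 MiB to map: upgrade to 2 MiB pages */
        printf("%#lx\n", best_map_size(0x80200000UL, 0xffffffd000200000UL, 4UL << 20));
        /* pa off by one 4 KiB page: fall back to base pages */
        printf("%#lx\n", best_map_size(0x80201000UL, 0xffffffd000200000UL, 4UL << 20));
        return 0;
    }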
/kernel/linux/linux-6.6/arch/x86/mm/
init.c
    169  * the 0-ISA_END_ADDRESS range and secondly for the initial PMD_SIZE mapping.
    355  unsigned long start = round_down(mr[i].start, PMD_SIZE);  in adjust_range_page_size_mask()
    356  unsigned long end = round_up(mr[i].end, PMD_SIZE);  in adjust_range_page_size_mask()
    422  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
    424  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    426  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    436  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    438  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    441  if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))  in split_mem_range()
    442  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    [all...]
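The split_mem_range() hits (lines 422-442) implement the classic head/body/tail split: round the start pfn up and the limit pfn down to PFN_DOWN(PMD_SIZE) = 512-pfn boundaries, map the aligned middle with 2 MiB pages, and fall back to 4 KiB pages at the unaligned edges. A compact sketch of the carve-up, with simplified power-of-two-only rounding macros standing in for the kernel's:

    #include <stdio.h>

    #define PAGE_SHIFT  12
    #define PMD_SIZE    (1UL << 21)
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)          /* bytes -> page frame number */

    /* power-of-two alignment only, like the uses above */
    #define round_up(x, a)   (((x) + (a) - 1) & ~((a) - 1))
    #define round_down(x, a) ((x) & ~((a) - 1))

    int main(void)
    {
        unsigned long start_pfn = 0x1003;            /* not 2 MiB aligned */
        unsigned long limit_pfn = 0x5407;
        unsigned long step = PFN_DOWN(PMD_SIZE);     /* 512 pfns per PMD */

        unsigned long head_end = round_up(start_pfn, step);
        unsigned long body_end = round_down(limit_pfn, step);

        printf("4k head : %#lx-%#lx\n", start_pfn, head_end);
        printf("2M body : %#lx-%#lx\n", head_end, body_end);
        printf("4k tail : %#lx-%#lx\n", body_end, limit_pfn);
        return 0;
    }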
init_64.c
    378  for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {  in __init_extra_mapping()
    430  unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;  in cleanup_highmap()
    441  for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {  in cleanup_highmap()
    521  paddr_next = (paddr & PMD_MASK) + PMD_SIZE;  in phys_pmd_init()
    835  * from unused_pmd_start to next PMD_SIZE boundary.
    847  ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start);  in vmemmap_flush_unused_pmd()
    855  unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);  in vmemmap_pmd_is_unused()
    864  return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE);  in vmemmap_pmd_is_unused()
    [all...]
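The phys_pmd_init() hit at line 521 shows the idiomatic way to advance to the next PMD boundary from an arbitrary address: mask down to the current boundary, then add PMD_SIZE. This rounds an unaligned address up and moves an aligned one exactly one PMD forward, so the same expression drives the whole walk. A small sketch:

    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)
    #define PMD_MASK (~(PMD_SIZE - 1))

    int main(void)
    {
        unsigned long paddr = 0x100123456UL;         /* anywhere inside a PMD */
        unsigned long end   = 0x100800000UL;

        while (paddr < end) {
            /* next boundary, whether or not paddr is already aligned */
            unsigned long paddr_next = (paddr & PMD_MASK) + PMD_SIZE;

            printf("cover %#lx-%#lx\n", paddr, paddr_next < end ? paddr_next : end);
            paddr = paddr_next;
        }
        return 0;
    }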
mem_encrypt_identity.c
    96   static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
    201  ppd->vaddr += PMD_SIZE;  in __sme_map_range_pmd()
    202  ppd->paddr += PMD_SIZE;  in __sme_map_range_pmd()
    228  ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);  in __sme_map_range()
    328  kernel_end = ALIGN(__pa_symbol(_end), PMD_SIZE);  in sme_encrypt_kernel()
    358  * intermediate copy buffer (PMD_SIZE)  in sme_encrypt_kernel()
    363  execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;  in sme_encrypt_kernel()
    386  workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);  in sme_encrypt_kernel()
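The sme_workarea declaration at line 96 reserves twice PMD_SIZE. A common reason for such 2x sizing, offered here as a plausible reading rather than a statement of this file's exact budget (the real layout math is spread through sme_encrypt_kernel()), is that a buffer of 2 * N always contains an N-aligned window of size N, wherever the linker happens to place it:

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static char workarea[2 * PMD_SIZE];  /* twice the target, like sme_workarea */

    int main(void)
    {
        /* An aligned PMD_SIZE window always fits inside a 2 * PMD_SIZE buffer,
         * regardless of where the buffer itself was placed. */
        unsigned long base    = (unsigned long)(uintptr_t)workarea;
        unsigned long aligned = ALIGN(base, PMD_SIZE);

        printf("buffer : %#lx, %lu bytes\n", base, (unsigned long)sizeof(workarea));
        printf("window : %#lx-%#lx\n", aligned, aligned + PMD_SIZE);
        return 0;
    }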
/kernel/linux/linux-5.10/arch/x86/mm/
init.c
    173  * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS.
    360  unsigned long start = round_down(mr[i].start, PMD_SIZE);  in adjust_range_page_size_mask()
    361  unsigned long end = round_up(mr[i].end, PMD_SIZE);  in adjust_range_page_size_mask()
    427  end_pfn = PFN_DOWN(PMD_SIZE);  in split_mem_range()
    429  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    431  end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    441  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    443  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    446  if (end_pfn > round_down(limit_pfn, PFN_DOWN(PMD_SIZE)))  in split_mem_range()
    447  end_pfn = round_down(limit_pfn, PFN_DOWN(PMD_SIZE));  in split_mem_range()
    [all...]
init_64.c
    372  for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {  in __init_extra_mapping()
    424  unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;  in cleanup_highmap()
    435  for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {  in cleanup_highmap()
    515  paddr_next = (paddr & PMD_MASK) + PMD_SIZE;  in phys_pmd_init()
    900  vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE);  in free_hugepage_table()
    902  free_pagetable(page, get_order(PMD_SIZE));  in free_hugepage_table()
    1047 if (IS_ALIGNED(addr, PMD_SIZE) &&  in remove_pmd_table()
    1048 IS_ALIGNED(next, PMD_SIZE)) {  in remove_pmd_table()
    [all...]
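Lines 900-902 express the same quantity two ways: vmem_altmap_free() wants a page count (PMD_SIZE / PAGE_SIZE = 512), while free_pagetable() wants a buddy-allocator order (get_order(PMD_SIZE) = 9, since 2^9 pages of 4 KiB make 2 MiB). A sketch with a simplified get_order(); the kernel's version is bit-scan based but computes the same result:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define PMD_SIZE   (1UL << 21)

    /* get_order(): smallest order such that PAGE_SIZE << order >= size */
    static int get_order(unsigned long size)
    {
        int order = 0;

        while ((PAGE_SIZE << order) < size)
            order++;
        return order;
    }

    int main(void)
    {
        /* One PMD covers 512 base pages, i.e. an order-9 allocation. */
        printf("pages: %lu\n", PMD_SIZE / PAGE_SIZE);   /* 512 */
        printf("order: %d\n", get_order(PMD_SIZE));     /* 9   */
        return 0;
    }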
/kernel/linux/linux-6.6/arch/s390/mm/
vmem.c
    85   * from unused_sub_pmd_start to next PMD_SIZE boundary.
    94   ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);  in vmemmap_flush_unused_sub_pmd()
    117  if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))  in vmemmap_use_sub_pmd()
    127  unsigned long page = ALIGN_DOWN(start, PMD_SIZE);  in vmemmap_use_new_sub_pmd()
    135  if (!IS_ALIGNED(start, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
    142  if (!IS_ALIGNED(end, PMD_SIZE))  in vmemmap_use_new_sub_pmd()
    149  unsigned long page = ALIGN_DOWN(start, PMD_SIZE);  in vmemmap_unuse_sub_pmd()
    153  return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);  in vmemmap_unuse_sub_pmd()
    233  if (IS_ALIGNED(addr, PMD_SIZE) &&  in modify_pmd_table()
    234  IS_ALIGNED(next, PMD_SIZE)) {  in modify_pmd_table()
    [all...]
/kernel/linux/linux-5.10/arch/m68k/mm/
kmap.c
    51   #define IO_SIZE PMD_SIZE
    86   virtaddr += PMD_SIZE;  in __free_io_area()
    87   size -= PMD_SIZE;  in __free_io_area()
    249  if (!(virtaddr & (PMD_SIZE-1)))  in __ioremap()
    264  physaddr += PMD_SIZE;  in __ioremap()
    265  virtaddr += PMD_SIZE;  in __ioremap()
    266  size -= PMD_SIZE;  in __ioremap()
    371  virtaddr += PMD_SIZE;  in kernel_set_cachemode()
    372  size -= PMD_SIZE;  in kernel_set_cachemode()
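Both m68k files walk I/O space one PMD_SIZE chunk at a time: __ioremap() waits until virtaddr reaches a PMD boundary (line 249), then advances physaddr, virtaddr, and the remaining size in lockstep. Reduced to its loop, with a hypothetical map_one_chunk() in place of the pointer-table setup and an illustrative chunk size (m68k's actual PMD span depends on the MMU configuration):

    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)   /* illustrative only */

    /* Hypothetical stand-in for the per-chunk page-table setup in __ioremap(). */
    static void map_one_chunk(unsigned long physaddr, unsigned long virtaddr)
    {
        printf("map %#lx -> %#lx\n", physaddr, virtaddr);
    }

    static void map_region(unsigned long physaddr, unsigned long virtaddr,
                           long size)
    {
        while (size > 0) {          /* addresses assumed PMD-aligned on entry */
            map_one_chunk(physaddr, virtaddr);
            physaddr += PMD_SIZE;
            virtaddr += PMD_SIZE;
            size     -= PMD_SIZE;
        }
    }

    int main(void)
    {
        map_region(0x40000000UL, 0xd0000000UL, 3 * PMD_SIZE);
        return 0;
    }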
/kernel/linux/linux-6.6/arch/m68k/mm/
kmap.c
    50   #define IO_SIZE PMD_SIZE
    85   virtaddr += PMD_SIZE;  in __free_io_area()
    86   size -= PMD_SIZE;  in __free_io_area()
    254  if (!(virtaddr & (PMD_SIZE-1)))  in __ioremap()
    269  physaddr += PMD_SIZE;  in __ioremap()
    270  virtaddr += PMD_SIZE;  in __ioremap()
    271  size -= PMD_SIZE;  in __ioremap()
    379  virtaddr += PMD_SIZE;  in kernel_set_cachemode()
    380  size -= PMD_SIZE;  in kernel_set_cachemode()
/kernel/linux/linux-5.10/arch/riscv/mm/
init.c
    178  * Make sure we align the reservation on PMD_SIZE since we will  in setup_bootmem()
    182  vmlinux_end = (vmlinux_end + PMD_SIZE - 1) & PMD_MASK;  in setup_bootmem()
    367  if (sz == PMD_SIZE) {  in create_pmd_mapping()
    430  /* Upgrade to PMD_SIZE mappings whenever possible */  in best_map_size()
    431  if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))  in best_map_size()
    434  return PMD_SIZE;  in best_map_size()
    489  map_size = PMD_SIZE;  in setup_vm()
    508  (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);  in setup_vm()
    513  load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);  in setup_vm()
    [all...]
/kernel/linux/linux-5.10/arch/sh/include/asm/
pgtable-3level.h
    23  #define PMD_SIZE (1UL << PMD_SHIFT)
    24  #define PMD_MASK (~(PMD_SIZE-1))
    26  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
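These three lines pin down the whole mid-level geometry: PMD_SIZE is the span of one PMD entry, PMD_MASK strips the offset within it, and PTRS_PER_PMD is how many such entries fill one PGDIR span. With illustrative shifts (sh's real values depend on the configured page size):

    #include <assert.h>

    #define PMD_SHIFT    21               /* illustrative */
    #define PGDIR_SHIFT  30               /* illustrative */
    #define PMD_SIZE     (1UL << PMD_SHIFT)               /* 0x200000 = 2 MiB */
    #define PMD_MASK     (~(PMD_SIZE - 1))                /* clears low 21 bits */
    #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)  /* entries per PGDIR span */

    static_assert(PMD_SIZE == 0x200000, "2 MiB per PMD entry");
    static_assert(PTRS_PER_PMD == 512, "512 mid-level entries");

    int main(void) { return 0; }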
/kernel/linux/linux-6.6/arch/sh/include/asm/
pgtable-3level.h
    23  #define PMD_SIZE (1UL << PMD_SHIFT)
    24  #define PMD_MASK (~(PMD_SIZE-1))
    26  #define PTRS_PER_PMD ((1 << PGDIR_SHIFT) / PMD_SIZE)
/kernel/linux/linux-6.6/arch/arm64/mm/
hugetlbpage.c
    67   case PMD_SIZE:  in __hugetlb_valid_size()
    116  *pgsize = PMD_SIZE;  in find_num_contig()
    135  case PMD_SIZE:  in num_contig_ptes()
    139  *pgsize = PMD_SIZE;  in num_contig_ptes()
    300  } else if (sz == PMD_SIZE) {  in huge_pte_alloc()
    344  if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&  in huge_pte_offset()
    367  case PMD_SIZE:  in hugetlb_mask_last_page()
    368  return PUD_SIZE - PMD_SIZE;  in hugetlb_mask_last_page()
    370  return PMD_SIZE - CONT_PTE_SIZE;  in hugetlb_mask_last_page()
    387  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
    [all...]
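The arm64 hits enumerate the hugepage sizes the port accepts: PUD, PMD, and the contiguous-bit variants CONT_PMD and CONT_PTE, each a run of adjacent entries sharing one TLB entry. For orientation, the sizes with a 4 KiB granule are shown below (granule-dependent; 16 KiB and 64 KiB granules shift all of these):

    #include <stdio.h>

    #define PAGE_SIZE     (1UL << 12)
    #define PMD_SIZE      (1UL << 21)
    #define PUD_SIZE      (1UL << 30)
    #define CONT_PTES     16                        /* 4 KiB granule */
    #define CONT_PMDS     16
    #define CONT_PTE_SIZE (CONT_PTES * PAGE_SIZE)   /* 64 KiB */
    #define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE)    /* 32 MiB */

    int main(void)
    {
        printf("PUD      %4lu MiB\n", PUD_SIZE >> 20);        /* 1024 */
        printf("CONT_PMD %4lu MiB\n", CONT_PMD_SIZE >> 20);   /*   32 */
        printf("PMD      %4lu MiB\n", PMD_SIZE >> 20);        /*    2 */
        printf("CONT_PTE %4lu KiB\n", CONT_PTE_SIZE >> 10);   /*   64 */
        return 0;
    }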
/kernel/linux/linux-5.10/arch/loongarch/include/asm/
pgtable-64.h
    30  #define PMD_SIZE (1UL << PMD_SHIFT)
    31  #define PMD_MASK (~(PMD_SIZE-1))
    35  #define PMD_SIZE (1UL << PMD_SHIFT)
    36  #define PMD_MASK (~(PMD_SIZE-1))
    78  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE)
    82  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE)
    85  #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
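A small point in the vmemmap define at line 85: `(VMALLOC_END + PMD_SIZE) & PMD_MASK` is not the same as rounding up with `ALIGN(VMALLOC_END, PMD_SIZE)`; when the input is already PMD-aligned, the former still advances a full PMD, so vmemmap never starts at VMALLOC_END itself. A two-line comparison with arbitrary values:

    #include <stdio.h>

    #define PMD_SIZE (1UL << 21)
    #define PMD_MASK (~(PMD_SIZE - 1))
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long aligned = 16 * PMD_SIZE;   /* already on a boundary */

        printf("%#lx\n", ALIGN(aligned, PMD_SIZE));         /* stays put   */
        printf("%#lx\n", (aligned + PMD_SIZE) & PMD_MASK);  /* skips ahead */
        return 0;
    }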
/kernel/linux/linux-5.10/arch/x86/include/asm/
pgtable_32_types.h
    12  # define PMD_SIZE (1UL << PMD_SHIFT)
    13  # define PMD_MASK (~(PMD_SIZE - 1))
/kernel/linux/linux-6.6/arch/x86/include/asm/
pgtable_32_types.h
    12  # define PMD_SIZE (1UL << PMD_SHIFT)
    13  # define PMD_MASK (~(PMD_SIZE - 1))
/kernel/linux/linux-5.10/arch/arm64/mm/
hugetlbpage.c
    68   case PMD_SIZE:  in arch_hugetlb_migration_supported()
    116  *pgsize = PMD_SIZE;  in find_num_contig()
    132  case PMD_SIZE:  in num_contig_ptes()
    136  *pgsize = PMD_SIZE;  in num_contig_ptes()
    286  } else if (sz == PMD_SIZE) {  in huge_pte_alloc()
    331  if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&  in huge_pte_offset()
    352  } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) {  in arch_make_huge_pte()
    512  case PMD_SIZE:  in arch_hugetlb_valid_size()
/kernel/linux/linux-5.10/arch/parisc/kernel/
pci-dma.c
    85   if (end > PMD_SIZE)  in map_pte_uncached()
    86   end = PMD_SIZE;  in map_pte_uncached()
    120  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in map_pmd_uncached()
    121  orig_vaddr += PMD_SIZE;  in map_pmd_uncached()
    170  if (end > PMD_SIZE)  in unmap_uncached_pte()
    171  end = PMD_SIZE;  in unmap_uncached_pte()
    210  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in unmap_uncached_pmd()
    211  orig_vaddr += PMD_SIZE;  in unmap_uncached_pmd()
/kernel/linux/linux-6.6/arch/parisc/kernel/
pci-dma.c
    85   if (end > PMD_SIZE)  in map_pte_uncached()
    86   end = PMD_SIZE;  in map_pte_uncached()
    120  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in map_pmd_uncached()
    121  orig_vaddr += PMD_SIZE;  in map_pmd_uncached()
    170  if (end > PMD_SIZE)  in unmap_uncached_pte()
    171  end = PMD_SIZE;  in unmap_uncached_pte()
    210  vaddr = (vaddr + PMD_SIZE) & PMD_MASK;  in unmap_uncached_pmd()
    211  orig_vaddr += PMD_SIZE;  in unmap_uncached_pmd()
/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
radix_pgtable.c
    100  if (map_page_size == PMD_SIZE) {  in early_map_kernel_page()
    163  if (map_page_size == PMD_SIZE) {  in __map_kernel_page()
    323  } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&  in create_physical_mapping()
    325  mapping_size = PMD_SIZE;  in create_physical_mapping()
    676  unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);  in vmemmap_pmd_is_unused()
    678  return !vmemmap_populated(start, PMD_SIZE);  in vmemmap_pmd_is_unused()
    771  if (IS_ALIGNED(addr, PMD_SIZE) &&  in remove_pmd_table()
    772  IS_ALIGNED(next, PMD_SIZE)) {  in remove_pmd_table()
    774  free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));  in remove_pmd_table()
    [all...]
/kernel/linux/linux-6.6/arch/loongarch/include/asm/
pgtable.h
    29   #define PMD_SIZE (1UL << PMD_SHIFT)
    30   #define PMD_MASK (~(PMD_SIZE-1))
    34   #define PMD_SIZE (1UL << PMD_SHIFT)
    35   #define PMD_MASK (~(PMD_SIZE-1))
    96   min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
    100  min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
    103  #define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
/kernel/linux/linux-5.10/arch/riscv/include/asm/
pgtable-64.h
    18  #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
    19  #define PMD_MASK (~(PMD_SIZE - 1))
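The only difference from the asm-generic fallback below is the `_AC(1, UL)` spelling: headers like this one are also included from assembly files, where a `UL` suffix is not valid, so the `_AC()` macro from include/uapi/linux/const.h drops the suffix for the assembler and pastes it back for C. Paraphrased:

    /* Paraphrased from include/uapi/linux/const.h */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y) X             /* assembler: bare constant, no suffix */
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y) __AC(X, Y)    /* C: paste the type suffix back on */
    #endif

    #define PMD_SHIFT 21
    #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)

    int main(void) { return PMD_SIZE == (1UL << 21) ? 0 : 1; }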
/kernel/linux/linux-5.10/include/asm-generic/
pgtable-nopmd.h
    22  #define PMD_SIZE (1UL << PMD_SHIFT)
    23  #define PMD_MASK (~(PMD_SIZE-1))
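This is the folded-PMD fallback used by two- and three-level configurations: elsewhere in the same header PMD_SHIFT is defined as PUD_SHIFT and PTRS_PER_PMD as 1, so PMD_SIZE degenerates to the span of one entry of the level above and the PMD "level" costs nothing. A sketch of the relationship (shift value illustrative):

    #include <assert.h>

    #define PUD_SHIFT    30               /* illustrative */
    #define PMD_SHIFT    PUD_SHIFT        /* folded: PMD collapses into PUD */
    #define PMD_SIZE     (1UL << PMD_SHIFT)
    #define PMD_MASK     (~(PMD_SIZE - 1))
    #define PTRS_PER_PMD 1                /* a single implicit entry */

    static_assert(PMD_SIZE == (1UL << PUD_SHIFT), "PMD spans the whole PUD entry");

    int main(void) { return 0; }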
/kernel/linux/linux-6.6/include/asm-generic/
pgtable-nopmd.h
    22  #define PMD_SIZE (1UL << PMD_SHIFT)
    23  #define PMD_MASK (~(PMD_SIZE-1))