/kernel/linux/linux-6.6/arch/arm64/include/asm/

memory.h
     41   * VA_BITS - the maximum number of bits for virtual addresses.
     43  #define VA_BITS        (CONFIG_ARM64_VA_BITS)                    [macro]
     45  #define PAGE_OFFSET    (_PAGE_OFFSET(VA_BITS))
     50  #define VMEMMAP_START  (-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
     56  #if VA_BITS > 48
     59  #define VA_BITS_MIN    (VA_BITS)
    186  #if VA_BITS > 48
    189  #define vabits_actual  ((u64)VA_BITS)
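The negative shifts above hang the kernel regions off the top of the 64-bit space: PAGE_OFFSET is the base of the linear map, VMEMMAP_START the base of the struct-page array. A minimal userspace sketch of how these constants evaluate, assuming CONFIG_ARM64_VA_BITS=48, 4K pages (PAGE_SHIFT=12) and STRUCT_PAGE_MAX_SHIFT=6; those config values are assumptions of the sketch, not taken from the excerpt:

    /* Hedged sketch: evaluates the arm64 memory.h expressions above in
     * userspace; the in-kernel values differ per configuration. */
    #include <stdio.h>

    #define VA_BITS          48
    #define PAGE_SHIFT       12
    #define VMEMMAP_SHIFT    (PAGE_SHIFT - 6)     /* PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT */
    #define _PAGE_OFFSET(va) (-(1ULL << (va)))    /* two's complement wraps to the top */
    #define PAGE_OFFSET      (_PAGE_OFFSET(VA_BITS))
    #define VMEMMAP_START    (-(1ULL << (VA_BITS - VMEMMAP_SHIFT)))

    int main(void)
    {
        printf("PAGE_OFFSET   = 0x%016llx\n", (unsigned long long)PAGE_OFFSET);
        printf("VMEMMAP_START = 0x%016llx\n", (unsigned long long)VMEMMAP_START);
        return 0;
    }

With these assumptions PAGE_OFFSET comes out as 0xffff000000000000, i.e. the bottom of the upper VA_BITS-sized half of the address space.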
kernel-pgtable.h
     89  #if VA_BITS < 48

processor.h
     55  #define TASK_SIZE_MAX  (UL(1) << VA_BITS)
pgtable-hwdef.h
     72  #define PTRS_PER_PGD   (1 << (VA_BITS - PGDIR_SHIFT))
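PTRS_PER_PGD is the number of top-level descriptors needed to span VA_BITS of input address. A hedged sketch of the arithmetic, deriving PGDIR_SHIFT from the usual rule that every level below the PGD resolves PAGE_SHIFT - 3 bits (a page holds 2^(PAGE_SHIFT-3) 8-byte descriptors); the page-size/level combinations below are illustrative assumptions:

    #include <stdio.h>

    static unsigned int ptrs_per_pgd(unsigned int va_bits,
                                     unsigned int page_shift,
                                     unsigned int levels)
    {
        /* Bits resolved before the PGD level is reached. */
        unsigned int pgdir_shift = page_shift + (levels - 1) * (page_shift - 3);
        return 1u << (va_bits - pgdir_shift);
    }

    int main(void)
    {
        printf("4K/48-bit, 4 levels : %u\n", ptrs_per_pgd(48, 12, 4)); /* 512 */
        printf("4K/39-bit, 3 levels : %u\n", ptrs_per_pgd(39, 12, 3)); /* 512 */
        printf("64K/42-bit, 2 levels: %u\n", ptrs_per_pgd(42, 16, 2)); /* 8192 */
        return 0;
    }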
/kernel/linux/linux-5.10/arch/sparc/include/asm/

processor_64.h
     23  #define VA_BITS    44                                            [macro]
     25  #define VPTE_SIZE  (1UL << (VA_BITS - PAGE_SHIFT + 3))
     27  #define VPTE_SIZE  (1 << (VA_BITS - PAGE_SHIFT + 3))
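VPTE_SIZE is the span of sparc64's virtual page-table region: one 8-byte PTE (the "+ 3" in the shift) for every page in a VA_BITS-wide space. A quick check of the arithmetic, assuming PAGE_SHIFT=13 (8K pages; that value is not in the excerpt):

    #include <stdio.h>

    int main(void)
    {
        unsigned int VA_BITS = 44, PAGE_SHIFT = 13;  /* PAGE_SHIFT assumed */
        /* pages in the space * 8 bytes per PTE = 2^(44 - 13 + 3) */
        unsigned long long vpte_size = 1ULL << (VA_BITS - PAGE_SHIFT + 3);
        printf("VPTE_SIZE = 2^%u = %llu GiB\n",
               VA_BITS - PAGE_SHIFT + 3, vpte_size >> 30);  /* 16 GiB */
        return 0;
    }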
/kernel/linux/linux-6.6/arch/riscv/include/asm/

processor.h
     28  else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
     43  else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
     45  else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
pgtable.h
     71  #define VA_BITS (pgtable_l5_enabled ? \                          [macro]
     74  #define VA_BITS VA_BITS_SV32                                     [macro]
     78  (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
    131  #define MMAP_VA_BITS_64  ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
    136  #define MMAP_VA_BITS     ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
    864  #define KERN_VIRT_START  (-(BIT(VA_BITS)) + TASK_SIZE)
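Unlike arm64's compile-time CONFIG_ARM64_VA_BITS, the 64-bit riscv VA_BITS at line 71 is chosen at runtime from the paging mode; the definition is truncated in the excerpt, and mainline resolves it to a nested conditional over pgtable_l5_enabled/pgtable_l4_enabled. A sketch under that assumption, also exercising the MMAP_VA_BITS cap from lines 131/136 (the bool flags stand in for the kernel's runtime variables):

    #include <stdbool.h>
    #include <stdio.h>

    #define VA_BITS_SV32 32
    #define VA_BITS_SV39 39
    #define VA_BITS_SV48 48
    #define VA_BITS_SV57 57

    /* Shape of the truncated definition, as an assumption of this sketch. */
    static unsigned int va_bits(bool l4_enabled, bool l5_enabled)
    {
        return l5_enabled ? VA_BITS_SV57
                          : (l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39);
    }

    int main(void)
    {
        unsigned int vb = va_bits(true, false);  /* e.g. Sv48 hardware */
        /* Line 136: userspace mmap addresses stay within Sv48 even if
         * the kernel itself runs a wider paging mode. */
        unsigned int mmap_va_bits = (vb >= VA_BITS_SV48) ? VA_BITS_SV48 : vb;
        printf("VA_BITS=%u MMAP_VA_BITS=%u\n", vb, mmap_va_bits);
        return 0;
    }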
kasan.h
     30  #define KASAN_SHADOW_SIZE  (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
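The shadow region covers only the kernel half of the space (hence VA_BITS - 1), scaled down by KASAN_SHADOW_SCALE_SHIFT; the value 3 below (one shadow byte per 8 bytes, generic KASAN) is an assumption of this sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int VA_BITS = 39;                  /* e.g. Sv39 */
        unsigned int KASAN_SHADOW_SCALE_SHIFT = 3;  /* assumed: 1 byte per 8 */
        unsigned long long shadow =
            1ULL << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT);
        printf("KASAN_SHADOW_SIZE = %llu GiB\n", shadow >> 30);  /* 32 GiB */
        return 0;
    }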
/kernel/linux/linux-6.6/arch/sparc/include/asm/

processor_64.h
     23  #define VA_BITS    44                                            [macro]
     25  #define VPTE_SIZE  (1UL << (VA_BITS - PAGE_SHIFT + 3))
     27  #define VPTE_SIZE  (1 << (VA_BITS - PAGE_SHIFT + 3))
/kernel/linux/linux-5.10/arch/arm64/include/asm/

memory.h
     41   * VA_BITS - the maximum number of bits for virtual addresses.
     43  #define VA_BITS      (CONFIG_ARM64_VA_BITS)                      [macro]
     45  #define PAGE_OFFSET  (_PAGE_OFFSET(VA_BITS))
     59  #if VA_BITS > 48
     62  #define VA_BITS_MIN  (VA_BITS)

processor.h
     12  #define USER_DS  ((UL(1) << VA_BITS) - 1)

pgtable-hwdef.h
     72  #define PTRS_PER_PGD  (1 << (VA_BITS - PGDIR_SHIFT))
/kernel/linux/linux-5.10/arch/powerpc/include/asm/book3s/64/

mmu-hash.h
    566  #define VA_BITS  68                                              [macro]
    568  #define ESID_BITS       (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
    569  #define ESID_BITS_1T    (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
    639  #define VSID_BITS_256M  (VA_BITS - SID_SHIFT)
    647  #define VSID_BITS_1T    (VA_BITS - SID_SHIFT_1T)
    769  unsigned long va_bits = VA_BITS;                                 (in get_vsid())
/kernel/linux/linux-6.6/arch/powerpc/include/asm/book3s/64/

mmu-hash.h
    578  #define VA_BITS  68                                              [macro]
    580  #define ESID_BITS       (VA_BITS - (SID_SHIFT + CONTEXT_BITS))
    581  #define ESID_BITS_1T    (VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))
    651  #define VSID_BITS_256M  (VA_BITS - SID_SHIFT)
    659  #define VSID_BITS_1T    (VA_BITS - SID_SHIFT_1T)
    781  unsigned long va_bits = VA_BITS;                                 (in get_vsid())
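On hash-MMU book3s64 the 68-bit virtual address is split into context, ESID and offset fields, so these macros are plain bit-budget subtraction. A worked evaluation; only VA_BITS=68 comes from the excerpt, while SID_SHIFT=28 (256M segments), SID_SHIFT_1T=40 (1T segments) and CONTEXT_BITS=19 are assumptions of this sketch:

    #include <stdio.h>

    int main(void)
    {
        unsigned int VA_BITS = 68, CONTEXT_BITS = 19;      /* CONTEXT_BITS assumed */
        unsigned int SID_SHIFT = 28, SID_SHIFT_1T = 40;    /* assumed */

        printf("ESID_BITS      = %u\n", VA_BITS - (SID_SHIFT + CONTEXT_BITS));    /* 21 */
        printf("ESID_BITS_1T   = %u\n", VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS)); /* 9 */
        printf("VSID_BITS_256M = %u\n", VA_BITS - SID_SHIFT);                     /* 40 */
        printf("VSID_BITS_1T   = %u\n", VA_BITS - SID_SHIFT_1T);                  /* 28 */
        return 0;
    }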
/kernel/linux/linux-6.6/scripts/gdb/linux/

mm.py
     48  self.VA_BITS = constants.LX_CONFIG_ARM64_VA_BITS
     49  if self.VA_BITS > 48:
     53  self.VA_BITS_MIN = self.VA_BITS
     54  self.vabits_actual = self.VA_BITS
     88  self.PAGE_OFFSET = self._PAGE_OFFSET(self.VA_BITS)
     94  self.VMEMMAP_START = (-(1 << (self.VA_BITS - self.VMEMMAP_SHIFT))) & 0xffffffffffffffff
/kernel/linux/linux-6.6/arch/arm64/kernel/

head.S
    114  #if VA_BITS > 48
    117  mov x0, #VA_BITS
    344   * - 39 <= VA_BITS < 48, and the ID map needs up to 48 VA bits to cover
    349   * - VA_BITS == 48, and the ID map needs more than 48 VA bits. This can
    356   *   translation are not supported, e.g., VA_BITS==36 on 16k pages, or
    357   *   VA_BITS==39/4k pages with 5-level paging, where the input address
    360  #if (VA_BITS < 48)
    361  #define IDMAP_PGD_ORDER  (VA_BITS - PGDIR_SHIFT)
    365   * If VA_BITS < 48, we have to configure an additional table level.
    367   * VA_BITS wa…                                                   [remaining matches not shown]

crash_core.c
     22  VMCOREINFO_NUMBER(VA_BITS);                                      (in arch_crash_save_vmcoreinfo())
/kernel/linux/linux-5.10/arch/loongarch/include/asm/

processor.h
     32  #define TASK_SIZE64  (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
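TASK_SIZE64 clamps the user address-space size to the smaller of the CPU's hardware VA width (cpu_vabits, probed at boot) and the page-table reach (VA_BITS). A toy evaluation; both values below are illustrative assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned int cpu_vabits = 48;  /* assumed hardware VA width */
        unsigned int VA_BITS = 47;     /* assumed page-table limit */
        unsigned int bits = (cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits;
        unsigned long long task_size64 = 1ULL << bits;
        printf("TASK_SIZE64 = 2^%u = 0x%llx\n", bits, task_size64);
        return 0;
    }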
pgtable-64.h
     46  #define VA_BITS  (PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3))    [macro]
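Here VA_BITS is derived rather than configured: PGDIR_SHIFT bits are resolved by the levels below the PGD, plus PAGE_SHIFT + PGD_ORDER - 3 bits resolved by the PGD page(s). A hedged sketch, assuming the common 16K-page, 3-level, PGD_ORDER=0 layout (not stated in the excerpt):

    #include <stdio.h>

    int main(void)
    {
        unsigned int PAGE_SHIFT = 14, PGD_ORDER = 0, levels = 3;  /* assumed */
        /* Each level below the PGD resolves PAGE_SHIFT - 3 bits. */
        unsigned int PGDIR_SHIFT = PAGE_SHIFT + (levels - 1) * (PAGE_SHIFT - 3);
        unsigned int VA_BITS = PGDIR_SHIFT + (PAGE_SHIFT + PGD_ORDER - 3);
        printf("PGDIR_SHIFT = %u, VA_BITS = %u\n", PGDIR_SHIFT, VA_BITS); /* 36, 47 */
        return 0;
    }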
/kernel/linux/linux-6.6/arch/loongarch/include/asm/

processor.h
     33  #define TASK_SIZE64  (0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
/kernel/linux/linux-5.10/arch/arm64/kernel/

crash_core.c
     21  VMCOREINFO_NUMBER(VA_BITS);                                      (in arch_crash_save_vmcoreinfo())
/kernel/linux/linux-6.6/arch/riscv/kernel/

crash_core.c
     14  VMCOREINFO_NUMBER(VA_BITS);                                      (in arch_crash_save_vmcoreinfo())
/kernel/linux/linux-5.10/arch/arm64/mm/

kasan_init.c
    170  BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));  (in kasan_early_init())
/kernel/linux/linux-6.6/arch/arm64/mm/

kasan_init.c
    173  BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));  (in kasan_early_init())

mmu.c
     50  #if VA_BITS > 48
    768  if (VA_BITS < 48 && idmap_t0sz < (64 - VA_BITS_MIN)) {           (in create_idmap())
    770  set_pgd(&idmap_pg_dir[start >> VA_BITS],                         (in create_idmap())