/kernel/linux/linux-5.10/include/asm-generic/
  pgtable-nop4d.h
      9  typedef struct { pgd_t pgd; } p4d_t;
     20   * into the pgd entry)
     22  static inline int pgd_none(pgd_t pgd)     { return 0; }
     23  static inline int pgd_bad(pgd_t pgd)      { return 0; }
     24  static inline int pgd_present(pgd_t pgd)  { return 1; }
     25  static inline void pgd_clear(pgd_t *pgd)  { }
     26  #define p4d_ERROR(p4d)                  (pgd_ERROR((p4d).pgd))
     28  #define pgd_populate(mm, pgd, p4d)      do { } while (0)
     29  #define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
     36  static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
         … [more matches not shown]
/kernel/linux/linux-6.6/include/asm-generic/
  pgtable-nop4d.h
      9  typedef struct { pgd_t pgd; } p4d_t;
     19   * into the pgd entry)
     21  static inline int pgd_none(pgd_t pgd)     { return 0; }
     22  static inline int pgd_bad(pgd_t pgd)      { return 0; }
     23  static inline int pgd_present(pgd_t pgd)  { return 1; }
     24  static inline void pgd_clear(pgd_t *pgd)  { }
     25  #define p4d_ERROR(p4d)                  (pgd_ERROR((p4d).pgd))
     27  #define pgd_populate(mm, pgd, p4d)      do { } while (0)
     28  #define pgd_populate_safe(mm, pgd, p4d) do { } while (0)
     35  static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
         … [more matches not shown]
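The two pgtable-nop4d.h entries above are the generic fallback used when the p4d level is folded into the pgd: pgd_none()/pgd_bad() always report a valid entry, pgd_populate() is a no-op, and p4d_offset() hands back the pgd itself. A minimal sketch of why callers can always be written against the full five-level API follows; it is illustrative only (the helper name walk_to_pte is made up, and it is not buildable outside a kernel tree):

/* Sketch: generic five-level lookup that still works when the p4d (and
 * pud) levels are folded away by the headers above. */
#include <linux/mm.h>

static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd = pgd_offset(mm, addr);
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;

    if (pgd_none(*pgd) || pgd_bad(*pgd))    /* constant 0 with nop4d */
        return NULL;
    p4d = p4d_offset(pgd, addr);            /* == (p4d_t *)pgd when folded */
    if (p4d_none(*p4d) || p4d_bad(*p4d))
        return NULL;
    pud = pud_offset(p4d, addr);
    if (pud_none(*pud) || pud_bad(*pud))
        return NULL;
    pmd = pmd_offset(pud, addr);
    if (pmd_none(*pmd) || pmd_bad(*pmd))
        return NULL;
    /* Simplified: real callers use pte_offset_map_lock() and handle
     * huge entries at the pud/pmd levels. */
    return pte_offset_kernel(pmd, addr);
}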
/kernel/linux/linux-5.10/arch/x86/mm/
  pgtable.c
     93  static inline void pgd_list_add(pgd_t *pgd)
     95      struct page *page = virt_to_page(pgd);
    100  static inline void pgd_list_del(pgd_t *pgd)
    102      struct page *page = virt_to_page(pgd);
    113  static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
    115      virt_to_page(pgd)->pt_mm = mm;
    123  static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
    125      /* If the pgd points to a shared pagetable level (either the
    131      clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
    138      pgd_set_mm(pgd, mm);
    143  pgd_dtor(pgd_t *pgd)
    264      pgd_t pgd = *pgdp;                   (in mop_up_one_pmd())
    296  pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
    403  _pgd_free(pgd_t *pgd)
    418  _pgd_free(pgd_t *pgd)
    426      pgd_t *pgd;                          (in pgd_alloc())
    471  pgd_free(struct mm_struct *mm, pgd_t *pgd)
         … [more matches not shown]
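All of the per-arch pgd_alloc()/pgd_free() implementations collected in this listing are consumed by the generic mm code on the fork/exit path. A condensed sketch of that caller side is given below; it is an assumption-labelled restatement of the mm_alloc_pgd()/mm_free_pgd() helpers in kernel/fork.c, not a verbatim copy:

/* Sketch of the generic caller of the arch hooks matched above. */
static int sketch_mm_alloc_pgd(struct mm_struct *mm)
{
    mm->pgd = pgd_alloc(mm);    /* arch hook: may prepopulate pmds and
                                 * clone the kernel half (pgd_ctor()) */
    if (unlikely(!mm->pgd))
        return -ENOMEM;
    return 0;
}

static void sketch_mm_free_pgd(struct mm_struct *mm)
{
    pgd_free(mm, mm->pgd);      /* arch hook: mops up prepopulated
                                 * levels and frees the page(s) */
}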
  kasan_init_64.c
    125  static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
    132      if (pgd_none(*pgd)) {
    134      pgd_populate(&init_mm, pgd, p);
    137      p4d = p4d_offset(pgd, addr);
    147      pgd_t *pgd;                          (in kasan_populate_shadow())
    152      pgd = pgd_offset_k(addr);
    155      kasan_populate_pgd(pgd, addr, next, nid);
    156      } while (pgd++, addr = next, addr != end);
    173      pgd_t *pgd;                          (in clear_pgds())
    178      pgd …                                (in clear_pgds())
    194  early_p4d_offset(pgd_t *pgd, unsigned long addr)
    206  kasan_early_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end)
    233  kasan_map_early_shadow(pgd_t *pgd)
    247  kasan_shallow_populate_p4ds(pgd_t *pgd, unsigned long addr, unsigned long end)
    269      pgd_t *pgd;                          (in kasan_shallow_populate_pgds())
         … [more matches not shown]
  pti.c
    124  pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
    132      * Top-level entries added to init_mm's usermode pgd after boot
    136      return pgd;
    142      kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
    155      * - we're clearing the PGD (i.e. the new pgd is not present).
    157      if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
    159          pgd …
    173      pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));   (in pti_user_pagetable_walk_p4d())
    315      pgd_t *pgd;                          (in pti_clone_pgtable())
         … [more matches not shown]
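The pti.c matches rely on the x86 PTI layout in which every kernel PGD page has a user-mode twin in the same order-1 allocation, and __pti_set_user_pgtbl() decides which updates of the kernel copy are mirrored into the user copy. A hedged restatement of the pointer arithmetic behind kernel_to_user_pgdp(), simplified from arch/x86/include/asm/pgtable.h and not meant as the authoritative definition:

/* Sketch: the user PGD lives one page above the kernel PGD, so switching
 * between the two copies is just toggling bit PAGE_SHIFT of the pointer. */
static inline pgd_t *sketch_kernel_to_user_pgdp(pgd_t *pgdp)
{
    return (pgd_t *)((unsigned long)pgdp | (1UL << PAGE_SHIFT));
}

static inline pgd_t *sketch_user_to_kernel_pgdp(pgd_t *pgdp)
{
    return (pgd_t *)((unsigned long)pgdp & ~(1UL << PAGE_SHIFT));
}

As the condition at line 157 above shows, only entries that are both _PAGE_USER and _PAGE_PRESENT are mirrored; kernel-only entries never reach the user copy.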
/kernel/linux/linux-5.10/arch/powerpc/include/asm/book3s/64/
  pgalloc.h
     39  static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
     42      free_page((unsigned long)pgd);
     44      free_pages((unsigned long)pgd, 4);
     50      pgd_t *pgd;                          (in pgd_alloc())
     55      pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
     57      if (unlikely(!pgd))
     58          return pgd;
     65      kmemleak_no_scan(pgd);
     76      memset(pgd, 0, PGD_TABLE_SIZE);
     78      return pgd;
     81  pgd_free(struct mm_struct *mm, pgd_t *pgd)
     88  p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/powerpc/include/asm/book3s/64/
  pgalloc.h
     39  static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
     42      free_page((unsigned long)pgd);
     44      free_pages((unsigned long)pgd, 4);
     50      pgd_t *pgd;                          (in pgd_alloc())
     55      pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
     57      if (unlikely(!pgd))
     58          return pgd;
     65      kmemleak_no_scan(pgd);
     76      memset(pgd, 0, PGD_TABLE_SIZE);
     78      return pgd;
     81  pgd_free(struct mm_struct *mm, pgd_t *pgd)
     88  p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
         … [more matches not shown]
/kernel/linux/linux-5.10/arch/nds32/mm/
  mm-nds32.c
     43  void pgd_free(struct mm_struct *mm, pgd_t * pgd)
     48      if (!pgd)
     51      pmd = (pmd_t *) pgd;
     62      dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
     67      free_pages((unsigned long)pgd, 0);
     78      pgd_t *pgd;                          (in setup_mm_for_reboot())
     84      if (current->mm && current->mm->pgd)
     85          pgd = current->mm->pgd;
     87          pgd …
         … [more matches not shown]
/kernel/linux/linux-5.10/arch/arm/mm/
  pgd.c
      3   *  linux/arch/arm/mm/pgd.c
     21  #define __pgd_free(pgd)  kfree(pgd)
     24  #define __pgd_free(pgd)  free_pages((unsigned long)pgd, 2)
    144      pgd_t *pgd;                          (in pgd_free())
    153      pgd = pgd_base + pgd_index(0);
    154      if (pgd_none_or_clear_bad(pgd))
    157      p4d = p4d_offset(pgd, 0);
    181      pgd_clear(pgd);
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/arm/mm/
  pgd.c
      3   *  linux/arch/arm/mm/pgd.c
     21  #define __pgd_free(pgd)  kfree(pgd)
     24  #define __pgd_free(pgd)  free_pages((unsigned long)pgd, 2)
    144      pgd_t *pgd;                          (in pgd_free())
    153      pgd = pgd_base + pgd_index(0);
    154      if (pgd_none_or_clear_bad(pgd))
    157      p4d = p4d_offset(pgd, 0);
    181      pgd_clear(pgd);
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/x86/mm/
  kasan_init_64.c
    123  static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
    130      if (pgd_none(*pgd)) {
    132      pgd_populate(&init_mm, pgd, p);
    135      p4d = p4d_offset(pgd, addr);
    145      pgd_t *pgd;                          (in kasan_populate_shadow())
    150      pgd = pgd_offset_k(addr);
    153      kasan_populate_pgd(pgd, addr, next, nid);
    154      } while (pgd++, addr = next, addr != end);
    171      pgd_t *pgd;                          (in clear_pgds())
    176      pgd …                                (in clear_pgds())
    192  early_p4d_offset(pgd_t *pgd, unsigned long addr)
    204  kasan_early_p4d_populate(pgd_t *pgd, unsigned long addr, unsigned long end)
    231  kasan_map_early_shadow(pgd_t *pgd)
    245  kasan_shallow_populate_p4ds(pgd_t *pgd, unsigned long addr, unsigned long end)
    267      pgd_t *pgd;                          (in kasan_shallow_populate_pgds())
         … [more matches not shown]
  pgtable.c
     93  static inline void pgd_list_add(pgd_t *pgd)
     95      struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
    100  static inline void pgd_list_del(pgd_t *pgd)
    102      struct ptdesc *ptdesc = virt_to_ptdesc(pgd);
    113  static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
    115      virt_to_ptdesc(pgd)->pt_mm = mm;
    123  static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
    125      /* If the pgd points to a shared pagetable level (either the
    131      clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
    138      pgd_set_mm(pgd, mm);
    143  pgd_dtor(pgd_t *pgd)
    273      pgd_t pgd = *pgdp;                   (in mop_up_one_pmd())
    305  pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
    409  _pgd_free(pgd_t *pgd)
    424  _pgd_free(pgd_t *pgd)
    432      pgd_t *pgd;                          (in pgd_alloc())
    484  pgd_free(struct mm_struct *mm, pgd_t *pgd)
         … [more matches not shown]
  pti.c
    124  pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
    132      * Top-level entries added to init_mm's usermode pgd after boot
    136      return pgd;
    142      kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;
    155      * - we're clearing the PGD (i.e. the new pgd is not present).
    157      if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
    159          pgd …
    173      pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));   (in pti_user_pagetable_walk_p4d())
    315      pgd_t *pgd;                          (in pti_clone_pgtable())
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/riscv/include/asm/
  pgtable-64.h
     27  /* p4d is folded into pgd in case of 4-level page table */
     36  /* pud is folded into pgd in case of 3-level page table */
    347  static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
    350      *pgdp = pgd;
    352      set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
    355  static inline int pgd_none(pgd_t pgd)
    358      return (pgd_val(pgd) == 0);
    363  static inline int pgd_present(pgd_t pgd)
    366      return (pgd_val(pgd) & _PAGE_PRESENT);
    371  static inline int pgd_bad(pgd_t pgd)
    379  pgd_clear(pgd_t *pgd)
    385  pgd_pgtable(pgd_t pgd)
    394  pgd_page(pgd_t pgd)
    403  p4d_offset(pgd_t *pgd, unsigned long address)
         … [more matches not shown]
  pgalloc.h
     62  static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
     67      set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
     71  static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
     77      set_pgd_safe(pgd,
    130  static inline void sync_kernel_mappings(pgd_t *pgd)
    132      memcpy(pgd + USER_PTRS_PER_PGD,
    133             init_mm.pgd + USER_PTRS_PER_PGD,
    139      pgd_t *pgd;                          (in pgd_alloc())
    141      pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
    142      if (likely(pgd != NULL)) {
         … [more matches not shown]
/kernel/linux/linux-5.10/arch/x86/power/
  hibernate_32.c
     30  static pmd_t *resume_one_md_table_init(pgd_t *pgd)
     41      set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
     42      p4d = p4d_offset(pgd, 0);
     47      p4d = p4d_offset(pgd, 0);
     84      pgd_t *pgd;                          (in resume_physical_mapping_init())
     90      pgd = pgd_base + pgd_idx;
     93      for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
     94          pmd = resume_one_md_table_init(pgd);
    147      pgd_t *pgd;                          (in set_up_temporary_text_mapping())
    151      pgd …
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/x86/power/
  hibernate_32.c
     30  static pmd_t *resume_one_md_table_init(pgd_t *pgd)
     41      set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
     42      p4d = p4d_offset(pgd, 0);
     47      p4d = p4d_offset(pgd, 0);
     84      pgd_t *pgd;                          (in resume_physical_mapping_init())
     90      pgd = pgd_base + pgd_idx;
     93      for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
     94          pmd = resume_one_md_table_init(pgd);
    147      pgd_t *pgd;                          (in set_up_temporary_text_mapping())
    151      pgd …
         … [more matches not shown]
/kernel/linux/linux-5.10/arch/sh/mm/
  hugetlbpage.c
     27      pgd_t *pgd;                          (in huge_pte_alloc())
     33      pgd = pgd_offset(mm, addr);
     34      if (pgd) {
     35          p4d = p4d_alloc(mm, pgd, addr);
     52      pgd_t *pgd;                          (in huge_pte_offset())
     58      pgd = pgd_offset(mm, addr);
     59      if (pgd) {
     60          p4d = p4d_offset(pgd, addr);
  fault.c
     39      pgd_t *pgd;                          (in show_pte())
     42      pgd = mm->pgd;
     44      pgd = get_TTB();
     46      if (unlikely(!pgd))
     47          pgd = swapper_pg_dir;
     50      pr_alert("pgd = %p\n", pgd);
     51      pgd += pgd_index(addr);
     52      pr_alert("[%08lx] *pgd …
    120  vmalloc_sync_one(pgd_t *pgd, unsigned long address)
         … [more matches not shown]
/kernel/linux/linux-6.6/arch/sh/mm/
  hugetlbpage.c
     27      pgd_t *pgd;                          (in huge_pte_alloc())
     33      pgd = pgd_offset(mm, addr);
     34      if (pgd) {
     35          p4d = p4d_alloc(mm, pgd, addr);
     52      pgd_t *pgd;                          (in huge_pte_offset())
     58      pgd = pgd_offset(mm, addr);
     59      if (pgd) {
     60          p4d = p4d_offset(pgd, addr);
  fault.c
     39      pgd_t *pgd;                          (in show_pte())
     42      pgd = mm->pgd;
     44      pgd = get_TTB();
     46      if (unlikely(!pgd))
     47          pgd = swapper_pg_dir;
     50      pr_alert("pgd = %p\n", pgd);
     51      pgd += pgd_index(addr);
     52      pr_alert("[%08lx] *pgd …
    120  vmalloc_sync_one(pgd_t *pgd, unsigned long address)
         … [more matches not shown]
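The vmalloc_sync_one() match in both fault.c entries belongs to the lazy-sync scheme used on architectures whose per-process page tables do not automatically pick up kernel pgd entries created after fork: a fault in the vmalloc range is resolved by copying the missing entry from the reference page table instead of delivering a fault. The sketch below captures the general idea; it is modelled on the 32-bit x86 handler of the same era rather than the sh implementation itself, and assumes folded p4d/pud levels so only the pmd entry is ever copied.

/* Sketch: pull a missing kernel mapping from init_mm into the faulting
 * page table during a vmalloc-area fault.  Returns the kernel pmd on
 * success, NULL for a genuinely bad access. */
static pmd_t *sketch_vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
    pgd_t *pgd_k = init_mm.pgd + pgd_index(address);
    p4d_t *p4d, *p4d_k;
    pud_t *pud, *pud_k;
    pmd_t *pmd, *pmd_k;

    pgd += pgd_index(address);
    if (!pgd_present(*pgd_k))
        return NULL;

    p4d = p4d_offset(pgd, address);     /* folded levels: pass-throughs */
    p4d_k = p4d_offset(pgd_k, address);
    pud = pud_offset(p4d, address);
    pud_k = pud_offset(p4d_k, address);

    pmd = pmd_offset(pud, address);
    pmd_k = pmd_offset(pud_k, address);
    if (!pmd_present(*pmd_k))
        return NULL;
    if (!pmd_present(*pmd))
        set_pmd(pmd, *pmd_k);           /* share the kernel mapping */
    return pmd_k;
}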
/kernel/linux/linux-5.10/arch/riscv/include/asm/
  pgalloc.h
     45      pgd_t *pgd;                          (in pgd_alloc())
     47      pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
     48      if (likely(pgd != NULL)) {
     49          memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
     51          memcpy(pgd + USER_PTRS_PER_PGD,
     52                 init_mm.pgd + USER_PTRS_PER_PGD,
     55      return pgd;
/kernel/linux/linux-5.10/arch/hexagon/include/asm/
  pgalloc.h
     23      pgd_t *pgd;                          (in pgd_alloc())
     25      pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
     35      memcpy(pgd, swapper_pg_dir, PTRS_PER_PGD*sizeof(pgd_t));
     39      mm->context.ptbase = __pa(pgd);
     41      return pgd;
     83      pmdindex = (pgd_t *)pmd - mm->pgd;   (in pmd_populate_kernel())
     84      ppmd = (pmd_t *)current->active_mm->pgd + pmdindex;
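The hexagon pgd_alloc() above copies the whole swapper_pg_dir into the new pgd, while the riscv and x86 versions earlier in this listing copy only the kernel half; all of them maintain the same invariant that a freshly allocated top-level table already contains the shared kernel mappings. A condensed sketch of the common shape, using the constants from the riscv entries and not tied to any one architecture:

/* Sketch of the shared pattern: user slots start empty, kernel slots are
 * inherited from init_mm so every mm sees the same kernel mappings. */
static pgd_t *sketch_pgd_alloc(struct mm_struct *mm)
{
    pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

    if (unlikely(!pgd))
        return NULL;
    memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
    memcpy(pgd + USER_PTRS_PER_PGD,
           init_mm.pgd + USER_PTRS_PER_PGD,
           (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
    return pgd;
}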
/kernel/linux/linux-5.10/mm/kasan/
  init.c
     38  static inline bool kasan_p4d_table(pgd_t pgd)
     40      return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
     43  static inline bool kasan_p4d_table(pgd_t pgd)
    186  static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
    189      p4d_t *p4d = p4d_offset(pgd, addr);
    241      pgd_t *pgd = pgd_offset_k(addr);     (in kasan_populate_early_shadow())
    260      pgd_populate(&init_mm, pgd,
    262      p4d = p4d_offset(pgd, addr);
    274      if (pgd_none(*pgd)) {
    278          p = p4d_alloc(&init_mm, pgd, addr);
    337  kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
    453      pgd_t *pgd;                          (in kasan_remove_zero_shadow())
         … [more matches not shown]
/kernel/linux/linux-6.6/mm/kasan/
  init.c
     33  static inline bool kasan_p4d_table(pgd_t pgd)
     35      return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
     38  static inline bool kasan_p4d_table(pgd_t pgd)
    188  static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
    191      p4d_t *p4d = p4d_offset(pgd, addr);
    241      pgd_t *pgd = pgd_offset_k(addr);     (in kasan_populate_early_shadow())
    260      pgd_populate(&init_mm, pgd,
    262      p4d = p4d_offset(pgd, addr);
    274      if (pgd_none(*pgd)) {
    278          p = p4d_alloc(&init_mm, pgd, addr);
    337  kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
    456      pgd_t *pgd;                          (in kasan_remove_zero_shadow())
         … [more matches not shown]
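Both mm/kasan/init.c entries, together with the x86 kasan_init_64.c matches, build page tables for the KASAN shadow region, where each 8 bytes of kernel address space is tracked by one shadow byte. The address transformation those tables serve is restated below from kasan_mem_to_shadow() in include/linux/kasan.h; this is a sketch for orientation, not the authoritative definition:

/* Sketch: generic KASAN address-to-shadow translation.  The zero/early
 * shadow populated by the code above is what this maps to before (or
 * instead of) real shadow pages being allocated. */
static inline void *sketch_mem_to_shadow(const void *addr)
{
    return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
        + KASAN_SHADOW_OFFSET;
}

kasan_populate_early_shadow() walks pgd, p4d, pud and pmd for that shadow range and points every level at the shared kasan_early_shadow_* tables until real shadow memory exists.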