/kernel/linux/linux-5.10/arch/openrisc/kernel/
  dma.c
    46: .pte_entry = page_set_nocache,
    65: .pte_entry = page_clear_nocache,

/kernel/linux/linux-6.6/arch/openrisc/kernel/
  dma.c
    46: .pte_entry = page_set_nocache,
    65: .pte_entry = page_clear_nocache,

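In both kernel versions the OpenRISC DMA code installs a pair of pte_entry callbacks, page_set_nocache and page_clear_nocache, to flip the cacheability of every kernel PTE backing an uncached DMA buffer. The sketch below shows only the shape of that pattern: it assumes the OpenRISC cache-inhibit bit is called _PAGE_CI and omits the cache-line flushing the real callbacks also perform, so treat it as an illustration of the callback/ops structure rather than the exact dma.c code.

    /* Sketch: each invocation sees one kernel PTE of the DMA buffer and
     * marks it cache-inhibited (cache flushing omitted). */
    static int page_set_nocache(pte_t *pte, unsigned long addr,
                                unsigned long next, struct mm_walk *walk)
    {
            pte_val(*pte) |= _PAGE_CI;
            return 0;
    }

    static const struct mm_walk_ops set_nocache_walk_ops = {
            .pte_entry = page_set_nocache,
    };

page_clear_nocache is the mirror image, dropping the same bit through a second mm_walk_ops table, which is why the file registers two .pte_entry initializers.
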
/kernel/linux/linux-6.6/mm/
  pagewalk.c
    30: err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  (in walk_pte_range_inner())
    85: if (!ops->pte_entry)  (in walk_hugepd_range())
    96: err = ops->pte_entry(pte, addr, addr + page_size, walk);  (in walk_hugepd_range())
    156: !(ops->pte_entry))  (in walk_pmd_range())
    210: !(ops->pmd_entry || ops->pte_entry))  (in walk_pud_range())
    255: else if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)  (in walk_p4d_range())
    292: else if (ops->p4d_entry || ops->pud_entry || ops->pmd_entry || ops->pte_entry)  (in walk_pgd_range())
    441: * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
    540: * will also not lock the PTEs for the pte_entry() callback. This is useful for
  purgeable.c
    262: long pte_entry = 0;  (local, in __mm_purg_pages_info())
    275: pte_entry = uxpte_read(&(uxpte[index]));  (in __mm_purg_pages_info())
    276: if (uxpte_present(pte_entry) == 0) /* not present */  (in __mm_purg_pages_info())
    279: if (uxpte_refcnt(pte_entry) > 0) /* pined by user */  (in __mm_purg_pages_info())
  mapping_dirty_helpers.c
    232: .pte_entry = clean_record_pte,
    241: .pte_entry = wp_pte,
  ptdump.c
    147: .pte_entry = ptdump_pte_entry,
  mprotect.c
    569: .pte_entry = prot_none_pte_entry,

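The pagewalk.c hits above show the dispatch side: the walker only descends to the lowest level when ops->pte_entry is set (lines 156, 210, 255, 292) and then invokes the callback once per entry with the range [addr, addr + PAGE_SIZE) (line 30). A consumer therefore only needs to fill in an mm_walk_ops table and call walk_page_range() with the mmap lock held. Below is a minimal, hypothetical sketch that counts present PTEs in part of a user address space; count_present_pte, count_present_ops and count_present are made-up names for illustration, only the mm_walk_ops / walk_page_range() API is real.

    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    /* Called once per PTE; accumulate into the counter passed via private. */
    static int count_present_pte(pte_t *pte, unsigned long addr,
                                 unsigned long next, struct mm_walk *walk)
    {
            unsigned long *nr_present = walk->private;

            if (pte_present(ptep_get(pte)))
                    (*nr_present)++;

            return 0;       /* returning non-zero would stop the walk */
    }

    static const struct mm_walk_ops count_present_ops = {
            .pte_entry = count_present_pte,
    };

    static unsigned long count_present(struct mm_struct *mm,
                                       unsigned long start, unsigned long end)
    {
            unsigned long nr_present = 0;

            mmap_read_lock(mm);     /* walk_page_range() asserts the mmap lock */
            walk_page_range(mm, start, end, &count_present_ops, &nr_present);
            mmap_read_unlock(mm);

            return nr_present;
    }

The in-tree users listed here (mapping_dirty_helpers.c, ptdump.c, mprotect.c, the out-of-tree purgeable.c) all follow this same shape: a per-PTE callback, an ops table naming it, and state threaded through walk->private.
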
/kernel/linux/linux-5.10/mm/
  purgeable.c
    260: long pte_entry = 0;  (local, in __mm_purg_pages_info())
    273: pte_entry = uxpte_read(&(uxpte[index]));  (in __mm_purg_pages_info())
    274: if (uxpte_present(pte_entry) == 0) /* not present */  (in __mm_purg_pages_info())
    277: if (uxpte_refcnt(pte_entry) > 0) /* pined by user */  (in __mm_purg_pages_info())
  pagewalk.c
    30: err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);  (in walk_pte_range_inner())
    102: !(ops->pte_entry))  (in walk_pmd_range())
    152: !(ops->pmd_entry || ops->pte_entry))  (in walk_pud_range())
    192: if (ops->pud_entry || ops->pmd_entry || ops->pte_entry)  (in walk_p4d_range())
    228: ops->pte_entry)  (in walk_pgd_range())
    350: * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
    440: * will also not lock the PTEs for the pte_entry() callback. This is useful for
  mapping_dirty_helpers.c
    243: .pte_entry = clean_record_pte,
    252: .pte_entry = wp_pte,
  ptdump.c
    136: .pte_entry = ptdump_pte_entry,
  mprotect.c
    403: .pte_entry = prot_none_pte_entry,

/kernel/linux/linux-5.10/include/linux/
  pagewalk.h
    18: * @pte_entry: if set, called for each non-empty PTE (lowest-level)
    46: int (*pte_entry)(pte_t *pte, unsigned long addr,  (member)

/kernel/linux/linux-6.6/include/linux/
  pagewalk.h
    28: * @pte_entry: if set, called for each PTE (lowest-level) entry,
    67: int (*pte_entry)(pte_t *pte, unsigned long addr,  (member)

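The two header excerpts differ in wording: the 5.10 comment promises a call for each non-empty PTE, while the 6.6 comment drops the "non-empty" qualifier. A callback that does not want to depend on which behaviour it gets can simply skip empty slots itself, as in the hypothetical sketch below (handle_one_pte is a made-up name; only the pte_entry signature and the pte_none()/ptep_get() helpers are real kernel API).

    /* Portable callback shape: tolerate being handed an empty entry. */
    static int handle_one_pte(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
    {
            pte_t entry = ptep_get(pte);

            if (pte_none(entry))    /* empty slot: nothing to do */
                    return 0;

            /* ... operate on the populated entry here ... */
            return 0;
    }
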
/kernel/linux/linux-5.10/arch/riscv/mm/
  pageattr.c
    103: .pte_entry = pageattr_pte_entry,

/kernel/linux/linux-6.6/arch/riscv/mm/
  pageattr.c
    90: .pte_entry = pageattr_pte_entry,

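The RISC-V pageattr code illustrates passing per-walk data through walk->private: the caller describes which protection bits to set and clear, and pageattr_pte_entry applies those masks to every PTE it visits. The sketch below captures that pattern with an illustrative attr_masks struct; the real pageattr.c uses its own struct and helper names, also handles the higher page-table levels, and flushes the TLB after the whole walk.

    #include <linux/pagewalk.h>
    #include <linux/pgtable.h>

    /* Illustrative carrier for the bits to set/clear on each PTE. */
    struct attr_masks {
            pgprot_t set_mask;
            pgprot_t clear_mask;
    };

    static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
                                  unsigned long next, struct mm_walk *walk)
    {
            struct attr_masks *masks = walk->private;
            unsigned long val = pte_val(ptep_get(pte));

            val &= ~pgprot_val(masks->clear_mask);
            val |= pgprot_val(masks->set_mask);
            set_pte(pte, __pte(val));

            return 0;
    }

    static const struct mm_walk_ops pageattr_ops = {
            .pte_entry = pageattr_pte_entry,
    };

A caller fills in an attr_masks on its stack and hands its address to the walk as the final private argument, which is how the values reach walk->private inside the callback.
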
/kernel/linux/linux-5.10/arch/s390/mm/
  gmap.c
    2639: .pte_entry = __s390_enable_skey_pte,
    2677: .pte_entry = __s390_reset_cmma,
    2702: .pte_entry = __s390_reset_acc,

/kernel/linux/linux-6.6/arch/s390/mm/
  gmap.c
    2671: .pte_entry = __s390_enable_skey_pte,
    2710: .pte_entry = __s390_reset_cmma,
    2747: .pte_entry = s390_gather_pages,