Searched refs:PTE_RDONLY (Results 1 - 17 of 17) sorted by relevance
/kernel/linux/linux-5.10/arch/arm64/include/asm/
pgtable-prot.h
    69   #define PAGE_KERNEL_RO __pgprot((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
    70   #define PAGE_KERNEL_ROX __pgprot((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
    84   #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
    85   /* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
    86   #define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
    87   #define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
    88   #define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
    89   #define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)

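A note on the protection macros above: PTE_RDONLY is the hardware AP[2] read-only bit (bit 7, see pgtable-hwdef.h further down), and PAGE_SHARED deliberately carries PTE_RDONLY together with PTE_WRITE so a shared writable mapping starts out clean and is marked dirty on first write (via a fault, or a DBM hardware update). A minimal user-space sketch of how such a protection value is composed; apart from PTE_RDONLY, the bit positions and the _PAGE_DEFAULT value are assumptions for illustration only:

    /*
     * Stand-alone sketch only. PTE_RDONLY matches the AP[2] definition in the
     * listing; the other bit positions are assumptions for illustration.
     */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY    ((pteval_t)1 << 7)    /* AP[2], per pgtable-hwdef.h */
    #define PTE_USER      ((pteval_t)1 << 6)    /* AP[1] (assumed position)   */
    #define PTE_UXN       ((pteval_t)1 << 54)   /* assumed position           */
    #define PTE_WRITE     ((pteval_t)1 << 51)   /* DBM (assumed position)     */
    #define _PAGE_DEFAULT ((pteval_t)1 << 0)    /* stand-in for valid + attrs */

    /* Same composition style as PAGE_SHARED / PAGE_READONLY above. */
    #define PAGE_SHARED   (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_UXN | PTE_WRITE)
    #define PAGE_READONLY (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_UXN)

    int main(void)
    {
        /* A shared writable page starts out clean: RDONLY and WRITE both set. */
        printf("PAGE_SHARED:   rdonly=%d write=%d\n",
               !!(PAGE_SHARED & PTE_RDONLY), !!(PAGE_SHARED & PTE_WRITE));
        printf("PAGE_READONLY: rdonly=%d write=%d\n",
               !!(PAGE_READONLY & PTE_RDONLY), !!(PAGE_READONLY & PTE_WRITE));
        return 0;
    }
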
pgtable.h
    117  #define pte_hw_dirty(pte) (pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
    177  pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkwrite()
    184  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkclean()
    194  pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkdirty()
    202  * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY  in pte_wrprotect()
    209  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_wrprotect()
    274  * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw)
    283  * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
    740  const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |  in pte_modify()
    861  * dirty status (PTE_DBM && !PTE_RDONLY) t [all...]

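These pgtable.h hits are arm64's dirty tracking: a PTE counts as hardware-dirty when it is writable (PTE_WRITE, i.e. the DBM bit) and PTE_RDONLY has been cleared, so pte_wrprotect() must fold that state into the software PTE_DIRTY bit before setting PTE_RDONLY again, and pte_mkdirty() clears PTE_RDONLY on writable entries. A condensed user-space sketch of that logic; only PTE_RDONLY's position is taken from the listing, the other bits are placeholders:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2], per the listing        */
    #define PTE_WRITE  ((pteval_t)1 << 51)   /* DBM (assumed position)        */
    #define PTE_DIRTY  ((pteval_t)1 << 55)   /* software bit (assumed position) */

    /* Hardware-dirty: writable (DBM set) and the read-only bit already cleared. */
    static bool pte_hw_dirty(pteval_t pte) { return (pte & PTE_WRITE) && !(pte & PTE_RDONLY); }
    static bool pte_dirty(pteval_t pte)    { return (pte & PTE_DIRTY) || pte_hw_dirty(pte); }

    static pteval_t pte_mkdirty(pteval_t pte)
    {
        pte |= PTE_DIRTY;
        if (pte & PTE_WRITE)          /* writable: reflect dirtiness in hardware too */
            pte &= ~PTE_RDONLY;
        return pte;
    }

    static pteval_t pte_wrprotect(pteval_t pte)
    {
        if (pte_hw_dirty(pte))        /* move hardware dirty state into PTE_DIRTY    */
            pte = pte_mkdirty(pte);   /* before it is lost below                     */
        pte &= ~PTE_WRITE;
        pte |= PTE_RDONLY;            /* now safe to make the entry read-only        */
        return pte;
    }

    int main(void)
    {
        pteval_t pte = PTE_WRITE;     /* writable entry                              */
        pte = pte_mkdirty(pte);       /* dirtied: RDONLY cleared                     */
        pte = pte_wrprotect(pte);     /* write-protect without losing dirty state    */
        printf("dirty after wrprotect: %d\n", pte_dirty(pte));   /* prints 1         */
        return 0;
    }
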
pgtable-hwdef.h
    140  #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */   (macro definition)

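PTE_RDONLY is simply AP[2] of the stage-1 descriptor, bit 7. Outside assembly the kernel's _AT(T, X) helper expands to a plain cast, so the definition boils down to the constant this snippet prints:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define _AT(T, X)  ((T)(X))                    /* what _AT() expands to in C code */
    #define PTE_RDONLY (_AT(pteval_t, 1) << 7)     /* AP[2] */

    int main(void)
    {
        printf("PTE_RDONLY = %#llx\n", (unsigned long long)PTE_RDONLY);   /* 0x80 */
        return 0;
    }
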
/kernel/linux/linux-6.6/arch/arm64/include/asm/
pgtable-prot.h
    49   #define _PAGE_KERNEL_RO ((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
    50   #define _PAGE_KERNEL_ROX ((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
    54   #define _PAGE_SHARED (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
    55   #define _PAGE_SHARED_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
    56   #define _PAGE_READONLY (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
    57   #define _PAGE_READONLY_EXEC (_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
    58   #define _PAGE_EXECONLY (_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
    100  #define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PROT_NONE | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
    101  /* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */

kernel-pgtable.h
    118  #define SWAPPER_RX_MMUFLAGS (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)

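The single kernel-pgtable.h hit shows how the early (swapper) mapping flags derive their read-only/executable variant: the read-write flag set with PTE_RDONLY OR-ed in. A trivial illustration; SWAPPER_RW_MMUFLAGS below is a stand-in value, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY           ((pteval_t)1 << 7)   /* AP[2]                          */
    #define SWAPPER_RW_MMUFLAGS  ((pteval_t)0x711)    /* stand-in, not the kernel value */
    #define SWAPPER_RX_MMUFLAGS  (SWAPPER_RW_MMUFLAGS | PTE_RDONLY)

    int main(void)
    {
        printf("RW=%#llx RX=%#llx\n",
               (unsigned long long)SWAPPER_RW_MMUFLAGS,
               (unsigned long long)SWAPPER_RX_MMUFLAGS);
        return 0;
    }
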
pgtable.h
    106  #define pte_rdonly(pte) (!!(pte_val(pte) & PTE_RDONLY))
    187  pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkwrite_novma()
    194  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkclean()
    204  pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_mkdirty()
    212  * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY  in pte_wrprotect()
    219  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in pte_wrprotect()
    285  * Dirty Writable | PTE_RDONLY PTE_WRITE PTE_DIRTY (sw)
    294  * PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
    821  const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |  in pte_modify()
    968  * dirty status (PTE_DBM && !PTE_RDONLY) t [all...]

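Besides the same dirty-tracking helpers as in v5.10, the pte_modify() hit deserves a note: only permission-related bits (PTE_RDONLY among them) are taken from the new protection, while the output address and the remaining bits of the PTE are preserved. A simplified sketch of that masking step; bit positions other than PTE_RDONLY are assumed, and the real mask contains more bits than shown:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2]                   */
    #define PTE_USER   ((pteval_t)1 << 6)    /* AP[1] (assumed)         */
    #define PTE_PXN    ((pteval_t)1 << 53)   /* assumed position        */
    #define PTE_UXN    ((pteval_t)1 << 54)   /* assumed position        */

    /* Swap in only the permission bits from newprot; keep the rest of the PTE
     * (output address, software bits) untouched - the shape of pte_modify(). */
    static pteval_t pte_modify(pteval_t pte, pteval_t newprot)
    {
        const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY;
        return (pte & ~mask) | (newprot & mask);
    }

    int main(void)
    {
        pteval_t pte = 0x40001000ULL | PTE_USER;         /* pretend address bits + user  */
        pteval_t ro  = PTE_USER | PTE_RDONLY | PTE_UXN;  /* a read-only protection value */
        printf("before=%#llx after=%#llx\n",
               (unsigned long long)pte, (unsigned long long)pte_modify(pte, ro));
        return 0;
    }
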
pgtable-hwdef.h
    148  #define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */   (macro definition)

/kernel/linux/linux-5.10/arch/arm64/mm/
pageattr.c
    94   if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||  in change_memory_common()
    95   pgprot_val(clear_mask) == PTE_RDONLY)) {  in change_memory_common()
    114  __pgprot(PTE_RDONLY),  in set_memory_ro()
    122  __pgprot(PTE_RDONLY));  in set_memory_rw()
    170  .clear_mask = __pgprot(PTE_RDONLY),  in set_direct_map_default_noflush()

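pageattr.c funnels set_memory_ro() and set_memory_rw() through one helper that applies a set-mask and a clear-mask to every PTE in the range (set PTE_RDONLY / clear PTE_WRITE, and vice versa). A condensed sketch of that pattern, walking a plain array in place of the real page-table walk; PTE_WRITE's position is assumed:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2]                */
    #define PTE_WRITE  ((pteval_t)1 << 51)   /* DBM (assumed)        */

    struct page_change_data {
        pteval_t set_mask;
        pteval_t clear_mask;
    };

    /* Stand-in for the per-PTE callback driven by the range walk. */
    static void change_pte(pteval_t *pte, const struct page_change_data *d)
    {
        *pte &= ~d->clear_mask;
        *pte |= d->set_mask;
    }

    static void change_range(pteval_t *ptes, size_t n, struct page_change_data d)
    {
        for (size_t i = 0; i < n; i++)
            change_pte(&ptes[i], &d);
    }

    int main(void)
    {
        pteval_t ptes[2] = { PTE_WRITE, PTE_WRITE };

        /* set_memory_ro(): set PTE_RDONLY, clear PTE_WRITE */
        change_range(ptes, 2, (struct page_change_data){ PTE_RDONLY, PTE_WRITE });
        printf("after ro: %#llx\n", (unsigned long long)ptes[0]);

        /* set_memory_rw(): set PTE_WRITE, clear PTE_RDONLY */
        change_range(ptes, 2, (struct page_change_data){ PTE_WRITE, PTE_RDONLY });
        printf("after rw: %#llx\n", (unsigned long long)ptes[0]);
        return 0;
    }
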
ptdump.c
    110  .mask = PTE_RDONLY,
    111  .val = PTE_RDONLY,
    248  if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)  in note_prot_wx()

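The page-table dumper describes each attribute as a {mask, val} pair plus the strings to print when it matches or not, so PTE_RDONLY renders as a read-only/RW column, and note_prot_wx() skips a range as soon as PTE_RDONLY (or PXN) is set, flagging whatever remains as W+X. A small sketch of that decode step; the PXN position and the label strings are illustrative:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2]             */
    #define PTE_PXN    ((pteval_t)1 << 53)   /* assumed position  */

    struct prot_bits {
        pteval_t mask;
        pteval_t val;
        const char *set;     /* printed when (prot & mask) == val */
        const char *clear;   /* printed otherwise                 */
    };

    static const struct prot_bits pte_bits[] = {
        { PTE_RDONLY, PTE_RDONLY, "ro", "RW" },
        { PTE_PXN,    PTE_PXN,    "NX", "x " },
    };

    static void dump_prot(pteval_t prot)
    {
        for (size_t i = 0; i < sizeof(pte_bits) / sizeof(pte_bits[0]); i++)
            printf("%s ", (prot & pte_bits[i].mask) == pte_bits[i].val ?
                          pte_bits[i].set : pte_bits[i].clear);
        /* note_prot_wx()-style check: writable and executable at once gets flagged */
        if (!(prot & PTE_RDONLY) && !(prot & PTE_PXN))
            printf("  <-- W+X");
        printf("\n");
    }

    int main(void)
    {
        dump_prot(PTE_RDONLY | PTE_PXN);   /* ro NX             */
        dump_prot(0);                      /* RW x  ... W+X     */
        return 0;
    }
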
fault.c
    203  pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;  in ptep_set_access_flags()
    207  * hardware update of the access/dirty state. The PTE_RDONLY bit must  in ptep_set_access_flags()
    211  pte_val(entry) ^= PTE_RDONLY;  in ptep_set_access_flags()
    215  pteval ^= PTE_RDONLY;  in ptep_set_access_flags()
    217  pteval ^= PTE_RDONLY;  in ptep_set_access_flags()

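The fault.c hits are the lock-free update in ptep_set_access_flags(): the new access flags are OR-ed into the live PTE in a cmpxchg loop, but PTE_RDONLY must end up as the AND of the old and new values (the more permissive of the two), which the three XOR lines achieve via a & b == ~(~a | ~b). A stand-alone sketch of that bit trick, using C11 atomics in place of cmpxchg_relaxed() and leaving out the kernel's initial masking of the entry; the PTE_AF position is assumed:

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2]                      */
    #define PTE_AF     ((pteval_t)1 << 10)   /* access flag (assumed)      */

    /* OR `entry` into *ptep atomically, except that PTE_RDONLY becomes the AND
     * of the old and new values - read-only only if both agreed on it. */
    static void set_access_flags(_Atomic pteval_t *ptep, pteval_t entry)
    {
        pteval_t old_pteval, new_pteval;

        entry ^= PTE_RDONLY;                     /* pre-invert so the OR acts as AND   */
        old_pteval = atomic_load(ptep);
        do {
            new_pteval = old_pteval;
            new_pteval ^= PTE_RDONLY;            /* invert RDONLY in the old value     */
            new_pteval |= entry;                 /* ~a | ~b for RDONLY, a | b elsewhere */
            new_pteval ^= PTE_RDONLY;            /* invert back: RDONLY becomes a & b  */
        } while (!atomic_compare_exchange_weak(ptep, &old_pteval, new_pteval));
        /* on failure, old_pteval is refreshed with the current value and we retry */
    }

    int main(void)
    {
        _Atomic pteval_t pte = PTE_RDONLY;       /* live PTE: currently read-only       */
        set_access_flags(&pte, PTE_AF);          /* fault handler wants AF, not RDONLY  */
        pteval_t v = atomic_load(&pte);
        printf("pte=%#llx rdonly=%d af=%d\n",    /* rdonly=0: the permissive value wins */
               (unsigned long long)v, !!(v & PTE_RDONLY), !!(v & PTE_AF));
        return 0;
    }
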
mmu.c
    126  pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;  in pgattr_change_is_safe()

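In mmu.c, PTE_RDONLY appears in pgattr_change_is_safe(): permission bits (PTE_RDONLY, PTE_WRITE, PTE_PXN, PTE_NG) may change on a live kernel mapping without break-before-make, so the core test is simply that the old and new attributes differ only inside that mask. A reduced sketch of that test; the real function has extra cases (empty and contiguous mappings), and only PTE_RDONLY's bit position is taken from the listing:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY ((pteval_t)1 << 7)    /* AP[2]              */
    #define PTE_NG     ((pteval_t)1 << 11)   /* assumed position   */
    #define PTE_WRITE  ((pteval_t)1 << 51)   /* DBM (assumed)      */
    #define PTE_PXN    ((pteval_t)1 << 53)   /* assumed position   */

    /* Safe if the two attribute sets differ only in bits that may change on a
     * live mapping without break-before-make. */
    static bool pgattr_change_is_safe(pteval_t old, pteval_t new)
    {
        const pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;
        return ((old ^ new) & ~mask) == 0;
    }

    int main(void)
    {
        pteval_t rw = 0x705, ro = 0x705 | PTE_RDONLY;   /* same attrs, RDONLY toggled   */
        printf("rw->ro safe: %d\n", pgattr_change_is_safe(rw, ro));          /* 1 */
        printf("attr change safe: %d\n", pgattr_change_is_safe(rw, 0x709));  /* 0 */
        return 0;
    }
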
/kernel/linux/linux-6.6/arch/arm64/mm/
pageattr.c
    108  if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||  in change_memory_common()
    109  pgprot_val(clear_mask) == PTE_RDONLY)) {  in change_memory_common()
    128  __pgprot(PTE_RDONLY),  in set_memory_ro()
    136  __pgprot(PTE_RDONLY));  in set_memory_rw()
    184  .clear_mask = __pgprot(PTE_RDONLY),  in set_direct_map_default_noflush()

ptdump.c
    106  .mask = PTE_RDONLY,
    107  .val = PTE_RDONLY,
    240  if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY)  in note_prot_wx()

fault.c
    223  pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY;  in ptep_set_access_flags()
    227  * hardware update of the access/dirty state. The PTE_RDONLY bit must  in ptep_set_access_flags()
    231  pte_val(entry) ^= PTE_RDONLY;  in ptep_set_access_flags()
    235  pteval ^= PTE_RDONLY;  in ptep_set_access_flags()
    237  pteval ^= PTE_RDONLY;  in ptep_set_access_flags()

mmu.c
    140  pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG;  in pgattr_change_is_safe()

/kernel/linux/linux-5.10/arch/arm64/kernel/
efi.c
    104  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in set_permissions()

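Finally, the EFI code maps firmware memory attributes onto PTE bits when the runtime-services regions are remapped: EFI_MEMORY_RO becomes PTE_RDONLY (and, in the full function, EFI_MEMORY_XP becomes PTE_PXN). A bare sketch of that translation; the EFI attribute constants and the PXN position are assumptions for illustration:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pteval_t;

    #define PTE_RDONLY    ((pteval_t)1 << 7)       /* AP[2]                       */
    #define PTE_PXN       ((pteval_t)1 << 53)      /* assumed position            */

    #define EFI_MEMORY_XP 0x0000000000004000ULL    /* no-execute (assumed value)  */
    #define EFI_MEMORY_RO 0x0000000000020000ULL    /* read-only  (assumed value)  */

    /* Shape of efi.c's set_permissions(): fold firmware attributes into the PTE. */
    static pteval_t apply_efi_attrs(pteval_t pte, uint64_t attribute)
    {
        if (attribute & EFI_MEMORY_RO)
            pte |= PTE_RDONLY;
        if (attribute & EFI_MEMORY_XP)
            pte |= PTE_PXN;
        return pte;
    }

    int main(void)
    {
        pteval_t pte = apply_efi_attrs(0x3, EFI_MEMORY_RO | EFI_MEMORY_XP);
        printf("pte=%#llx\n", (unsigned long long)pte);
        return 0;
    }
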
/kernel/linux/linux-6.6/arch/arm64/kernel/
efi.c
    113  pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));  in set_permissions()

Completed in 18 milliseconds