Search results for refs:_PAGE_INVALID (results 1 - 25 of 33), sorted by relevance

/kernel/linux/linux-5.10/arch/s390/mm/
pgtable.c
94 if (unlikely(pte_val(old) & _PAGE_INVALID)) in ptep_flush_direct()
113 if (unlikely(pte_val(old) & _PAGE_INVALID)) in ptep_flush_lazy()
118 pte_val(*ptep) |= _PAGE_INVALID; in ptep_flush_lazy()
179 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
201 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
203 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); in pgste_set_key()
221 !(pte_val(entry) & _PAGE_INVALID)) { in pgste_set_pte()
272 if (pte_val(old) & _PAGE_INVALID) in ptep_xchg_commit()
274 if (pte_val(new) & _PAGE_INVALID) { in ptep_xchg_commit()
568 pte_val(*ptep) = _PAGE_INVALID; in pgtable_trans_huge_withdraw()
[all...]
dump_pagetables.c
91 if (pr & _PAGE_INVALID) { in print_prot()
104 if (st->current_prot & _PAGE_INVALID) in note_prot_wx()
132 if (level == 4 && (val & _PAGE_INVALID)) in note_page()
133 prot = _PAGE_INVALID; in note_page()
136 prot = _PAGE_INVALID; in note_page()
hugetlbpage.c
54 rste |= move_set_bit(pte_val(pte), _PAGE_INVALID, in __pte_to_rste()
111 _PAGE_INVALID); in __rste_to_pte()
125 pte_val(pte) = _PAGE_INVALID; in __rste_to_pte()
pgalloc.c
174 memset64(table, _PAGE_INVALID, PTRS_PER_PTE); in page_table_alloc_pgste()
233 memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); in page_table_alloc()
238 memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE); in page_table_alloc()
342 memset64(table, _PAGE_INVALID, PTRS_PER_PTE); in base_pgt_alloc()
pageattr.c
350 pte_val(*pte) &= ~_PAGE_INVALID; in __kernel_map_pages()
vmem.c
62 memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); in vmem_pte_alloc()
kasan_init.c
80 memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); in kasan_early_pte_alloc()
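
The pgalloc.c, vmem.c and kasan_init.c hits all share one idiom: a freshly allocated s390 page table is pre-filled with _PAGE_INVALID so that every not-yet-populated entry is seen as invalid by the hardware. The following is a minimal stand-alone sketch of that idiom only; pte_table_init() and the fixed PTRS_PER_PTE value are illustrative stand-ins, not kernel code.

    #include <stdint.h>

    #define _PAGE_INVALID  0x400UL   /* s390 HW invalid bit (pgtable.h) */
    #define PTRS_PER_PTE   256       /* assumed: 256 eight-byte entries per s390 page table */

    /* Fill a new page table so every entry reads as invalid until it is
     * explicitly mapped; the user-space equivalent of
     * memset64(table, _PAGE_INVALID, PTRS_PER_PTE) in the hits above. */
    static void pte_table_init(uint64_t *table)
    {
            int i;

            for (i = 0; i < PTRS_PER_PTE; i++)
                    table[i] = _PAGE_INVALID;
    }
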
/kernel/linux/linux-6.6/arch/s390/mm/
pgtable.c
94 if (unlikely(pte_val(old) & _PAGE_INVALID)) in ptep_flush_direct()
113 if (unlikely(pte_val(old) & _PAGE_INVALID)) in ptep_flush_lazy()
118 set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID))); in ptep_flush_lazy()
179 if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID) in pgste_update_all()
201 if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID) in pgste_set_key()
203 VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); in pgste_set_key()
221 !(pte_val(entry) & _PAGE_INVALID)) { in pgste_set_pte()
272 if (pte_val(old) & _PAGE_INVALID) in ptep_xchg_commit()
274 if (pte_val(new) & _PAGE_INVALID) { in ptep_xchg_commit()
607 set_pte(ptep, __pte(_PAGE_INVALID)); in pgtable_trans_huge_withdraw()
[all...]
dump_pagetables.c
115 if (pr & _PAGE_INVALID) { in print_prot()
128 if (st->current_prot & _PAGE_INVALID) in note_prot_wx()
161 if (level == 4 && (val & _PAGE_INVALID)) in note_page()
162 prot = _PAGE_INVALID; in note_page()
165 prot = _PAGE_INVALID; in note_page()
hugetlbpage.c
55 rste |= move_set_bit(pte_val(pte), _PAGE_INVALID, in __pte_to_rste()
109 pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID); in __rste_to_pte()
118 pteval = _PAGE_INVALID; in __rste_to_pte()
pgalloc.c
150 memset64(table, _PAGE_INVALID, PTRS_PER_PTE); in page_table_alloc_pgste()
296 memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); in page_table_alloc()
301 memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE); in page_table_alloc()
478 memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); in base_pgt_alloc()
pageattr.c
107 new = set_pte_bit(new, __pgprot(_PAGE_INVALID)); in walk_pte_level()
440 pte = clear_pte_bit(*ptep, __pgprot(_PAGE_INVALID)); in __kernel_map_pages()
/kernel/linux/linux-5.10/arch/s390/include/asm/
pgtable.h
166 #define _PAGE_INVALID 0x400 /* HW invalid bit */   (macro definition)
402 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
404 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
406 _PAGE_INVALID | _PAGE_PROTECT)
408 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
410 _PAGE_INVALID | _PAGE_PROTECT)
771 return pte_val(pte) == _PAGE_INVALID; in pte_none()
870 * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
915 pte_val(*ptep) = _PAGE_INVALID; in pte_clear()
931 pte_val(pte) &= ~_PAGE_INVALID; in pte_modify()
[all...]
/kernel/linux/linux-6.6/arch/s390/include/asm/
pgtable.h
163 #define _PAGE_INVALID 0x400 /* HW invalid bit */   (macro definition)
407 #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
409 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
411 _PAGE_INVALID | _PAGE_PROTECT)
413 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
415 _PAGE_INVALID | _PAGE_PROTECT)
792 return pte_val(pte) == _PAGE_INVALID; in pte_none()
902 * young/old accounting is not supported, i.e _PAGE_PROTECT and _PAGE_INVALID
972 set_pte(ptep, __pte(_PAGE_INVALID)); in pte_clear()
988 pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID)); in pte_modify()
[all...]
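
Both s390 pgtable.h versions express the same invariant: an empty pte is encoded as exactly _PAGE_INVALID (pte_none, lines 771/792) and clearing a pte writes _PAGE_INVALID back (pte_clear, lines 915/972); 5.10 assigns pte_val(*ptep) directly, while 6.6 routes the store through set_pte()/set_pte_bit(). Below is a simplified user-space sketch of that invariant, with pte_t and the helpers reduced to stand-ins rather than the real kernel definitions.

    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_INVALID 0x400UL

    typedef struct { uint64_t val; } pte_t;              /* simplified stand-in */

    static inline uint64_t pte_val(pte_t pte) { return pte.val; }
    static inline pte_t __pte(uint64_t v)     { return (pte_t){ .val = v }; }

    /* An empty slot carries exactly _PAGE_INVALID and nothing else. */
    static inline bool pte_none(pte_t pte)
    {
            return pte_val(pte) == _PAGE_INVALID;
    }

    /* Clearing writes _PAGE_INVALID back; 6.6 performs this store via set_pte(). */
    static inline void pte_clear(pte_t *ptep)
    {
            *ptep = __pte(_PAGE_INVALID);
    }
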
/kernel/linux/linux-5.10/arch/powerpc/mm/ptdump/
book3s64.c
44 .mask = _PAGE_PRESENT | _PAGE_INVALID,
/kernel/linux/linux-6.6/arch/powerpc/mm/ptdump/
book3s64.c
44 .mask = _PAGE_PRESENT | _PAGE_INVALID,
/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
pgtable.c
111 old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); in pmdp_invalidate()
434 pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0); in ptep_modify_prot_start()
hash_hugetlbpage.c
140 _PAGE_PRESENT, _PAGE_INVALID, 1); in huge_ptep_modify_prot_start()
radix_pgtable.c
1088 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID); in radix__ptep_set_access_flags()
1094 __radix_pte_update(ptep, _PAGE_INVALID, new_pte); in radix__ptep_set_access_flags()
/kernel/linux/linux-5.10/arch/powerpc/include/asm/book3s/64/
pgtable.h
55 #define _PAGE_INVALID _RPAGE_SW0   (macro definition)
575 * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID in pte_present()
581 return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) == in pte_present()
582 cpu_to_be64(_PAGE_INVALID | _PAGE_PTE); in pte_present()
901 * invalid during a split. Hence we look for _PAGE_INVALID in pmd_present()
904 if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) in pmd_present()
914 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate). in pmd_is_serializing()
920 if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) == in pmd_is_serializing()
921 cpu_to_be64(_PAGE_INVALID)) in pmd_is_serializing()
/kernel/linux/linux-6.6/arch/powerpc/include/asm/book3s/64/
pgtable.h
54 #define _PAGE_INVALID _RPAGE_SW0   (macro definition)
518 * invalid during ptep_set_access_flags. Hence we look for _PAGE_INVALID in pte_present()
524 return (pte_raw(pte) & cpu_to_be64(_PAGE_INVALID | _PAGE_PTE)) == in pte_present()
525 cpu_to_be64(_PAGE_INVALID | _PAGE_PTE); in pte_present()
854 * invalid during a split. Hence we look for _PAGE_INVALID in pmd_present()
857 if (pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) in pmd_present()
867 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate). in pmd_is_serializing()
873 if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) == in pmd_is_serializing()
874 cpu_to_be64(_PAGE_INVALID)) in pmd_is_serializing()
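
On powerpc book3s64, _PAGE_INVALID is a software bit (_RPAGE_SW0) rather than a hardware one: pmdp_invalidate() and ptep_modify_prot_start() clear _PAGE_PRESENT and set _PAGE_INVALID in a single update, and pte_present()/pmd_present() then still treat such an entry as present. The sketch below mirrors that check in stand-alone form; the bit values, the _PAGE_PRESENT fast path and the pte_raw()/cpu_to_be64() byte-order handling are simplified assumptions, not the real layout.

    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_PRESENT  0x1UL   /* illustrative bit positions, not the */
    #define _PAGE_INVALID  0x2UL   /* real _RPAGE_* layout                */
    #define _PAGE_PTE      0x4UL

    /* An entry counts as present if it is hardware-valid, or if it was
     * temporarily invalidated (_PAGE_INVALID set) while access flags or a
     * THP split are updated, as long as it is still marked as a pte. */
    static bool pte_present_like(uint64_t pte)
    {
            if (pte & _PAGE_PRESENT)
                    return true;
            return (pte & (_PAGE_INVALID | _PAGE_PTE)) ==
                   (_PAGE_INVALID | _PAGE_PTE);
    }
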
/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
hugetlbpage.c
138 _PAGE_PRESENT, _PAGE_INVALID, 1); in huge_ptep_modify_prot_start()
pgtable.c
173 old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID); in pmdp_invalidate()
530 pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0); in ptep_modify_prot_start()
/kernel/linux/linux-5.10/arch/s390/kernel/
vdso.c
127 memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE); in vdso_alloc_per_cpu()
uv.c
193 if (pte_val(entry) & _PAGE_INVALID) in make_secure_pte()
