/kernel/linux/linux-6.6/mm/
page_vma_mapped.c
    18   pte_t ptent;  in map_pte() [local]
    40   ptent = ptep_get(pvmw->pte);  in map_pte()
    43   if (!is_swap_pte(ptent))  in map_pte()
    45   } else if (is_swap_pte(ptent)) {  in map_pte()
    63   entry = pte_to_swp_entry(ptent);  in map_pte()
    67   } else if (!pte_present(ptent)) {  in map_pte()
    99   pte_t ptent = ptep_get(pvmw->pte);  in check_pte() [local]
    103  if (!is_swap_pte(ptent))  in check_pte()
    105  entry = pte_to_swp_entry(ptent);  in check_pte()
    112  } else if (is_swap_pte(ptent)) {  in check_pte()
    [all...]

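Both map_pte() and check_pte() above follow the same pattern: snapshot the entry once with ptep_get() and then branch on pte_present() / is_swap_pte(). A minimal sketch of that classification, assuming the caller already holds the PTE lock (the helper name classify_pte() is mine, not from the file):

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/swapops.h>

/*
 * Hypothetical helper (not from page_vma_mapped.c): snapshot a PTE once
 * and classify it the way map_pte()/check_pte() do.  Caller holds the
 * PTE lock, so the value cannot change underneath us.
 */
static int classify_pte(pte_t *ptep)
{
	pte_t ptent = ptep_get(ptep);	/* single, tear-free read */

	if (pte_none(ptent))
		return 0;		/* nothing mapped */
	if (pte_present(ptent))
		return 1;		/* resident page, pfn is valid */
	if (is_swap_pte(ptent)) {
		swp_entry_t entry = pte_to_swp_entry(ptent);

		/* swap, migration or device-private entry */
		return is_migration_entry(entry) ? 2 : 3;
	}
	return -1;
}
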
mapping_dirty_helpers.c
    38   pte_t ptent = ptep_get(pte);  in wp_pte() [local]
    40   if (pte_write(ptent)) {  in wp_pte()
    43   ptent = pte_wrprotect(old_pte);  in wp_pte()
    44   ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
    94   pte_t ptent = ptep_get(pte);  in clean_record_pte() [local]
    96   if (pte_dirty(ptent)) {  in clean_record_pte()
    101  ptent = pte_mkclean(old_pte);  in clean_record_pte()
    102  ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in clean_record_pte()

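wp_pte() and clean_record_pte() both rewrite the entry through the ptep_modify_prot_start()/ptep_modify_prot_commit() pair so hardware accessed/dirty updates racing with the rewrite are not lost. A sketch of the write-protect half, assuming the pagewalk holds the PTE lock and the caller flushes the TLB afterwards (function name is mine):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Sketch of the wp_pte() pattern: transparently write-protect one PTE.
 * The start/commit pair lets the architecture preserve A/D bit updates
 * that race with the rewrite.
 */
static void wrprotect_one_pte(struct vm_area_struct *vma, unsigned long addr,
			      pte_t *ptep)
{
	pte_t ptent = ptep_get(ptep);

	if (pte_write(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);

		ptent = pte_wrprotect(old_pte);
		ptep_modify_prot_commit(vma, addr, ptep, old_pte, ptent);
		/* caller is expected to flush the TLB for the range */
	}
}
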
madvise.c
    351  pte_t *start_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() [local]
    443  ptent = ptep_get(pte);  in madvise_cold_or_pageout_pte_range()
    445  if (pte_none(ptent))  in madvise_cold_or_pageout_pte_range()
    448  if (!pte_present(ptent))  in madvise_cold_or_pageout_pte_range()
    451  folio = vm_normal_folio(vma, addr, ptent);  in madvise_cold_or_pageout_pte_range()
    499  if (pte_young(ptent)) {  in madvise_cold_or_pageout_pte_range()
    500  ptent = ptep_get_and_clear_full(mm, addr, pte,  in madvise_cold_or_pageout_pte_range()
    502  ptent = pte_mkold(ptent);  in madvise_cold_or_pageout_pte_range()
    503  set_pte_at(mm, addr, pte, ptent);  in madvise_cold_or_pageout_pte_range()
    633  pte_t *start_pte, *pte, ptent;  in madvise_free_pte_range() [local]
    [all...]

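madvise_cold_or_pageout_pte_range() ages a mapped page by clearing the accessed bit: it atomically clears the PTE, drops the young bit, and writes the entry back. A hedged sketch of that step only (helper name is mine; the folio handling and batched TLB flush of the real function are omitted):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Sketch of the "mark old" step from madvise_cold_or_pageout_pte_range():
 * clear the entry, drop the accessed bit, write it back, so the next
 * reference has to set the young bit again.  PTE lock held by the caller.
 */
static void pte_mark_old(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t ptent = ptep_get(ptep);

	if (!pte_present(ptent) || !pte_young(ptent))
		return;

	ptent = ptep_get_and_clear_full(mm, addr, ptep, /* full = */ 0);
	ptent = pte_mkold(ptent);
	set_pte_at(mm, addr, ptep, ptent);
}
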
mprotect.c
    111  pte_t ptent;  in change_pte_range() [local]
    166  ptent = pte_modify(oldpte, newprot);  in change_pte_range()
    169  ptent = pte_mkuffd_wp(ptent);  in change_pte_range()
    171  ptent = pte_clear_uffd_wp(ptent);  in change_pte_range()
    187  !pte_write(ptent) &&  in change_pte_range()
    188  can_change_pte_writable(vma, addr, ptent))  in change_pte_range()
    189  ptent = pte_mkwrite(ptent, vm[...]  in change_pte_range()
    [all...]

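change_pte_range() builds the new entry with pte_modify(), fixes up the userfaultfd write-protect bit, and only then decides whether the PTE may become writable again. A condensed sketch following the 6.6 structure (function name and flag parameters are mine; locking, TLB flushing and the NUMA path are omitted; can_change_pte_writable() is an mm-internal helper from mm/internal.h):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Condensed sketch of the protection-change step in change_pte_range().
 * "oldpte" was read under the PTE lock; the caller writes the returned
 * entry back and handles TLB flushing.
 */
static pte_t apply_new_prot(struct vm_area_struct *vma, unsigned long addr,
			    pte_t oldpte, pgprot_t newprot,
			    bool uffd_wp, bool uffd_wp_resolve)
{
	pte_t ptent = pte_modify(oldpte, newprot);

	if (uffd_wp)
		ptent = pte_mkuffd_wp(ptent);
	else if (uffd_wp_resolve)
		ptent = pte_clear_uffd_wp(ptent);

	/* re-grant write access only when it is provably safe */
	if (!pte_write(ptent) && can_change_pte_writable(vma, addr, ptent))
		ptent = pte_mkwrite(ptent, vma);

	return ptent;
}
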
highmem.c
    194  pte_t ptent;  in flush_all_zero_pkmaps() [local]
    207  ptent = ptep_get(&pkmap_page_table[i]);  in flush_all_zero_pkmaps()
    208  BUG_ON(pte_none(ptent));  in flush_all_zero_pkmaps()
    217  page = pte_page(ptent);  in flush_all_zero_pkmaps()

mlock.c
    315  pte_t ptent;  in mlock_pte_range() [local]
    338  ptent = ptep_get(pte);  in mlock_pte_range()
    339  if (!pte_present(ptent))  in mlock_pte_range()
    341  folio = vm_normal_folio(vma, addr, ptent);  in mlock_pte_range()

khugepaged.c
    1547  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() [local]
    1550  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1554  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1559  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()
    1599  pte_t ptent = ptep_get(pte);  in collapse_pte_mapped_thp() [local]
    1601  if (pte_none(ptent))  in collapse_pte_mapped_thp()
    1609  if (!pte_present(ptent)) {  in collapse_pte_mapped_thp()
    1613  page = vm_normal_page(vma, addr, ptent);  in collapse_pte_mapped_thp()

memory.c
    1011  pte_t ptent;  in copy_pte_range() [local]
    1057  ptent = ptep_get(src_pte);  in copy_pte_range()
    1058  if (pte_none(ptent)) {  in copy_pte_range()
    1062  if (unlikely(!pte_present(ptent))) {  in copy_pte_range()
    1418  pte_t ptent = ptep_get(pte);  in zap_pte_range() [local]
    1421  if (pte_none(ptent))  in zap_pte_range()
    1427  if (pte_present(ptent)) {  in zap_pte_range()
    1430  page = vm_normal_page(vma, addr, ptent);  in zap_pte_range()
    1437  ptent = ptep_get_and_clear_full(mm, addr, pte,  in zap_pte_range()
    1439  arch_check_zapped_pte(vma, ptent);  in zap_pte_range()
    [all...]

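For a present entry, zap_pte_range() resolves the page with vm_normal_page(), clears the PTE with ptep_get_and_clear_full(), and lets the architecture sanity-check the zapped value. A stripped-down sketch of that branch (helper name is mine; rmap and mmu_gather bookkeeping of the real function are omitted):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Stripped-down sketch of the present-PTE branch of zap_pte_range()
 * (linux-6.6 flavour).  PTE lock held; TLB flush left to the caller.
 */
static struct page *zap_one_pte(struct mm_struct *mm,
				struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep)
{
	pte_t ptent = ptep_get(ptep);
	struct page *page;

	if (pte_none(ptent) || !pte_present(ptent))
		return NULL;

	page = vm_normal_page(vma, addr, ptent);	/* NULL for special mappings */
	ptent = ptep_get_and_clear_full(mm, addr, ptep, /* full = */ 0);
	arch_check_zapped_pte(vma, ptent);		/* e.g. shadow-stack checks on x86 */

	return page;	/* caller updates rmap/accounting and frees */
}
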
memcontrol.c
    5742  mc_handle_present_pte(struct vm_area_struct *vma, unsigned long addr, pte_t ptent)  [argument]
    5745  struct page *page = vm_normal_page(vma, addr, ptent);  in mc_handle_present_pte()
    5762  mc_handle_swap_pte(struct vm_area_struct *vma, pte_t ptent, swp_entry_t *entry)  [argument]
    5766  swp_entry_t ent = pte_to_swp_entry(ptent);  in mc_handle_swap_pte()
    5795  mc_handle_swap_pte(struct vm_area_struct *vma, pte_t ptent, swp_entry_t *entry)  [argument]
    5802  mc_handle_file_pte(struct vm_area_struct *vma, unsigned long addr, pte_t ptent)  [argument]
    5947  * @ptent: the pte to be checked
    5964  get_mctgt_type(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, union mc_target *target)  [argument]
    5971  if (pte_present(ptent))  in get_mctgt_type()
    5972  page = mc_handle_present_pte(vma, addr, ptent);  in get_mctgt_type()
    6329  pte_t ptent = ptep_get(pte++);  in mem_cgroup_move_charge_pte_range() [local]
    [all...]

rmap.c
    2222  pte_t ptent;  in page_make_device_exclusive_one() [local]
    2234  ptent = ptep_get(pvmw.pte);  in page_make_device_exclusive_one()
    2235  if (!pte_present(ptent)) {  in page_make_device_exclusive_one()
    2242  pte_pfn(ptent) - folio_pfn(folio));  in page_make_device_exclusive_one()
    2246  flush_cache_page(vma, address, pte_pfn(ptent));  in page_make_device_exclusive_one()

ksm.c
    438  pte_t ptent;  in break_ksm_pmd_entry() [local]
    444  ptent = ptep_get(pte);  in break_ksm_pmd_entry()
    445  if (pte_present(ptent)) {  in break_ksm_pmd_entry()
    446  page = vm_normal_page(walk->vma, addr, ptent);  in break_ksm_pmd_entry()
    447  } else if (!pte_none(ptent)) {  in break_ksm_pmd_entry()
    448  swp_entry_t entry = pte_to_swp_entry(ptent);  in break_ksm_pmd_entry()

memory-failure.c
    385  pte_t ptent;  in dev_pagemap_mapping_shift() [local]
    407  ptent = ptep_get(pte);  in dev_pagemap_mapping_shift()
    408  if (pte_present(ptent) && pte_devmap(ptent))  in dev_pagemap_mapping_shift()

vmalloc.c
    328   pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);  in vunmap_pte_range() [local]
    329   WARN_ON(!pte_none(ptent) && !pte_present(ptent));  in vunmap_pte_range()
    2947  pte_t ptent;  in vmap_pfn_apply() [local]
    2952  ptent = pte_mkspecial(pfn_pte(pfn, data->prot));  in vmap_pfn_apply()
    2953  set_pte_at(&init_mm, addr, pte, ptent);  in vmap_pfn_apply()

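vmap_pfn_apply() builds a special kernel PTE from a raw pfn and installs it into init_mm with set_pte_at(); vunmap_pte_range() later tears such entries down with ptep_get_and_clear(). A sketch of the mapping side, assuming a caller-supplied pfn and protection (function name is mine):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Sketch of the vmap_pfn_apply() idea: install one special kernel PTE
 * for a raw pfn.  vmap_pfn() is meant for pfns without a struct page,
 * hence the pfn_valid() WARN.
 */
static int install_pfn_pte(pte_t *ptep, unsigned long addr,
			   unsigned long pfn, pgprot_t prot)
{
	pte_t ptent;

	if (WARN_ON_ONCE(pfn_valid(pfn)))
		return -EINVAL;

	ptent = pte_mkspecial(pfn_pte(pfn, prot));
	set_pte_at(&init_mm, addr, ptep, ptent);
	return 0;
}
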
/kernel/linux/linux-5.10/mm/
mapping_dirty_helpers.c
    36   pte_t ptent = *pte;  in wp_pte() [local]
    38   if (pte_write(ptent)) {  in wp_pte()
    41   ptent = pte_wrprotect(old_pte);  in wp_pte()
    42   ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in wp_pte()
    91   pte_t ptent = *pte;  in clean_record_pte() [local]
    93   if (pte_dirty(ptent)) {  in clean_record_pte()
    98   ptent = pte_mkclean(old_pte);  in clean_record_pte()
    99   ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);  in clean_record_pte()

mprotect.c
    80   pte_t ptent;  in change_pte_range() [local]
    120  ptent = pte_modify(oldpte, newprot);  in change_pte_range()
    122  ptent = pte_mk_savedwrite(ptent);  in change_pte_range()
    125  ptent = pte_wrprotect(ptent);  in change_pte_range()
    126  ptent = pte_mkuffd_wp(ptent);  in change_pte_range()
    134  ptent = pte_clear_uffd_wp(ptent);  in change_pte_range()
    [all...]

madvise.c
    321  pte_t *orig_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() [local]
    404  ptent = *pte;  in madvise_cold_or_pageout_pte_range()
    406  if (pte_none(ptent))  in madvise_cold_or_pageout_pte_range()
    409  if (!pte_present(ptent))  in madvise_cold_or_pageout_pte_range()
    412  page = vm_normal_page(vma, addr, ptent);  in madvise_cold_or_pageout_pte_range()
    452  if (pte_young(ptent)) {  in madvise_cold_or_pageout_pte_range()
    453  ptent = ptep_get_and_clear_full(mm, addr, pte,  in madvise_cold_or_pageout_pte_range()
    455  ptent = pte_mkold(ptent);  in madvise_cold_or_pageout_pte_range()
    456  set_pte_at(mm, addr, pte, ptent);  in madvise_cold_or_pageout_pte_range()
    585  pte_t *orig_pte, *pte, ptent;  in madvise_free_pte_range() [local]
    [all...]

memcontrol.c
    5656  mc_handle_present_pte(struct vm_area_struct *vma, unsigned long addr, pte_t ptent)  [argument]
    5659  struct page *page = vm_normal_page(vma, addr, ptent);  in mc_handle_present_pte()
    5677  mc_handle_swap_pte(struct vm_area_struct *vma, pte_t ptent, swp_entry_t *entry)  [argument]
    5681  swp_entry_t ent = pte_to_swp_entry(ptent);  in mc_handle_swap_pte()
    5715  mc_handle_swap_pte(struct vm_area_struct *vma, pte_t ptent, swp_entry_t *entry)  [argument]
    5722  mc_handle_file_pte(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, swp_entry_t *entry)  [argument]
    5861  * @ptent: the pte to be checked
    5883  get_mctgt_type(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, union mc_target *target)  [argument]
    5890  if (pte_present(ptent))  in get_mctgt_type()
    5891  page = mc_handle_present_pte(vma, addr, ptent);  in get_mctgt_type()
    6210  pte_t ptent = *(pte++);  in mem_cgroup_move_charge_pte_range() [local]
    [all...]

memory.c
    1252  pte_t ptent = *pte;  in zap_pte_range() [local]
    1253  if (pte_none(ptent))  in zap_pte_range()
    1259  if (pte_present(ptent)) {  in zap_pte_range()
    1262  page = vm_normal_page(vma, addr, ptent);  in zap_pte_range()
    1275  ptent = ptep_get_and_clear_full(mm, addr, pte,  in zap_pte_range()
    1283  if (pte_dirty(ptent)) {  in zap_pte_range()
    1287  if (pte_young(ptent) &&  in zap_pte_range()
    1294  print_bad_pte(vma, addr, ptent, page);  in zap_pte_range()
    1303  entry = pte_to_swp_entry(ptent);  in zap_pte_range()
    1340  print_bad_pte(vma, addr, ptent, NUL[...]  in zap_pte_range()
    [all...]

/kernel/linux/linux-5.10/include/linux/
xpm.h
    93   #define pte_user_mkexec(oldpte, ptent) \
    94           ((!pte_user_exec(oldpte) && pte_user_exec(ptent)))
    96   #define pte_user_mkexec(oldpte, ptent) 1

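The OpenHarmony xpm.h macro evaluates to true only when a protection change turns a previously non-executable user PTE into an executable one, and is stubbed to 1 when the feature is off. An illustrative usage sketch, assuming an arm64 build where pte_user_exec() and this macro are available (the function name and the hook placement are mine, not from the listing):

#include <linux/mm.h>
#include <linux/pgtable.h>

/*
 * Illustrative only: how a pte_user_mkexec() style check could gate
 * extra validation when mprotect() makes a user PTE executable.
 */
static pte_t change_one_pte_xpm(struct vm_area_struct *vma, unsigned long addr,
				pte_t oldpte, pgprot_t newprot)
{
	pte_t ptent = pte_modify(oldpte, newprot);

	if (pte_user_mkexec(oldpte, ptent)) {
		/*
		 * The page is becoming user-executable for the first time;
		 * this is where a code-integrity hook such as XPM would run.
		 */
	}
	return ptent;
}
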
/kernel/linux/linux-6.6/fs/proc/
task_mmu.c
    544   pte_t ptent = ptep_get(pte);  in smaps_pte_entry() [local]
    546   if (pte_present(ptent)) {  in smaps_pte_entry()
    547   page = vm_normal_page(vma, addr, ptent);  in smaps_pte_entry()
    548   young = pte_young(ptent);  in smaps_pte_entry()
    549   dirty = pte_dirty(ptent);  in smaps_pte_entry()
    550   } else if (is_swap_pte(ptent)) {  in smaps_pte_entry()
    551   swp_entry_t swpent = pte_to_swp_entry(ptent);  in smaps_pte_entry()
    741   pte_t ptent = ptep_get(pte);  in smaps_hugetlb_range() [local]
    743   if (pte_present(ptent)) {  in smaps_hugetlb_range()
    744   page = vm_normal_page(vma, addr, ptent);  in smaps_hugetlb_range()
    1118  pte_t ptent = ptep_get(pte);  in clear_soft_dirty() [local]
    1176  pte_t *pte, ptent;  in clear_refs_pte_range() [local]
    1908  pte_t ptent = ptep_get(pte);  in gather_pte_stats() [local]
    [all...]

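smaps_pte_entry() is called from the pagewalk and accounts a present PTE against the mapped page and a swap PTE against the swap total. A reduced sketch using the same decision structure (the counter struct and helper name are mine; the real code feeds mem_size_stats):

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/swapops.h>

struct vma_counts {			/* illustrative accounting struct */
	unsigned long present;
	unsigned long swapped;
};

/* Sketch of the smaps_pte_entry() classification, per base-page PTE. */
static void count_one_pte(struct vm_area_struct *vma, unsigned long addr,
			  pte_t *ptep, struct vma_counts *c)
{
	pte_t ptent = ptep_get(ptep);

	if (pte_present(ptent)) {
		if (vm_normal_page(vma, addr, ptent))
			c->present++;
	} else if (is_swap_pte(ptent)) {
		swp_entry_t swpent = pte_to_swp_entry(ptent);

		if (!non_swap_entry(swpent))	/* skip migration/hwpoison entries */
			c->swapped++;
	}
}
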
/kernel/linux/linux-6.6/mm/damon/ |
vaddr.c
    441  pte_t ptent;  in damon_young_pmd_entry() [local]
    484  ptent = ptep_get(pte);  in damon_young_pmd_entry()
    485  if (!pte_present(ptent))  in damon_young_pmd_entry()
    487  folio = damon_get_folio(pte_pfn(ptent));  in damon_young_pmd_entry()
    490  if (pte_young(ptent) || !folio_test_idle(folio) ||  in damon_young_pmd_entry()

/kernel/linux/linux-5.10/fs/proc/ |
task_mmu.c
    1106  pte_t ptent = *pte;  in clear_soft_dirty() [local]
    1108  if (pte_present(ptent)) {  in clear_soft_dirty()
    1111  if (pte_is_pinned(vma, addr, ptent))  in clear_soft_dirty()
    1114  ptent = pte_wrprotect(old_pte);  in clear_soft_dirty()
    1115  ptent = pte_clear_soft_dirty(ptent);  in clear_soft_dirty()
    1116  ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);  in clear_soft_dirty()
    1117  } else if (is_swap_pte(ptent)) {  in clear_soft_dirty()
    1118  ptent = pte_swp_clear_soft_dirty(ptent);  in clear_soft_dirty()
    1164  pte_t *pte, ptent;  in clear_refs_pte_range() [local]
    [all...]

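clear_soft_dirty() handles both forms of the bit: a present PTE is write-protected through the modify_prot pair and has its soft-dirty bit cleared, a swap PTE gets the swap-side bit cleared in place. A sketch following the 5.10/6.6 structure (helper name is mine; the pte_is_pinned() bail-out of the real code is omitted):

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/swapops.h>

/* Sketch of clear_soft_dirty(): reset soft-dirty tracking for one PTE. */
static void soft_dirty_clear_one(struct vm_area_struct *vma,
				 unsigned long addr, pte_t *ptep)
{
	pte_t ptent = ptep_get(ptep);

	if (pte_present(ptent)) {
		pte_t old_pte = ptep_modify_prot_start(vma, addr, ptep);

		ptent = pte_wrprotect(old_pte);		/* next write faults and re-marks */
		ptent = pte_clear_soft_dirty(ptent);
		ptep_modify_prot_commit(vma, addr, ptep, old_pte, ptent);
	} else if (is_swap_pte(ptent)) {
		ptent = pte_swp_clear_soft_dirty(ptent);
		set_pte_at(vma->vm_mm, addr, ptep, ptent);
	}
}
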
/kernel/linux/linux-6.6/mm/kasan/ |
init.c
    356  pte_t ptent;  in kasan_remove_pte_table() [local]
    363  ptent = ptep_get(pte);  in kasan_remove_pte_table()
    365  if (!pte_present(ptent))  in kasan_remove_pte_table()
    368  if (WARN_ON(!kasan_early_shadow_page_entry(ptent)))  in kasan_remove_pte_table()

/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/ |
i915_gem_mman.c
    1684  pte_t ptent = ptep_get(pte);  in check_present_pte() [local]
    1686  if (!pte_present(ptent) || pte_none(ptent)) {  in check_present_pte()
    1697  pte_t ptent = ptep_get(pte);  in check_absent_pte() [local]
    1699  if (pte_present(ptent) && !pte_none(ptent)) {  in check_absent_pte()

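The i915 selftests validate a userspace mmap by walking its PTEs with apply_to_page_range(): check_present_pte() fails on a missing entry, check_absent_pte() on a lingering one. A minimal sketch of the present check (callback and wrapper names are mine; the real selftest passes its object through the data pointer for logging):

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pgtable.h>

/* pte_fn_t callback: fail the walk if this PTE is not present. */
static int pte_is_present_cb(pte_t *ptep, unsigned long addr, void *data)
{
	pte_t ptent = ptep_get(ptep);

	/* a hole or a non-present entry fails the check and stops the walk */
	return (!pte_present(ptent) || pte_none(ptent)) ? -EINVAL : 0;
}

/* Walk "size" bytes of a mapping at "addr" and check every PTE. */
static int check_range_present(struct mm_struct *mm, unsigned long addr,
			       unsigned long size)
{
	return apply_to_page_range(mm, addr, size, pte_is_present_cb, NULL);
}
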
/kernel/linux/linux-6.6/fs/ |
userfaultfd.c
    335  pte_t ptent;  in userfaultfd_must_wait() [local]
    375  ptent = ptep_get(pte);  in userfaultfd_must_wait()
    376  if (pte_none_mostly(ptent))  in userfaultfd_must_wait()
    378  if (!pte_write(ptent) && (reason & VM_UFFD_WP))  in userfaultfd_must_wait()

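userfaultfd_must_wait() decides whether the faulting thread still has to wait: a (mostly) empty PTE means the missing-page fault is unresolved, and a write-protected PTE keeps a VM_UFFD_WP fault waiting. A condensed sketch of that decision (function name is mine; locking and the pmd/hugepage paths are omitted, and the exact header providing pte_none_mostly() is an assumption):

#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/userfaultfd_k.h>

/*
 * Sketch of the PTE-level decision in userfaultfd_must_wait().
 * "reason" carries VM_UFFD_MISSING / VM_UFFD_WP as in fs/userfaultfd.c;
 * pte_none_mostly() also treats uffd pte-marker entries as "none".
 */
static bool must_wait_on_pte(pte_t *ptep, unsigned long reason)
{
	pte_t ptent = ptep_get(ptep);

	if (pte_none_mostly(ptent))
		return true;		/* missing page not yet resolved */
	if (!pte_write(ptent) && (reason & VM_UFFD_WP))
		return true;		/* still write-protected */
	return false;
}
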