/kernel/linux/linux-5.10/mm/
page_vma_mapped.c
    not_found() at 10, map_pte() at 16, check_pte() at 85, step_forward() at 118,
    page_vma_mapped_walk() at 149, page_mapped_in_vma() at 305.
    The walker itself: map_pte() maps the PTE with pte_offset_map() and, when
    PVMW_SYNC is not set, skips entries that cannot match (a PVMW_MIGRATION walk
    requires a swap/migration PTE); page_mapped_in_vma() builds a local pvmw to
    answer a yes/no query.
rmap.c
    page_referenced_one() at 779, page_mkclean_one() at 913, try_to_unmap_one() at 1390.
    Each declares a local struct page_vma_mapped_walk and loops on
    page_vma_mapped_walk(&pvmw), reading pvmw.address each iteration and branching
    on whether pvmw.pte or pvmw.pmd is set; early exits go through
    page_vma_mapped_walk_done(&pvmw).
page_idle.c
    page_idle_clear_pte_refs_one() at 55: the same loop, clearing the accessed bit
    with ptep_clear_young_notify(vma, addr, pvmw.pte) for PTE mappings and
    pmdp_clear_young_notify(vma, addr, pvmw.pmd) for PMD mappings.
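
The rmap.c and page_idle.c hits above all share the 5.10 caller shape: fill in a
struct page_vma_mapped_walk by hand, iterate until page_vma_mapped_walk() returns
false, and branch on pvmw.pte versus pvmw.pmd. Below is a minimal sketch of that
shape, modeled loosely on page_referenced_one() and page_idle_clear_pte_refs_one();
check_one_mapping() is an illustrative name, not a kernel function.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>

static bool check_one_mapping(struct page *page, struct vm_area_struct *vma,
			      unsigned long address)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.address = address,
		/* .flags = PVMW_SYNC and/or PVMW_MIGRATION where needed */
	};
	int referenced = 0;

	/* Each iteration stops at one mapping of the page inside this VMA,
	 * with pvmw.address updated and pvmw.ptl held. */
	while (page_vma_mapped_walk(&pvmw)) {
		if (pvmw.pte) {
			/* Order-0 page or PTE-mapped THP. */
			if (ptep_clear_flush_young_notify(vma, pvmw.address,
							  pvmw.pte))
				referenced++;
		} else if (pvmw.pmd) {
			/* PMD-mapped THP. */
			if (pmdp_clear_flush_young_notify(vma, pvmw.address,
							  pvmw.pmd))
				referenced++;
		}
		/* A caller that wants to stop early must call
		 * page_vma_mapped_walk_done(&pvmw) before breaking out. */
	}
	return referenced != 0;
}
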
ksm.c
    write_protect_page() at 1039: resolves pvmw.address with
    page_address_in_vma(page, vma) (bailing out on -EFAULT), registers a one-page
    MMU notifier range, expects a single page_vma_mapped_walk() hit, warns if the
    mapping is not a PTE (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")), and then
    tests pte_write()/pte_dirty()/pte_protnone() on *pvmw.pte.
migrate.c
    remove_migration_pte() at 208: for each mapping found it derives the new page
    from pvmw.page->index and linear_page_index(vma, pvmw.address), hands PMD-mapped
    entries (!pvmw.pte) to remove_migration_pmd(&pvmw, new), and otherwise decodes
    the migration entry with pte_to_swp_entry(*pvmw.pte), preserving the soft-dirty
    and uffd-wp bits.
huge_memory.c
    set_pmd_migration_entry() at 2957, remove_migration_pmd() at 2983: the PMD-level
    helpers. set_pmd_migration_entry() only acts when the walk stopped at a PMD
    (pvmw->pmd set, pvmw->pte clear), invalidates it with pmdp_invalidate() and
    installs the migration entry with set_pmd_at(mm, address, pvmw->pmd, pmdswp).
/kernel/linux/linux-6.6/mm/
page_vma_mapped.c
    not_found() at 10, map_pte() at 16, check_pte() at 96, check_pmd() at 133,
    step_forward() at 142, page_vma_mapped_walk() at 173, page_mapped_in_vma() at 327.
    map_pte() now takes a spinlock_t **ptlp and, for PVMW_SYNC walks, maps and locks
    the PTE in one step with pte_offset_map_lock(); a separate check_pmd() helper
    compares the PMD against the target pfn.
rmap.c
    folio_referenced_one() at 809, page_vma_mkclean_one() at 946, pfn_mkclean_range()
    at 1067. folio_referenced_one() declares the walk with DEFINE_FOLIO_VMA_WALK(),
    reads PTEs via ptep_get(pvmw.pte), calls lru_gen_look_around(&pvmw) when it finds
    a young PTE, and for mlocked VMAs calls mlock_vma_folio() and ends the walk with
    page_vma_mapped_walk_done(). page_vma_mkclean_one() takes a pvmw argument
    directly, while pfn_mkclean_range() builds one locally.
page_idle.c
    page_idle_clear_pte_refs_one() at 56: same logic as in 5.10, but the walk is
    declared with DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0).
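
The 6.6 callers above no longer spell out the initializer: rmap.h provides
DEFINE_PAGE_VMA_WALK()/DEFINE_FOLIO_VMA_WALK(), and PTE values are read through
ptep_get(). A hedged sketch of the same accessed-bit check in that style;
folio_mapping_young() is an illustrative name, not a kernel function.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>

static bool folio_mapping_young(struct folio *folio, struct vm_area_struct *vma,
				unsigned long addr)
{
	/* Folio-based walk; the last argument is the PVMW_* flags. */
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	bool young = false;

	while (page_vma_mapped_walk(&pvmw)) {
		/* pvmw.address is the address of the mapping just found. */
		addr = pvmw.address;
		if (pvmw.pte) {
			if (ptep_clear_young_notify(vma, addr, pvmw.pte))
				young = true;
		} else if (pvmw.pmd) {
			if (pmdp_clear_young_notify(vma, addr, pvmw.pmd))
				young = true;
		}
	}
	return young;
}
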
migrate.c
    remove_migration_pte() at 186: declares DEFINE_FOLIO_VMA_WALK(pvmw, old, vma,
    addr, PVMW_SYNC | PVMW_MIGRATION), computes the sub-page index as
    linear_page_index(vma, pvmw.address) - pvmw.pgoff, hands PMD-mapped entries to
    remove_migration_pmd(&pvmw, new), reads the old entry with ptep_get(pvmw.pte),
    and has a hugetlb path using hugepage_add_anon_rmap() and set_huge_pte_at().
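
A reduced sketch of the remove_migration_pte() shape follows; it is the only walk in
these hits that passes flags: PVMW_SYNC so the walk waits on PTE locks and
PVMW_MIGRATION so it matches migration entries rather than present PTEs.
restore_one_mapping() is an illustrative name, and the PTE-rebuilding half of the
real function (an rmap_walk callback) is elided here.

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/swapops.h>

static void restore_one_mapping(struct folio *src, struct folio *dst,
				struct vm_area_struct *vma, unsigned long addr)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, src, vma, addr, PVMW_SYNC | PVMW_MIGRATION);

	while (page_vma_mapped_walk(&pvmw)) {
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		if (!pvmw.pte) {
			/* Which sub-page of a large folio this PMD maps. */
			unsigned long idx = linear_page_index(vma, pvmw.address) -
					    pvmw.pgoff;

			/* PMD-level migration entry: restored wholesale. */
			remove_migration_pmd(&pvmw, folio_page(dst, idx));
			continue;
		}
#endif
		/* ... here the real code decodes pte_to_swp_entry(ptep_get(pvmw.pte)),
		 * rebuilds the PTE (dirty, soft-dirty, uffd-wp, write bits), updates
		 * the rmap for the destination page and set_pte_at()s it ... */
	}
}
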
huge_memory.c
    set_pmd_migration_entry() at 3204, remove_migration_pmd() at 3253.
    set_pmd_migration_entry() now returns int: after pmdp_invalidate() it can restore
    the original pmdval with set_pmd_at() on a failure path, and otherwise installs
    the migration entry (pmdswp) as in 5.10.
ksm.c
    write_protect_page() at 1099: as in 5.10, but declared with
    DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0); the PTE is read once into a local
    via entry = ptep_get(pvmw.pte) and cleared with ptep_clear_flush().
internal.h
    vma_address_end() at 817: returns the end of the virtual range a walk over
    pvmw->nr_pages pages can cover in pvmw->vma; a single page is simply
    pvmw->address + PAGE_SIZE, otherwise the end is derived from
    pvmw->pgoff + pvmw->nr_pages and clamped to the VMA boundary.
vmscan.c
    lru_gen_look_around() at 4609: the MGLRU helper invoked from
    folio_referenced_one(); it resolves the folio with pfn_folio(pvmw->pfn), asserts
    pvmw->ptl is held (lockdep_assert_held()), bails out if that lock is contended,
    scans the PTEs around pvmw->address for other referenced entries, and records
    pvmw->pmd in the Bloom filter via update_bloom_filter().
/kernel/linux/linux-6.6/mm/damon/
paddr.c
    __damon_pa_mkold() at 22 and __damon_pa_young() at 86: the same
    DEFINE_FOLIO_VMA_WALK() loop; __damon_pa_mkold() calls damon_ptep_mkold() or
    damon_pmdp_mkold() depending on whether pvmw.pte or pvmw.pmd is set, and
    __damon_pa_young() walks the same way to test whether the folio was accessed.
/kernel/linux/linux-5.10/include/linux/
rmap.h
    page_vma_mapped_walk_done() at 218 and the page_vma_mapped_walk() declaration at
    227. The done() helper pte_unmap()s pvmw->pte unless the page is a hugetlb page
    (PageHuge(pvmw->page)) and drops pvmw->ptl if it is held.
swapops.h
    extern declarations of set_pmd_migration_entry() at 254 and remove_migration_pmd()
    at 257, with static inline stubs at 287 and 293 for configurations without THP
    migration; both return void in 5.10.
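
Both swapops.h hits follow the same arrangement: real declarations when THP
migration is compiled in, static inline stubs otherwise, so callers compile either
way. A sketch of that pattern as it appears in the 5.10 header; in these trees the
gate is CONFIG_ARCH_ENABLE_THP_MIGRATION and the stubs trip BUILD_BUG() if anything
reachable calls them, while 6.6 differs mainly in set_pmd_migration_entry()
returning int.

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page);
extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new);
#else
/* Stubs for kernels built without THP migration: never legitimately reached. */
static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
		struct page *page)
{
	BUILD_BUG();
}
static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
		struct page *new)
{
	BUILD_BUG();
}
#endif
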
/kernel/linux/linux-6.6/include/linux/
rmap.h
    page_vma_mapped_walk_done() at 415 and the page_vma_mapped_walk() declaration at
    424. The hugetlb check is now on the VMA (is_vm_hugetlb_page(pvmw->vma)) rather
    than on the page.
swapops.h
    extern declarations at 488 and 491 with static inline stubs at 521 and 527;
    set_pmd_migration_entry() now returns int.
mmzone.h
    lru_gen_look_around() declared at 520, with an empty static inline stub at 612
    for kernels built without MGLRU.
/kernel/linux/linux-5.10/kernel/events/
uprobes.c
    __replace_page() at 158: runs a single page_vma_mapped_walk(), checks the address
    with VM_BUG_ON_PAGE(addr != pvmw.address, old_page), then flush_cache_page()
    (using pte_pfn(*pvmw.pte)), ptep_clear_flush_notify() and set_pte_at_notify() on
    pvmw.pte, and finishes with page_vma_mapped_walk_done(&pvmw).
/kernel/linux/linux-6.6/kernel/events/
uprobes.c
    __replace_page() at 159: same structure, declared with
    DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0), reading the PTE via
    ptep_get(pvmw.pte) and using ptep_clear_flush() instead of the notify variant.