Cross references for vm_normal_page()

/kernel/linux/linux-5.10/fs/proc/

  task_mmu.c
     534  page = vm_normal_page(vma, addr, *pte);       in smaps_pte_entry()
     732  page = vm_normal_page(vma, addr, *pte);       in smaps_hugetlb_range()
    1091  page = vm_normal_page(vma, addr, pte);        in pte_is_pinned()
    1204  page = vm_normal_page(vma, addr, ptent);      in clear_refs_pte_range()
    1415  page = vm_normal_page(vma, addr, pte);        in pte_to_pagemap_entry()
    1792  page = vm_normal_page(vma, addr, pte);        in can_gather_numa_stats()
/kernel/linux/linux-6.6/fs/proc/

  task_mmu.c
     547  page = vm_normal_page(vma, addr, ptent);      in smaps_pte_entry()
     744  page = vm_normal_page(vma, addr, ptent);      in smaps_hugetlb_range()
    1103  page = vm_normal_page(vma, addr, pte);        in pte_is_pinned()
    1217  page = vm_normal_page(vma, addr, ptent);      in clear_refs_pte_range()
    1430  page = vm_normal_page(vma, addr, pte);        in pte_to_pagemap_entry()
    1840  page = vm_normal_page(vma, addr, pte);        in can_gather_numa_stats()
/kernel/linux/linux-5.10/mm/

  mprotect.c
      94  page = vm_normal_page(vma, addr, oldpte);     in change_pte_range()
     147  vm_normal_page(vma, addr, oldpte))))          in change_pte_range()
  khugepaged.c
     629  page = vm_normal_page(vma, address, pteval);  in __collapse_huge_page_isolate()
    1304  page = vm_normal_page(vma, _address, pteval); in khugepaged_scan_pmd()
    1506  page = vm_normal_page(vma, addr, *pte);       in collapse_pte_mapped_thp()
    1524  page = vm_normal_page(vma, addr, *pte);       in collapse_pte_mapped_thp()
  hmm.c
     299  if (!vm_normal_page(walk->vma, addr, pte) &&  in hmm_vma_handle_pte()
  madvise.c
     412  page = vm_normal_page(vma, addr, ptent);      in madvise_cold_or_pageout_pte_range()
     624  page = vm_normal_page(vma, addr, ptent);      in madvise_free_pte_range()
  mlock.c
     373  * zone, as long as the pte's are present and vm_normal_page() succeeds. These
     404  page = vm_normal_page(vma, start, *pte);      in __munlock_pagevec_fill()
  memory.c
     555  * vm_normal_page -- This function gets the "struct page" associated with a pte.
     596  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,   in vm_normal_page()  (definition)
     660  * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.   in vm_normal_page_pmd()
     878  page = vm_normal_page(src_vma, addr, pte);    in copy_present_pte()
    1262  page = vm_normal_page(vma, addr, ptent);      in zap_pte_range()
    2090  /* these checks mirror the abort conditions in vm_normal_page */   in vm_mixed_ok()
    2120  * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*   in __vm_insert_mixed()
    2339  * See vm_normal_page() for details.           in remap_pfn_range()
    3162  vmf->page = vm_normal_page(vma, vmf->address, vmf->orig_pte);
    4372  page = vm_normal_page(vm                      in do_numa_page()
    [all...]
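Most of the call sites above share one shape: inside a locked PTE walk, a present pte is handed to vm_normal_page(), and a NULL return (a special or pfn-only mapping with no struct page behind it) makes the walker skip the entry. The sketch below illustrates that pattern against the 5.10 signature shown in the memory.c hits; the walker name, the counter, and the pagewalk hook-up are illustrative assumptions, not code from the listing.

    /*
     * Hedged sketch only: the recurring pattern behind most hits above
     * (madvise, mempolicy, task_mmu).  vm_normal_page()'s signature comes
     * from the listing; the walker name, the private counter, and the
     * hook-up into a struct mm_walk are illustrative assumptions.
     */
    #include <linux/mm.h>
    #include <linux/pagewalk.h>

    static int count_normal_ptes(pmd_t *pmd, unsigned long addr,
                                 unsigned long end, struct mm_walk *walk)
    {
            struct vm_area_struct *vma = walk->vma;
            unsigned long *nr_normal = walk->private;  /* hypothetical stat */
            pte_t *pte, *orig_pte;
            spinlock_t *ptl;

            /* Real callers handle huge PMDs first; skip them in this sketch. */
            if (pmd_trans_unstable(pmd))
                    return 0;

            orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
            for (; addr < end; pte++, addr += PAGE_SIZE) {
                    pte_t ptent = *pte;
                    struct page *page;

                    if (!pte_present(ptent))
                            continue;

                    /*
                     * NULL means a special pte or a raw pfn mapping: there
                     * is no struct page to touch, so the walker bails out.
                     */
                    page = vm_normal_page(vma, addr, ptent);
                    if (!page)
                            continue;

                    (*nr_normal)++;
            }
            pte_unmap_unlock(orig_pte, ptl);
            return 0;
    }

Under 6.6 the same loop would typically read the entry via ptep_get() rather than dereferencing the pte pointer directly, which is consistent with the ptent-based hits in the 6.6 listing below.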
  gup.c
     451  page = vm_normal_page(vma, address, pte);     in follow_page_pte()
     754  * by a page descriptor (see also vm_normal_page()).
     844  *page = vm_normal_page(*vma, address, *pte);  in get_gate_page()
  mempolicy.c
     546  page = vm_normal_page(vma, addr, *pte);       in queue_pages_pte_range()
     550  * vm_normal_page() filters out zero pages, but there might   in queue_pages_pte_range()
  migrate.c
    2424  page = vm_normal_page(migrate->vma, addr, pte);   in migrate_vma_collect_pmd()
  memcontrol.c
    5659  struct page *page = vm_normal_page(vma, addr, ptent);   in mc_handle_present_pte()
/kernel/linux/linux-6.6/mm/

  mprotect.c
      70  page = vm_normal_page(vma, addr, pte);        in can_change_pte_writable()
     126  page = vm_normal_page(vma, addr, oldpte);     in change_pte_range()
  khugepaged.c
     573  page = vm_normal_page(vma, address, pteval);  in __collapse_huge_page_isolate()
    1320  page = vm_normal_page(vma, _address, pteval); in hpage_collapse_scan_pmd()
    1559  page = vm_normal_page(vma, addr, ptent);      in collapse_pte_mapped_thp()
    1613  page = vm_normal_page(vma, addr, ptent);      in collapse_pte_mapped_thp()
  hmm.c
     300  if (!vm_normal_page(walk->vma, addr, pte) &&  in hmm_vma_handle_pte()
  migrate_device.c
     152  page = vm_normal_page(migrate->vma, addr, pte);   in migrate_vma_collect_pmd()
  memory.c
     542  * vm_normal_page -- This function gets the "struct page" associated with a pte.
     583  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,   in vm_normal_page()  (definition)
     649  struct page *page = vm_normal_page(vma, addr, pte);   in vm_normal_folio()
     665  * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.   in vm_normal_page_pmd()
     935  page = vm_normal_page(src_vma, addr, pte);    in copy_present_pte()
    1430  page = vm_normal_page(vma, addr, ptent);      in zap_pte_range()
    2261  /* these checks mirror the abort conditions in vm_normal_page */   in vm_mixed_ok()
    2291  * based VM_MIXEDMAP scheme (see vm_normal_page), and thus we *must*   in __vm_insert_mixed()
    2468  * See vm_normal_page() for details.           in remap_pfn_range_notrack()
    3376  vmf->page = vm_normal_page(vm
    [all...]
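New relative to 5.10 is the hit at memory.c:649, where the call sits inside vm_normal_folio(), a wrapper that returns the folio for a normal pte (vm_normal_page() followed by page_folio(), NULL otherwise). A minimal sketch of calling the wrapper, assuming a hypothetical helper name; only vm_normal_folio()'s existence and signature are taken from the listing.

    /*
     * Hedged sketch: using the folio wrapper shown at memory.c:649 in the
     * 6.6 listing.  The helper name and the anon check are illustrative
     * assumptions.
     */
    #include <linux/mm.h>

    static bool pte_maps_anon_folio(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t ptent)
    {
            struct folio *folio;

            if (!pte_present(ptent))
                    return false;

            folio = vm_normal_folio(vma, addr, ptent);
            if (!folio)
                    return false;   /* special/pfn mapping: no struct page */

            return folio_test_anon(folio);
    }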
  gup.c
     603  page = vm_normal_page(vma, address, pte);     in follow_page_pte()
     807  * by a page descriptor (see also vm_normal_page()).
     896  *page = vm_normal_page(*vma, address, entry); in get_gate_page()
  ksm.c
     446  page = vm_normal_page(walk->vma, addr, ptent);   in break_ksm_pmd_entry()
  memcontrol.c
    5745  struct page *page = vm_normal_page(vma, addr, ptent);   in mc_handle_present_pte()
/kernel/linux/linux-5.10/include/linux/

  mm.h
     641  * Called by vm_normal_page() for special PTEs to find the
    1686  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
    3163  * the definition in vm_normal_page().
/kernel/linux/linux-6.6/include/linux/

  mm.h
     651  * Called by vm_normal_page() for special PTEs to find the
    2354  struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
    3983  * the definition in vm_normal_page().
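The first mm.h hit in both trees ("Called by vm_normal_page() for special PTEs to find the ...") refers to the find_special_page callback in struct vm_operations_struct: vm_normal_page() consults it before concluding that a special PTE has no backing struct page. Below is a hedged sketch of a driver supplying that hook; all mydrv_* names and the context layout are hypothetical, and only the callback's place in vm_operations_struct is taken from the listing.

    /*
     * Hedged sketch: the ->find_special_page hook referenced by the mm.h
     * hits above.  vm_normal_page() calls it for special PTEs so a driver
     * can still hand back a struct page.  All mydrv_* names are
     * hypothetical.
     */
    #include <linux/mm.h>

    struct mydrv_ctx {
            struct page **pages;    /* driver-owned backing pages */
    };

    static struct page *mydrv_find_special_page(struct vm_area_struct *vma,
                                                unsigned long addr)
    {
            struct mydrv_ctx *ctx = vma->vm_private_data;

            /* Map the address back to the driver's backing page. */
            return ctx->pages[(addr - vma->vm_start) >> PAGE_SHIFT];
    }

    static const struct vm_operations_struct mydrv_vm_ops = {
            .find_special_page = mydrv_find_special_page,
    };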