/kernel/linux/linux-6.6/arch/riscv/mm/hugetlbpage.c
    10  pte_t orig_pte = ptep_get(ptep);  in huge_ptep_get() (local)
    12  if (!pte_present(orig_pte) || !pte_napot(orig_pte))  in huge_ptep_get()
    13  return orig_pte;  in huge_ptep_get()
    15  pte_num = napot_pte_num(napot_cont_order(orig_pte));  in huge_ptep_get()
    21  orig_pte = pte_mkdirty(orig_pte);  in huge_ptep_get()
    24  orig_pte = pte_mkyoung(orig_pte);  in huge_ptep_get()
    27  return orig_pte;  in huge_ptep_get()
   153  pte_t orig_pte = ptep_get(ptep);  in get_clear_contig() (local)
   174  pte_t orig_pte = get_clear_contig(mm, addr, ptep, pte_num);  in get_clear_contig_flush() (local)
   271  pte_t orig_pte;  in huge_ptep_set_access_flags() (local)
   298  pte_t orig_pte = ptep_get(ptep);  in huge_ptep_get_and_clear() (local)
   315  pte_t orig_pte;  in huge_ptep_set_wrprotect() (local)
        [additional matches not shown]
/kernel/linux/linux-6.6/arch/arm64/mm/hugetlbpage.c
   155  pte_t orig_pte = ptep_get(ptep);  in huge_ptep_get() (local)
   157  if (!pte_present(orig_pte) || !pte_cont(orig_pte))  in huge_ptep_get()
   158  return orig_pte;  in huge_ptep_get()
   160  ncontig = num_contig_ptes(page_size(pte_page(orig_pte)), &pgsize);  in huge_ptep_get()
   165  orig_pte = pte_mkdirty(orig_pte);  in huge_ptep_get()
   168  orig_pte = pte_mkyoung(orig_pte);  in huge_ptep_get()
   170  return orig_pte;  in huge_ptep_get()
   187  pte_t orig_pte = ptep_get(ptep);  in get_clear_contig() (local)
   213  pte_t orig_pte = get_clear_contig(mm, addr, ptep, pgsize, ncontig);  in get_clear_contig_flush() (local)
   411  pte_t orig_pte = ptep_get(ptep);  in huge_ptep_get_and_clear() (local)
   438  pte_t orig_pte = ptep_get(ptep + i);  in __cont_access_flags_changed() (local)
   459  pte_t orig_pte;  in huge_ptep_set_access_flags() (local)
        [additional matches not shown]
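Both the riscv hits above (NAPOT mappings) and the arm64 hits (contiguous-bit mappings) show the same huge_ptep_get() pattern: read the head PTE and, if it starts a contiguous run, fold the dirty and young bits of every entry in the run into the value that is returned, since hardware may have set those bits on any one of the entries backing the hugepage. A minimal user-space sketch of that fold, with toy pte_t and pte_*() stand-ins rather than the real kernel helpers:

#include <stdbool.h>
#include <stdio.h>

/* Toy PTE model: bit 0 = dirty, bit 1 = young (accessed). */
typedef unsigned long pte_t;
#define PTE_DIRTY 0x1UL
#define PTE_YOUNG 0x2UL

static bool pte_dirty(pte_t p)    { return p & PTE_DIRTY; }
static bool pte_young(pte_t p)    { return p & PTE_YOUNG; }
static pte_t pte_mkdirty(pte_t p) { return p | PTE_DIRTY; }
static pte_t pte_mkyoung(pte_t p) { return p | PTE_YOUNG; }

/*
 * Return the head PTE of a contiguous run, with dirty/young set if any
 * entry in the run has them set -- the fold huge_ptep_get() performs for
 * NAPOT (riscv) and contiguous-bit (arm64) hugepages.
 */
static pte_t huge_pte_fold(const pte_t *ptep, unsigned int pte_num)
{
    pte_t orig_pte = ptep[0];

    for (unsigned int i = 0; i < pte_num; i++) {
        if (pte_dirty(ptep[i]))
            orig_pte = pte_mkdirty(orig_pte);
        if (pte_young(ptep[i]))
            orig_pte = pte_mkyoung(orig_pte);
    }
    return orig_pte;
}

int main(void)
{
    pte_t run[4] = { 0, PTE_DIRTY, 0, PTE_YOUNG };

    printf("folded pte = %#lx\n", huge_pte_fold(run, 4)); /* prints 0x3 */
    return 0;
}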
/kernel/linux/linux-5.10/arch/arm64/mm/hugetlbpage.c
   162  pte_t orig_pte = huge_ptep_get(ptep);  in get_clear_flush() (local)
   163  bool valid = pte_valid(orig_pte);  in get_clear_flush()
   175  orig_pte = pte_mkdirty(orig_pte);  in get_clear_flush()
   178  orig_pte = pte_mkyoung(orig_pte);  in get_clear_flush()
   185  return orig_pte;  in get_clear_flush()
   376  pte_t orig_pte = huge_ptep_get(ptep);  in huge_ptep_get_and_clear() (local)
   378  if (!pte_cont(orig_pte))  in huge_ptep_get_and_clear()
   403  pte_t orig_pte …  in __cont_access_flags_changed() (local)
   423  pte_t orig_pte;  in huge_ptep_set_access_flags() (local)
        [additional matches not shown]
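The 5.10 get_clear_flush() hits show the companion teardown pattern: every PTE in the contiguous run is read and cleared, the dirty/young bits of the cleared values are folded into orig_pte, and one TLB flush covers the whole range at the end. A rough user-space sketch of that control flow, where ptep_read_and_clear() and flush_range() are invented placeholders rather than kernel calls:

#include <stdio.h>

typedef unsigned long pte_t;
#define PTE_DIRTY 0x1UL
#define PTE_YOUNG 0x2UL

/* Placeholder for ptep_get_and_clear(): read one entry and zero it. */
static pte_t ptep_read_and_clear(pte_t *ptep)
{
    pte_t old = *ptep;
    *ptep = 0;
    return old;
}

/* Placeholder for the single range TLB flush issued at the end. */
static void flush_range(unsigned long addr, unsigned long len)
{
    printf("flush [%#lx, %#lx)\n", addr, addr + len);
}

static pte_t get_clear_flush_sketch(pte_t *ptep, unsigned long addr,
                                    unsigned long pgsize, unsigned int ncontig)
{
    pte_t orig_pte = ptep[0];

    for (unsigned int i = 0; i < ncontig; i++) {
        /* Fold hardware-updated bits from every entry of the run. */
        orig_pte |= ptep_read_and_clear(&ptep[i]) & (PTE_DIRTY | PTE_YOUNG);
    }
    flush_range(addr, (unsigned long)ncontig * pgsize);
    return orig_pte;
}

int main(void)
{
    pte_t run[4] = { 0x10, 0x10 | PTE_DIRTY, 0x10, 0x10 | PTE_YOUNG };

    printf("summary pte = %#lx\n", get_clear_flush_sketch(run, 0x1000, 0x1000, 4));
    return 0;
}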
/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_64_mmu_host.c
    70  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() (argument)
    88  unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;  in kvmppc_mmu_map_page()
    96  pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
    99  orig_pte->raddr);  in kvmppc_mmu_map_page()
   106  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
   109  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);  in kvmppc_mmu_map_page()
   115  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
   121  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);  in kvmppc_mmu_map_page()
   124  if (!orig_pte->may_write || !writable)  in kvmppc_mmu_map_page()
   131  if (!orig_pte …  in kvmppc_mmu_map_page()
        [additional matches not shown]

/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_32_mmu_host.c
   130  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() (argument)
   138  u32 eaddr = orig_pte->eaddr;  in kvmppc_mmu_map_page()
   148  hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
   151  orig_pte->raddr);  in kvmppc_mmu_map_page()
   158  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
   198  if (orig_pte->may_write && writable) {  in kvmppc_mmu_map_page()
   200  mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);  in kvmppc_mmu_map_page()
   205  if (orig_pte->may_execute)  in kvmppc_mmu_map_page()
   241  orig_pte->may_write ? 'w' : '-',  in kvmppc_mmu_map_page()
   242  orig_pte …  in kvmppc_mmu_map_page()
        [additional matches not shown]

/kernel/linux/linux-5.10/arch/powerpc/kvm/trace_pr.h
    33  struct kvmppc_pte *orig_pte),
    34  TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
    49  __entry->eaddr = orig_pte->eaddr;
    52  __entry->vpage = orig_pte->vpage;

/kernel/linux/linux-5.10/arch/powerpc/kvm/book3s_hv_rm_mmu.c
   495  u64 pte, orig_pte, pte_r;  in kvmppc_do_h_remove() (local)
   504  pte = orig_pte = be64_to_cpu(hpte[0]);  in kvmppc_do_h_remove()
   513  __unlock_hpte(hpte, orig_pte);  in kvmppc_do_h_remove()
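In both kvmppc_mmu_map_page() variants above, orig_pte is the guest translation produced by the shadow-MMU walker; its may_write/may_execute bits are combined with the writability of the backing host page to choose the protection of the shadow mapping, and a writable mapping also triggers mark_page_dirty(). A simplified sketch of that decision, using an illustrative struct in place of the kernel's kvmppc_pte:

#include <stdbool.h>
#include <stdio.h>

/* Reduced stand-in for the guest translation record (not the kernel's kvmppc_pte). */
struct guest_pte {
    bool may_write;
    bool may_execute;
};

#define MAP_READ  0x1u
#define MAP_WRITE 0x2u
#define MAP_EXEC  0x4u

/*
 * Shadow-mapping permissions: write is granted only when the guest PTE allows
 * it *and* the host page came back writable; execute follows the guest PTE
 * alone. *dirty tells the caller to mark the guest page dirty.
 */
static unsigned int shadow_prot(const struct guest_pte *orig_pte,
                                bool host_writable, bool *dirty)
{
    unsigned int prot = MAP_READ;

    *dirty = false;
    if (orig_pte->may_write && host_writable) {
        prot |= MAP_WRITE;
        *dirty = true;    /* the guest can dirty the page through this mapping */
    }
    if (orig_pte->may_execute)
        prot |= MAP_EXEC;
    return prot;
}

int main(void)
{
    struct guest_pte gpte = { .may_write = true, .may_execute = false };
    bool dirty;

    /* Guest allows write but the host page is read-only: no MAP_WRITE. */
    printf("prot=%#x dirty=%d\n", shadow_prot(&gpte, false, &dirty), dirty);
    return 0;
}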
/kernel/linux/linux-6.6/arch/powerpc/kvm/book3s_64_mmu_host.c
    71  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() (argument)
    89  unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;  in kvmppc_mmu_map_page()
    97  pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
   100  orig_pte->raddr);  in kvmppc_mmu_map_page()
   107  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
   110  ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);  in kvmppc_mmu_map_page()
   116  vsid, orig_pte->eaddr);  in kvmppc_mmu_map_page()
   122  vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);  in kvmppc_mmu_map_page()
   125  if (!orig_pte->may_write || !writable)  in kvmppc_mmu_map_page()
   132  if (!orig_pte …  in kvmppc_mmu_map_page()
        [additional matches not shown]

/kernel/linux/linux-6.6/arch/powerpc/kvm/book3s_32_mmu_host.c
   130  int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,  in kvmppc_mmu_map_page() (argument)
   138  u32 eaddr = orig_pte->eaddr;  in kvmppc_mmu_map_page()
   148  hpaddr = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);  in kvmppc_mmu_map_page()
   151  orig_pte->raddr);  in kvmppc_mmu_map_page()
   158  vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);  in kvmppc_mmu_map_page()
   198  if (orig_pte->may_write && writable) {  in kvmppc_mmu_map_page()
   200  mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);  in kvmppc_mmu_map_page()
   205  if (orig_pte->may_execute)  in kvmppc_mmu_map_page()
   241  orig_pte->may_write ? 'w' : '-',  in kvmppc_mmu_map_page()
   242  orig_pte …  in kvmppc_mmu_map_page()
        [additional matches not shown]

/kernel/linux/linux-6.6/arch/powerpc/kvm/trace_pr.h
    33  struct kvmppc_pte *orig_pte),
    34  TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte),
    49  __entry->eaddr = orig_pte->eaddr;
    52  __entry->vpage = orig_pte->vpage;
/kernel/linux/linux-6.6/mm/memory.c
   121  return pte_marker_uffd_wp(vmf->orig_pte);  in vmf_orig_pte_uffd_wp()
   702  pte_t orig_pte;  in restore_exclusive_pte() (local)
   706  orig_pte = ptep_get(ptep);  in restore_exclusive_pte()
   708  if (pte_swp_soft_dirty(orig_pte))  in restore_exclusive_pte()
   711  entry = pte_to_swp_entry(orig_pte);  in restore_exclusive_pte()
   712  if (pte_swp_uffd_wp(orig_pte))  in restore_exclusive_pte()
   773  pte_t orig_pte = ptep_get(src_pte);  in copy_nonpresent_pte() (local)
   774  pte_t pte = orig_pte;  in copy_nonpresent_pte()
   776  swp_entry_t entry = pte_to_swp_entry(orig_pte);  in copy_nonpresent_pte()
   791  if (pte_swp_exclusive(orig_pte)) {  in copy_nonpresent_pte()
        [additional matches not shown]

/kernel/linux/linux-6.6/mm/migrate_device.c
   577  pte_t orig_pte;  in migrate_vma_insert_page() (local)
   633  orig_pte = ptep_get(ptep);  in migrate_vma_insert_page()
   638  if (pte_present(orig_pte)) {  in migrate_vma_insert_page()
   639  unsigned long pfn = pte_pfn(orig_pte);  in migrate_vma_insert_page()
   644  } else if (!pte_none(orig_pte))  in migrate_vma_insert_page()
   661  flush_cache_page(vma, addr, pte_pfn(orig_pte));  in migrate_vma_insert_page()

/kernel/linux/linux-6.6/mm/ksm.c
  1096  pte_t *orig_pte)  in write_protect_page()
  1166  *orig_pte = entry;  in write_protect_page()
  1182  * @orig_pte: the original value of the pte
  1187  struct page *kpage, pte_t orig_pte)  in replace_page()
  1223  if (!pte_same(ptep_get(ptep), orig_pte)) {  in replace_page()
  1292  pte_t orig_pte = __pte(0);  in try_to_merge_one_page() (local)
  1322  if (write_protect_page(vma, page, &orig_pte) == 0) {  in try_to_merge_one_page()
  1339  err = replace_page(vma, page, kpage, orig_pte);  in try_to_merge_one_page()
  1095  write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte)  in write_protect_page() (argument)
  1186  replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte)  in replace_page() (argument)
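The ksm.c hits show the revalidation idiom most of these mm/ call sites rely on: sample the PTE value (orig_pte) up front, do the slow work, and before committing re-check the entry with pte_same(), bailing out if it changed. A compact user-space model of that check, with a plain atomic word standing in for a locked page-table entry:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long pte_t;

/*
 * Commit new_pte only if the slot still holds the value sampled earlier
 * (orig_pte); mirrors the !pte_same() bail-out in replace_page(). In the
 * kernel the check runs under the PTE lock; a CAS models the same effect.
 */
static bool replace_if_unchanged(_Atomic pte_t *ptep, pte_t orig_pte, pte_t new_pte)
{
    return atomic_compare_exchange_strong(ptep, &orig_pte, new_pte);
}

int main(void)
{
    _Atomic pte_t slot = 0x42;

    printf("fresh sample: %d\n", replace_if_unchanged(&slot, 0x42, 0x99)); /* 1: replaced */
    printf("stale sample: %d\n", replace_if_unchanged(&slot, 0x42, 0x77)); /* 0: slot changed */
    return 0;
}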
/kernel/linux/linux-5.10/mm/madvise.c
   194  pte_t *orig_pte;  in swapin_walk_pmd_entry() (local)
   207  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
   208  pte = *(orig_pte + ((index - start) / PAGE_SIZE));  in swapin_walk_pmd_entry()
   209  pte_unmap_unlock(orig_pte, ptl);  in swapin_walk_pmd_entry()
   321  pte_t *orig_pte, *pte, ptent;  in madvise_cold_or_pageout_pte_range() (local)
   400  orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in madvise_cold_or_pageout_pte_range()
   428  pte_unmap_unlock(orig_pte, ptl);  in madvise_cold_or_pageout_pte_range()
   480  pte_unmap_unlock(orig_pte, ptl);  in madvise_cold_or_pageout_pte_range()
   585  pte_t *orig_pte, *pte, ptent;  in madvise_free_pte_range() (local)
   599  orig_pte …  in madvise_free_pte_range()
        [additional matches not shown]

/kernel/linux/linux-5.10/mm/memory.c
  2613  pte_t *page_table, pte_t orig_pte)  in pte_unmap_same()
  2620  same = pte_same(*page_table, orig_pte);  in pte_unmap_same()
  2657  if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {  in cow_user_page()
  2662  if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in cow_user_page()
  2672  entry = pte_mkyoung(vmf->orig_pte);  in cow_user_page()
  2690  if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in cow_user_page()
  2844  flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte));
  2845  entry = pte_mkyoung(vmf->orig_pte);
  2882  if (is_zero_pfn(pte_pfn(vmf->orig_pte))) {  in wp_page_copy()
  2922  if (likely(pte_same(*vmf->pte, vmf->orig_pte))) {  in wp_page_copy()
  2612  pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, pte_t *page_table, pte_t orig_pte)  in pte_unmap_same() (argument)
        [additional matches not shown]

/kernel/linux/linux-5.10/mm/swap_state.c
   758  pte_t *pte, *orig_pte;  in swap_ra_info() (local)
   772  orig_pte = pte = pte_offset_map(vmf->pmd, faddr);  in swap_ra_info()
   775  pte_unmap(orig_pte);  in swap_ra_info()
   790  pte_unmap(orig_pte);  in swap_ra_info()
   815  pte_unmap(orig_pte);  in swap_ra_info()

/kernel/linux/linux-5.10/mm/ksm.c
  1036  pte_t *orig_pte)  in write_protect_page()
  1102  *orig_pte = *pvmw.pte;  in write_protect_page()
  1118  * @orig_pte: the original value of the pte
  1123  struct page *kpage, pte_t orig_pte)  in replace_page()
  1147  if (!pte_same(*ptep, orig_pte)) {  in replace_page()
  1207  pte_t orig_pte = __pte(0);  in try_to_merge_one_page() (local)
  1240  if (write_protect_page(vma, page, &orig_pte) == 0) {  in try_to_merge_one_page()
  1257  err = replace_page(vma, page, kpage, orig_pte);  in try_to_merge_one_page()
  1035  write_protect_page(struct vm_area_struct *vma, struct page *page, pte_t *orig_pte)  in write_protect_page() (argument)
  1122  replace_page(struct vm_area_struct *vma, struct page *page, struct page *kpage, pte_t orig_pte)  in replace_page() (argument)
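In the madvise.c and swap_state.c hits (and the fs/proc ones further down), orig_pte is not a PTE value but the pointer returned by pte_offset_map_lock()/pte_offset_map(): the scan advances a second cursor, and the saved original is what gets handed back to pte_unmap_unlock()/pte_unmap() so the unmap matches the map. A stripped-down illustration of keeping the base pointer, with map_lock()/unmap_unlock() as stand-ins rather than the kernel calls:

#include <stdio.h>

typedef unsigned long pte_t;

/* Stand-in for pte_offset_map_lock(): kmap + spin_lock in the kernel. */
static pte_t *map_lock(pte_t *table) { return table; }

/* Stand-in for pte_unmap_unlock(): must receive the pointer map_lock() returned. */
static void unmap_unlock(pte_t *base) { (void)base; }

static unsigned long count_nonzero(pte_t *table, unsigned int nr)
{
    pte_t *orig_pte, *pte;
    unsigned long count = 0;

    orig_pte = pte = map_lock(table);   /* keep the base ... */
    for (unsigned int i = 0; i < nr; i++, pte++)
        if (*pte)
            count++;
    unmap_unlock(orig_pte);             /* ... so the teardown matches the setup */
    return count;
}

int main(void)
{
    pte_t table[8] = { 0, 1, 0, 2, 0, 0, 3, 0 };

    printf("%lu populated entries\n", count_nonzero(table, 8));
    return 0;
}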
/kernel/linux/linux-5.10/arch/x86/kvm/mmu/paging_tmpl.h
   149  pt_element_t orig_pte, pt_element_t new_pte)  in cmpxchg_gpte()
   163  [old] "+a" (orig_pte),  in cmpxchg_gpte()
   175  [old] "+A" (orig_pte),  in cmpxchg_gpte()
   238  pt_element_t pte, orig_pte;  in update_accessed_dirty_bits() (local)
   248  pte = orig_pte = walker->ptes[level - 1];  in update_accessed_dirty_bits()
   265  if (pte == orig_pte)  in update_accessed_dirty_bits()
   284  ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte);  in update_accessed_dirty_bits()
   147  cmpxchg_gpte(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, pt_element_t __user *ptep_user, unsigned index, pt_element_t orig_pte, pt_element_t new_pte)  in cmpxchg_gpte() (argument)
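update_accessed_dirty_bits() copies the cached guest PTE into orig_pte, sets the accessed (and, for writes, dirty) bit in a working copy, skips the write-back entirely if nothing changed, and otherwise publishes the new value with a compare-and-exchange against orig_pte so a concurrent guest update is never clobbered. A user-space sketch of that sequence using C11 atomics in place of the guest-memory cmpxchg helper:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long pt_element_t;
#define PT_ACCESSED 0x20ULL    /* bit positions are illustrative, not the real layout */
#define PT_DIRTY    0x40ULL

/*
 * Set accessed (and, on a write fault, dirty) in a working copy of the cached
 * guest PTE and publish it only if the in-memory entry still equals orig_pte.
 * Returns false when the entry changed underneath us (the walker would retry).
 */
static bool update_accessed_dirty(_Atomic pt_element_t *ptep,
                                  pt_element_t cached, bool write_fault)
{
    pt_element_t orig_pte, pte;

    pte = orig_pte = cached;
    pte |= PT_ACCESSED;
    if (write_fault)
        pte |= PT_DIRTY;

    if (pte == orig_pte)        /* bits already set: nothing to write back */
        return true;

    return atomic_compare_exchange_strong(ptep, &orig_pte, pte);
}

int main(void)
{
    _Atomic pt_element_t gpte = 0x1ULL;    /* present, A/D still clear */

    bool ok = update_accessed_dirty(&gpte, 0x1ULL, true);
    printf("updated: %d, gpte now %#llx\n", ok, atomic_load(&gpte));
    return 0;
}

The 6.6 hits below show the same sequence, with the inline-asm cmpxchg_gpte() replaced by __try_cmpxchg_user() (line 249).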
/kernel/linux/linux-5.10/arch/sparc/mm/tlb.c
   205  pte_t orig_pte = __pte(pmd_val(orig));  in __set_pmd_acct() (local)
   206  bool exec = pte_exec(orig_pte);  in __set_pmd_acct()
/kernel/linux/linux-6.6/arch/sparc/mm/tlb.c
   210  pte_t orig_pte = __pte(pmd_val(orig));  in __set_pmd_acct() (local)
   211  bool exec = pte_exec(orig_pte);  in __set_pmd_acct()
/kernel/linux/linux-6.6/arch/x86/kvm/mmu/paging_tmpl.h
   203  pt_element_t pte, orig_pte;  in update_accessed_dirty_bits() (local)
   213  pte = orig_pte = walker->ptes[level - 1];  in update_accessed_dirty_bits()
   230  if (pte == orig_pte)  in update_accessed_dirty_bits()
   249  ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault);  in update_accessed_dirty_bits()
/kernel/linux/linux-5.10/arch/arm64/kvm/hyp/pgtable.c
   744  kvm_pte_t attr_clr, kvm_pte_t *orig_pte,  in stage2_update_leaf_attrs()
   763  if (orig_pte)  in stage2_update_leaf_attrs()
   764  *orig_pte = data.pte;  in stage2_update_leaf_attrs()
   742  stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, u32 *level)  in stage2_update_leaf_attrs() (argument)
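Here orig_pte is an optional out-parameter: callers that do not care about the pre-update PTE pass NULL, and stage2_update_leaf_attrs() writes through the pointer only when it is non-NULL. The same convention in miniature, with illustrative names rather than the kernel API:

#include <stdio.h>

typedef unsigned long long kvm_pte_t;

/* Clear/set attribute bits on *ptep; report the old value only if asked. */
static void update_leaf_attrs(kvm_pte_t *ptep, kvm_pte_t attr_set,
                              kvm_pte_t attr_clr, kvm_pte_t *orig_pte)
{
    kvm_pte_t old = *ptep;

    *ptep = (old & ~attr_clr) | attr_set;
    if (orig_pte)        /* optional out-parameter */
        *orig_pte = old;
}

int main(void)
{
    kvm_pte_t pte = 0xf0, old;

    update_leaf_attrs(&pte, 0x1, 0x10, &old);   /* caller wants the old value */
    update_leaf_attrs(&pte, 0x2, 0x00, NULL);   /* caller does not */
    printf("old=%#llx new=%#llx\n", old, pte);
    return 0;
}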
/kernel/linux/linux-5.10/fs/proc/task_mmu.c
  1452  pte_t *pte, *orig_pte;  in pagemap_pmd_range() (local)
  1524  orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);  in pagemap_pmd_range()
  1533  pte_unmap_unlock(orig_pte, ptl);  in pagemap_pmd_range()
  1838  pte_t *orig_pte;  in gather_pte_stats() (local)
  1857  orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in gather_pte_stats()
  1865  pte_unmap_unlock(orig_pte, ptl);  in gather_pte_stats()
/kernel/linux/linux-6.6/fs/proc/task_mmu.c
  1479  pte_t *pte, *orig_pte;  in pagemap_pmd_range() (local)
  1556  orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);  in pagemap_pmd_range()
  1569  pte_unmap_unlock(orig_pte, ptl);  in pagemap_pmd_range()
  1886  pte_t *orig_pte;  in gather_pte_stats() (local)
  1902  orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in gather_pte_stats()
  1915  pte_unmap_unlock(orig_pte, ptl);  in gather_pte_stats()
/kernel/linux/linux-6.6/arch/arm64/kvm/hyp/pgtable.c
  1196  kvm_pte_t attr_clr, kvm_pte_t *orig_pte,  in stage2_update_leaf_attrs()
  1215  if (orig_pte)  in stage2_update_leaf_attrs()
  1216  *orig_pte = data.pte;  in stage2_update_leaf_attrs()
  1194  stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, u64 size, kvm_pte_t attr_set, kvm_pte_t attr_clr, kvm_pte_t *orig_pte, u32 *level, enum kvm_pgtable_walk_flags flags)  in stage2_update_leaf_attrs() (argument)