/kernel/linux/linux-6.6/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
    16  radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
    25  radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
    39  radix__flush_tlb_pwc_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    41  radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    42  mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);  in radix__flush_hugetlb_tlb_range()
    49  struct mm_struct *mm = vma->vm_mm;  in radix__huge_ptep_modify_prot_commit()
    62  set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);  in radix__huge_ptep_modify_prot_commit()

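All of the radix hugetlb helpers above reach the address space being flushed through vma->vm_mm. A minimal sketch of that shape, modeled on radix__flush_hugetlb_page() as listed; the hstate_vma()/hstate_get_psize() lookup is an assumption based on the surrounding powerpc code, not shown in the matches:

```c
/*
 * Sketch only: how a hugetlb page flush derives both the mm and the
 * page-size index from the VMA. Mirrors the calls listed above.
 */
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/tlbflush.h>

static void sketch_flush_hugetlb_page(struct vm_area_struct *vma,
                                      unsigned long vmaddr)
{
    /* page-size index of this hugetlb mapping (assumed helper chain) */
    int psize = hstate_get_psize(hstate_vma(vma));

    /* the mm to flush is always taken from the VMA */
    radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}
```
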
pgtable.c
    54  assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));  in pmdp_set_access_flags()
    74  assert_spin_locked(pud_lockptr(vma->vm_mm, pudp));  in pudp_set_access_flags()
    92  return __pmdp_test_and_clear_young(vma->vm_mm, address, pmdp);  in pmdp_test_and_clear_young()
    98  return __pudp_test_and_clear_young(vma->vm_mm, address, pudp);  in pudp_test_and_clear_young()
   173  old_pmd = pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, _PAGE_INVALID);  in pmdp_invalidate()
   185  pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);  in pmdp_huge_get_and_clear_full()
   204  pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);  in pudp_huge_get_and_clear_full()
   530  pte_val = pte_update(vma->vm_mm, addr, ptep, _PAGE_PRESENT, _PAGE_INVALID, 0);  in ptep_modify_prot_start()
   542  set_pte_at(vma->vm_mm, addr, ptep, pte);  in ptep_modify_prot_commit()

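The assert_spin_locked() matches at lines 54 and 74 document a contract rather than behavior: callers of the *_set_access_flags() helpers must already hold the page-table lock covering the entry. A hedged sketch of that contract; the body follows the generic update shape, not powerpc's low-level helpers, and the function name is illustrative:

```c
/*
 * Sketch: the caller-holds-the-lock contract asserted above. The body
 * mirrors the generic pmdp_set_access_flags() shape; powerpc's real
 * version goes through its own __ptep_set_access_flags() path.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <linux/huge_mm.h>

static int sketch_pmdp_set_access_flags(struct vm_area_struct *vma,
                                        unsigned long address, pmd_t *pmdp,
                                        pmd_t entry, int dirty)
{
    int changed = !pmd_same(*pmdp, entry);

    /* the per-PMD page-table lock must already be held */
    assert_spin_locked(pmd_lockptr(vma->vm_mm, pmdp));

    if (changed) {
        set_pmd_at(vma->vm_mm, address, pmdp, entry);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
    }
    return changed;
}
```
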
/kernel/linux/linux-6.6/arch/mips/mm/
tlb-r3k.c
    71  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
   152  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
   157  printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);  in local_flush_tlb_page()
   159  newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;  in local_flush_tlb_page()
   188  if (current->active_mm != vma->vm_mm)  in __update_tlb()
   194  if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {  in __update_tlb()
   196  (cpu_context(cpu, vma->vm_mm)), pid);  in __update_tlb()

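The r3k code above gates every per-page flush on cpu_context(cpu, vma->vm_mm): an mm that never ran on this CPU has context 0 and can have nothing stale in the TLB. A sketch of that gate; the asid-mask lookup via current_cpu_data is an assumption, and the actual TLB probe is elided:

```c
/*
 * Sketch: the ASID gate used by local_flush_tlb_page() above. Only an
 * mm with a live per-CPU context (ASID) can have stale TLB entries.
 */
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>

static void sketch_local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long page)
{
    int cpu = smp_processor_id();

    if (cpu_context(cpu, vma->vm_mm) != 0) {
        unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
        unsigned long newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;

        /* ... write (page | newpid) to EntryHi, probe, invalidate on hit ... */
        (void)newpid;
    }
}
```
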
/kernel/linux/linux-5.10/mm/
purgeable.c
    85  struct mm_struct *mm = vma->vm_mm;  in lookup_uxpte_page()
   157  spin_lock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
   169  spin_unlock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
   178  spin_lock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
   184  spin_unlock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
   192  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
   204  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
   214  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
   225  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
   239  spin_lock(&vma->vm_mm->uxpgd_lock);  in do_uxpte_page_fault()
   ...

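purgeable.c is not mainline Linux; it appears to come from the vendor (OpenHarmony) patches carried in these trees. Every uxpte operation above is serialized by a spinlock those patches add to struct mm_struct, always reached through vma->vm_mm. A minimal sketch of the pairing, assuming that out-of-tree uxpgd_lock field:

```c
/*
 * Sketch: the per-mm locking pattern visible above. uxpgd_lock is a
 * field added by these out-of-tree patches; it is not part of the
 * mainline struct mm_struct.
 */
#include <linux/mm.h>
#include <linux/spinlock.h>

static void sketch_uxpte_update(struct vm_area_struct *vma)
{
    spin_lock(&vma->vm_mm->uxpgd_lock);   /* one lock per address space */
    /* ... look up and modify the uxpte entry for this VMA ... */
    spin_unlock(&vma->vm_mm->uxpgd_lock);
}
```
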
huge_memory.c
   591  if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {  in __do_huge_pmd_anonymous_page()
   599  pgtable = pte_alloc_one(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   613  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
   619  ret = check_stable_address_space(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   629  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
   639  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
   640  set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  in __do_huge_pmd_anonymous_page()
   641  add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);  in __do_huge_pmd_anonymous_page()
   642  mm_inc_nr_ptes(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   645  count_memcg_event_mm(vma->vm_mm, THP_FAULT_ALLOC);  in __do_huge_pmd_anonymous_page()
   ...

pgtable-generic.c
    70  set_pte_at(vma->vm_mm, address, ptep, entry);  in ptep_set_access_flags()
    93  struct mm_struct *mm = (vma)->vm_mm;  in ptep_clear_flush()
   112  set_pmd_at(vma->vm_mm, address, pmdp, entry);  in pmdp_set_access_flags()
   140  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_huge_clear_flush()
   153  pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);  in pudp_huge_clear_flush()
   215  pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);  in pmdp_collapse_flush()

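These matches are the architecture-independent fallbacks, and they share one shape: fetch the mm from the VMA, atomically get-and-clear the entry, then flush only if the old entry could actually have been cached. This sketch restates the body of ptep_clear_flush() from the listing:

```c
/*
 * Sketch: the generic clear-and-flush pattern of ptep_clear_flush().
 * The TLB flush is skipped when the old PTE was never accessible,
 * since no CPU can hold a stale translation for it.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>

static pte_t sketch_ptep_clear_flush(struct vm_area_struct *vma,
                                     unsigned long address, pte_t *ptep)
{
    struct mm_struct *mm = vma->vm_mm;
    pte_t pte = ptep_get_and_clear(mm, address, ptep);

    if (pte_accessible(mm, pte))
        flush_tlb_page(vma, address);
    return pte;
}
```
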
memory.c
   506  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);  in print_bad_pte()
   804  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_present_page()
   860  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_page()
   873  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_present_pte()
   912  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_pte()
   940  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pte_range()
   941  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pte_range()
  1047  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pmd_range()
  1048  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pmd_range()
  1084  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pud_range()
  4274  struct mm_struct *vm_mm = vma->vm_mm;  in do_fault()
   ...

/kernel/linux/linux-6.6/mm/
purgeable.c
    86  struct mm_struct *mm = vma->vm_mm;  in lookup_uxpte_page()
   159  spin_lock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
   171  spin_unlock(&vma->vm_mm->uxpgd_lock);  in lock_uxpte()
   180  spin_lock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
   186  spin_unlock(&vma->vm_mm->uxpgd_lock);  in unlock_uxpte()
   194  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
   206  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_set_present()
   216  spin_lock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
   227  spin_unlock(&vma->vm_mm->uxpgd_lock);  in uxpte_clear_present()
   241  spin_lock(&vma->vm_mm->uxpgd_lock);  in do_uxpte_page_fault()
   ...

huge_memory.c
    77  if (!vma->vm_mm)  /* vdso */  in hugepage_vma_check()
    86  test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in hugepage_vma_check()
   123  !enforce_sysfs, vma->vm_mm, vm_flags);  in hugepage_vma_check()
   657  if (mem_cgroup_charge(folio, vma->vm_mm, gfp)) {  in __do_huge_pmd_anonymous_page()
   665  pgtable = pte_alloc_one(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   679  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
   685  ret = check_stable_address_space(vma->vm_mm);  in __do_huge_pmd_anonymous_page()
   693  pte_free(vma->vm_mm, pgtable);  in __do_huge_pmd_anonymous_page()
   703  pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);  in __do_huge_pmd_anonymous_page()
   704  set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);  in __do_huge_pmd_anonymous_page()
   ...

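Taken together, the __do_huge_pmd_anonymous_page() matches trace the whole anonymous THP fault path, and every step names the address space through vma->vm_mm. A condensed, hedged sketch of the happy path; allocation failures and the racing-entry and stable-address-space checks are elided:

```c
/*
 * Sketch: the happy path of __do_huge_pmd_anonymous_page() as listed
 * above, error handling elided. A PTE page is deposited alongside the
 * huge PMD so a later split cannot fail on allocation.
 */
#include <linux/mm.h>
#include <linux/huge_mm.h>

static vm_fault_t sketch_huge_anon_fault(struct vm_fault *vmf, pmd_t entry,
                                         unsigned long haddr)
{
    struct vm_area_struct *vma = vmf->vma;
    pgtable_t pgtable = pte_alloc_one(vma->vm_mm);  /* may fail: elided */

    vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
    pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
    set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
    add_mm_counter(vma->vm_mm, MM_ANONPAGES, HPAGE_PMD_NR);
    mm_inc_nr_ptes(vma->vm_mm);
    spin_unlock(vmf->ptl);
    return 0;
}
```
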
memory.c
   493  pgd_t *pgd = pgd_offset(vma->vm_mm, addr);  in print_bad_pte()
   732  set_pte_at(vma->vm_mm, address, ptep, pte);  in restore_exclusive_pte()
   916  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_page()
   929  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_present_pte()
   980  set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte);  in copy_present_pte()
  1007  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pte_range()
  1008  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pte_range()
  1144  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pmd_range()
  1145  struct mm_struct *src_mm = src_vma->vm_mm;  in copy_pmd_range()
  1181  struct mm_struct *dst_mm = dst_vma->vm_mm;  in copy_pud_range()
  4737  struct mm_struct *vm_mm = vma->vm_mm;  in do_fault()
   ...

mremap.c
   141  struct mm_struct *mm = vma->vm_mm;  in move_ptes()
   186  flush_tlb_batched_pending(vma->vm_mm);  in move_ptes()
   240  struct mm_struct *mm = vma->vm_mm;  in move_normal_pmd()
   275  old_ptl = pmd_lock(vma->vm_mm, old_pmd);  in move_normal_pmd()
   308  struct mm_struct *mm = vma->vm_mm;  in move_normal_pud()
   324  old_ptl = pud_lock(vma->vm_mm, old_pud);  in move_normal_pud()
   357  struct mm_struct *mm = vma->vm_mm;  in move_huge_pud()
   371  old_ptl = pud_lock(vma->vm_mm, old_pud);  in move_huge_pud()
   512  mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,  in move_page_tables()
   524  old_pud = get_old_pud(vma->vm_mm, old_addr);  in move_page_tables()
   ...

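The move_normal_pmd()/move_normal_pud() matches above always lock the old-side page table through vma->vm_mm, then nest the new-side lock only when it is a different lock. A sketch of that double-lock shape; entry movement and the TLB flush are elided:

```c
/*
 * Sketch: the two-lock dance of move_normal_pmd() above. With split
 * page-table locks the old and new PMD pages can have different locks;
 * the second one is taken nested to keep lockdep happy.
 */
#include <linux/mm.h>
#include <linux/spinlock.h>

static void sketch_move_pmd_locked(struct vm_area_struct *vma,
                                   pmd_t *old_pmd, pmd_t *new_pmd)
{
    spinlock_t *old_ptl = pmd_lock(vma->vm_mm, old_pmd);
    spinlock_t *new_ptl = pmd_lockptr(vma->vm_mm, new_pmd);

    if (new_ptl != old_ptl)
        spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

    /* ... move the entry from old_pmd to new_pmd, flush the TLB ... */

    if (new_ptl != old_ptl)
        spin_unlock(new_ptl);
    spin_unlock(old_ptl);
}
```
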
mprotect.c
    97  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in change_pte_range()
   103  atomic_read(&vma->vm_mm->mm_users) == 1)  in change_pte_range()
   106  flush_tlb_batched_pending(vma->vm_mm);  in change_pte_range()
   245  pte_clear(vma->vm_mm, addr, pte);  in change_pte_range()
   259  set_pte_at(vma->vm_mm, addr, pte, newpte);  in change_pte_range()
   280  set_pte_at(vma->vm_mm, addr, pte,  in change_pte_range()
   332  if (pte_alloc(vma->vm_mm, pmd))  \
   347  low##_t *p = low##_alloc(vma->vm_mm, high, addr);  \
   386  vma->vm_mm, addr, end);  in change_pmd_range()
   488  struct mm_struct *mm = vma->vm_mm;  in change_protection_range()
   ...

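change_pte_range() maps and locks the PTE page through vma->vm_mm, then rewrites each present entry with the two-phase ptep_modify_prot_start()/ptep_modify_prot_commit() protocol (the powerpc pgtable.c matches earlier show one backend of it: start invalidates the entry, commit publishes the new one). A trimmed sketch of the loop; swap, migration, and NUMA-hinting cases are elided:

```c
/*
 * Sketch: the skeleton of change_pte_range() above. Only the
 * present-PTE rewrite is shown.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static void sketch_change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
                                    unsigned long addr, unsigned long end,
                                    pgprot_t newprot)
{
    spinlock_t *ptl;
    pte_t *pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);

    do {
        if (pte_present(ptep_get(pte))) {
            /* invalidate, modify, then publish the new PTE */
            pte_t oldpte = ptep_modify_prot_start(vma, addr, pte);
            pte_t ptent = pte_modify(oldpte, newprot);

            ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
        }
    } while (pte++, addr += PAGE_SIZE, addr != end);

    pte_unmap_unlock(pte - 1, ptl);
}
```
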
/kernel/linux/linux-5.10/arch/mips/mm/
tlb-r3k.c
    73  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()
   154  if (cpu_context(cpu, vma->vm_mm) != 0) {  in local_flush_tlb_page()
   159  printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);  in local_flush_tlb_page()
   161  newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;  in local_flush_tlb_page()
   190  if (current->active_mm != vma->vm_mm)  in __update_tlb()
   196  if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {  in __update_tlb()
   198  (cpu_context(cpu, vma->vm_mm)), pid);  in __update_tlb()

/kernel/linux/linux-5.10/arch/sh/mm/
tlbflush_32.c
    19  if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {  in local_flush_tlb_page()
    24  asid = cpu_asid(cpu, vma->vm_mm);  in local_flush_tlb_page()
    28  if (vma->vm_mm != current->mm) {  in local_flush_tlb_page()
    42  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()

/kernel/linux/linux-5.10/arch/powerpc/mm/book3s64/
radix_hugetlbpage.c
    16  radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__flush_hugetlb_page()
    25  radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);  in radix__local_flush_hugetlb_page()
    35  radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);  in radix__flush_hugetlb_tlb_range()
    97  struct mm_struct *mm = vma->vm_mm;  in radix__huge_ptep_modify_prot_commit()
   107  set_huge_pte_at(vma->vm_mm, addr, ptep, pte);  in radix__huge_ptep_modify_prot_commit()

/kernel/linux/linux-6.6/arch/sh/mm/
tlbflush_32.c
    19  if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {  in local_flush_tlb_page()
    24  asid = cpu_asid(cpu, vma->vm_mm);  in local_flush_tlb_page()
    28  if (vma->vm_mm != current->mm) {  in local_flush_tlb_page()
    42  struct mm_struct *mm = vma->vm_mm;  in local_flush_tlb_range()

/kernel/linux/linux-6.6/arch/arc/mm/
tlb.c
   222  local_flush_tlb_mm(vma->vm_mm);  in local_flush_tlb_range()
   235  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_range()
   237  tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_range()
   288  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_page()
   289  tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_page()
   351  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);  in flush_tlb_page()
   363  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);  in flush_tlb_range()
   376  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);  in flush_pmd_tlb_range()
   425  if (current->active_mm != vma->vm_mm)  in create_tlb()
   553  if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {  in local_flush_pmd_tlb_range()
   ...

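The flush_tlb_page()/flush_tlb_range() wrappers above broadcast the local flush to every CPU that may cache the mm, using mm_cpumask(vma->vm_mm) to limit the IPIs. A sketch of that shape, shared by this ARC code and ARM's smp_tlb.c below; the args struct here only mirrors what these arches pass:

```c
/*
 * Sketch: broadcast a per-page TLB flush to exactly the CPUs in the
 * mm's cpumask, i.e. the CPUs that have ever run this address space.
 */
#include <linux/smp.h>
#include <linux/mm.h>
#include <asm/tlbflush.h>

struct sketch_tlb_args {
    struct vm_area_struct *ta_vma;
    unsigned long ta_start;
};

static void sketch_ipi_flush_tlb_page(void *arg)
{
    struct sketch_tlb_args *ta = arg;

    local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static void sketch_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
    struct sketch_tlb_args ta = { .ta_vma = vma, .ta_start = uaddr };

    /* IPI only the CPUs that can hold stale entries for this mm */
    on_each_cpu_mask(mm_cpumask(vma->vm_mm), sketch_ipi_flush_tlb_page, &ta, 1);
}
```
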
/kernel/linux/linux-5.10/arch/riscv/kernel/
vdso.c
   107  if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))  in arch_vma_name()
   109  if (vma->vm_mm && (vma->vm_start ==  in arch_vma_name()
   110  (long)vma->vm_mm->context.vdso + PAGE_SIZE))  in arch_vma_name()

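Both vdso.c matches (riscv here, csky below) implement the same arch_vma_name() check: a VMA is the vdso if it starts at the base the mm recorded at load time, and the page after it is the vdso data page. A sketch, assuming an mm context that stores that base in context.vdso as these two arches do:

```c
/*
 * Sketch: the arch_vma_name() pattern shared by the riscv and csky
 * entries above and below. Names are those riscv returns.
 */
#include <linux/mm.h>

static const char *sketch_arch_vma_name(struct vm_area_struct *vma)
{
    if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
        return "[vdso]";
    if (vma->vm_mm && vma->vm_start ==
                      (long)vma->vm_mm->context.vdso + PAGE_SIZE)
        return "[vdso_data]";
    return NULL;
}
```
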
/kernel/linux/linux-6.6/arch/csky/kernel/
vdso.c
   101  if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))  in arch_vma_name()
   103  if (vma->vm_mm && (vma->vm_start ==  in arch_vma_name()
   104  (long)vma->vm_mm->context.vdso + PAGE_SIZE))  in arch_vma_name()

/kernel/linux/linux-5.10/arch/arc/mm/
tlb.c
   322  local_flush_tlb_mm(vma->vm_mm);  in local_flush_tlb_range()
   335  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_range()
   337  tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_range()
   388  if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {  in local_flush_tlb_page()
   389  tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));  in local_flush_tlb_page()
   451  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);  in flush_tlb_page()
   463  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);  in flush_tlb_range()
   476  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);  in flush_pmd_tlb_range()
   526  if (current->active_mm != vma->vm_mm)  in create_tlb()
   531  tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), ...)  in create_tlb()
   ...

/kernel/linux/linux-5.10/include/linux/
khugepaged.h
    59  if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))  in khugepaged_enter()
    64  !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))  in khugepaged_enter()
    65  if (__khugepaged_enter(vma->vm_mm))  in khugepaged_enter()

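khugepaged_enter() consults two mm-wide flag bits through vma->vm_mm before registering the mm with the khugepaged daemon. A simplified sketch; the sysfs-enablement and per-VMA-flag checks of the real 5.10 inline are elided:

```c
/*
 * Sketch: the gating in khugepaged_enter() above. An mm is registered
 * at most once (MMF_VM_HUGEPAGE) and never when THP is disabled for it
 * (MMF_DISABLE_THP).
 */
#include <linux/mm.h>
#include <linux/sched/coredump.h>
#include <linux/khugepaged.h>

static int sketch_khugepaged_enter(struct vm_area_struct *vma)
{
    if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
        !test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
        return __khugepaged_enter(vma->vm_mm);  /* -ENOMEM on failure */
    return 0;
}
```
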
/kernel/linux/linux-5.10/arch/arm/mm/
fault-armv.c
    57  set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
    99  pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
   120  ptl = pte_lockptr(vma->vm_mm, pmd);  in adjust_pte()
   136  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
   156  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()

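The adjust_pte() matches show a manual page-table walk rooted at pgd_offset(vma->vm_mm, address), bailing out at any non-present level before locking the PTE. A condensed sketch of the walk; the actual fix-up done by do_adjust_pte() is elided, and the 6.6 variant below reaches the locked PTE via pte_offset_map_nolock() instead of pte_lockptr():

```c
/*
 * Sketch: the manual walk done by adjust_pte() above. Each level bails
 * out if the entry is not present; the PTE level is locked before it
 * would be modified.
 */
#include <linux/mm.h>
#include <linux/pgtable.h>

static int sketch_walk_to_pte(struct vm_area_struct *vma, unsigned long address)
{
    pgd_t *pgd = pgd_offset(vma->vm_mm, address);
    p4d_t *p4d;
    pud_t *pud;
    pmd_t *pmd;
    pte_t *pte;
    spinlock_t *ptl;

    if (pgd_none_or_clear_bad(pgd))
        return 0;
    p4d = p4d_offset(pgd, address);
    if (p4d_none_or_clear_bad(p4d))
        return 0;
    pud = pud_offset(p4d, address);
    if (pud_none_or_clear_bad(pud))
        return 0;
    pmd = pmd_offset(pud, address);
    if (pmd_none_or_clear_bad(pmd))
        return 0;

    ptl = pte_lockptr(vma->vm_mm, pmd);
    pte = pte_offset_map(pmd, address);
    spin_lock(ptl);
    /* ... inspect/update the PTE, e.g. via do_adjust_pte() ... */
    pte_unmap_unlock(pte, ptl);
    return 1;
}
```
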
/kernel/linux/linux-6.6/arch/arm/mm/
fault-armv.c
    57  set_pte_at(vma->vm_mm, address, ptep, entry);  in do_adjust_pte()
    99  pgd = pgd_offset(vma->vm_mm, address);  in adjust_pte()
   120  pte = pte_offset_map_nolock(vma->vm_mm, pmd, address, &ptl);  in adjust_pte()
   138  struct mm_struct *mm = vma->vm_mm;  in make_coherent()
   158  if (mpnt->vm_mm != mm || mpnt == vma)  in make_coherent()

/kernel/linux/linux-5.10/arch/m68k/include/asm/
tlbflush.h
    87  if (vma->vm_mm == current->active_mm) {  in flush_tlb_page()
    98  if (vma->vm_mm == current->active_mm)  in flush_tlb_range()
   178  sun3_put_context(vma->vm_mm->context);  in flush_tlb_page()
   195  struct mm_struct *mm = vma->vm_mm;  in flush_tlb_range()

/kernel/linux/linux-5.10/arch/arm/kernel/
smp_tlb.c
   202  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page,  in flush_tlb_page()
   206  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_page()
   228  on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range,  in flush_tlb_range()
   232  broadcast_tlb_mm_a15_erratum(vma->vm_mm);  in flush_tlb_range()
