
Searched refs:ASID (Results 1 - 16 of 16) sorted by relevance

/kernel/linux/linux-5.10/arch/arm/include/asm/
mmu.h
27 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK)) macro
29 #define ASID(mm) (0) macro
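
A minimal standalone sketch of what the 32-bit ARM ASID(mm) macro above computes, assuming ASID_BITS is 8 and ASID_MASK covers everything above it (the usual arch/arm convention; verify against the exact tree you build). The context id counter packs a generation number in the upper bits and the hardware ASID below; the macro keeps only the hardware part. This is an illustration, not the kernel's code.

#include <stdint.h>
#include <stdio.h>

#define ASID_BITS  8
#define ASID_MASK  (~0ULL << ASID_BITS)

/* Equivalent of ASID(mm): keep only the hardware ASID bits of the counter. */
static unsigned int asid_of(uint64_t context_id_counter)
{
        return (unsigned int)(context_id_counter & ~ASID_MASK);
}

int main(void)
{
        /* generation 3, hardware ASID 0x2a packed into one counter */
        uint64_t counter = (3ULL << ASID_BITS) | 0x2a;

        printf("ASID = 0x%x\n", asid_of(counter));      /* prints 0x2a */
        return 0;
}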
tlbflush.h
370 const int asid = ASID(mm); in __local_flush_tlb_mm()
388 const int asid = ASID(mm); in local_flush_tlb_mm()
412 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); in __flush_tlb_mm()
425 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page()
446 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page()
463 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
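
A hedged sketch of the operand these 32-bit ARM per-page flushes build: the page-aligned virtual address with the owning mm's ASID in the low bits, so a single TLB invalidate targets one page of one address space. PAGE_SHIFT and the ASID width are assumptions of the sketch, not taken from the files above.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1UL << PAGE_SHIFT) - 1))

/* Mirror of "(uaddr & PAGE_MASK) | ASID(vma->vm_mm)" from the snippets above. */
static unsigned long tlb_page_operand(unsigned long uaddr, unsigned int asid)
{
        return (uaddr & PAGE_MASK) | asid;      /* ASID fits below PAGE_SHIFT */
}

int main(void)
{
        printf("operand = %#lx\n", tlb_page_operand(0x7f123456UL, 0x2a));
        return 0;
}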
/kernel/linux/linux-6.6/arch/arm/include/asm/
mmu.h
27 #define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK)) macro
29 #define ASID(mm) (0) macro
tlbflush.h
363 const int asid = ASID(mm); in __local_flush_tlb_mm()
381 const int asid = ASID(mm); in local_flush_tlb_mm()
405 tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", ASID(mm)); in __flush_tlb_mm()
418 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __local_flush_tlb_page()
439 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in local_flush_tlb_page()
456 uaddr = (uaddr & PAGE_MASK) | ASID(vma->vm_mm); in __flush_tlb_page()
/kernel/linux/linux-5.10/arch/arm64/include/asm/
tlbflush.h
128 * | ASID | TG | SCALE | NUM | TTL | BADDR |
185 * The 'mm' argument identifies the ASID to invalidate.
251 asid = __TLBI_VADDR(0, ASID(mm)); in flush_tlb_mm()
263 addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm)); in flush_tlb_page_nosync()
308 asid = ASID(vma->vm_mm); in __flush_tlb_range()
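
A rough sketch of the 64-bit operand arm64 TLBI instructions take, along the lines of the __TLBI_VADDR() calls above: the virtual address shifted down by 12 in the low bits and the 16-bit ASID in bits [63:48]. The field widths follow the ARMv8 TLBI encoding and are stated here as assumptions, not copied from the kernel macro.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for __TLBI_VADDR(addr, asid). */
static uint64_t tlbi_vaddr(uint64_t addr, uint64_t asid)
{
        uint64_t ta = (addr >> 12) & ((1ULL << 44) - 1);        /* VA bits */

        ta |= asid << 48;                                       /* ASID field */
        return ta;
}

int main(void)
{
        /* flush_tlb_mm() style: address 0, whole ASID */
        printf("%#llx\n", (unsigned long long)tlbi_vaddr(0, 0x2a));
        /* flush_tlb_page() style: one page of one ASID */
        printf("%#llx\n", (unsigned long long)tlbi_vaddr(0x7f1234f000ULL, 0x2a));
        return 0;
}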
mmu.h
33 * We use atomic64_read() here because the ASID for an 'mm_struct' can
37 * may use a stale ASID. This is fine in principle as the new ASID is
46 * old = ASID(mm)
51 * // Hardware walk of pte using new ASID
58 #define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff) macro
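
A minimal sketch of the arm64 ASID(mm) idea quoted above: the per-mm context id is a 64-bit counter (generation above, 16-bit hardware ASID below) that a concurrent rollover may rewrite, so it is read atomically and masked to 16 bits. C11 atomics stand in for the kernel's atomic64_t here; only the 0xffff mask is taken from the snippet.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct mm_context {
        _Atomic uint64_t id;    /* generation | hardware ASID */
};

static uint16_t asid_of(struct mm_context *ctx)
{
        /*
         * A racing rollover may hand back the old value; per the mmu.h
         * comment above, that is fine in principle because the new ASID
         * is guaranteed to be clean in the TLB.
         */
        return (uint16_t)(atomic_load(&ctx->id) & 0xffff);
}

int main(void)
{
        struct mm_context ctx = { .id = (5ULL << 16) | 0x1234 };

        printf("ASID = 0x%x\n", asid_of(&ctx));         /* prints 0x1234 */
        return 0;
}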
mmu_context.h
168 * free an ASID allocated in a future generation. We could workaround this by
169 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
197 ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; in update_saved_ttbr0()
242 * ASID has changed since the last run (following the context switch in switch_mm()
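
A sketch of the TTBR value that update_saved_ttbr0() above assembles: the physical address of the pgd in the low bits and the 16-bit ASID in TTBR bits [63:48]. phys_to_ttbr()/virt_to_phys() are replaced by a plain physical address, so this only shows the bit packing, not the kernel helper.

#include <stdint.h>
#include <stdio.h>

/* Pack a pgd physical address and an ASID the way "... | ASID(mm) << 48" does. */
static uint64_t make_ttbr(uint64_t pgd_phys, uint16_t asid)
{
        return pgd_phys | ((uint64_t)asid << 48);
}

int main(void)
{
        printf("TTBR = %#llx\n",
               (unsigned long long)make_ttbr(0x40001000ULL, 0x1234));
        return 0;
}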
/kernel/linux/linux-6.6/arch/arm64/include/asm/
tlbflush.h
129 * | ASID | TG | SCALE | NUM | TTL | BADDR |
186 * The 'mm' argument identifies the ASID to invalidate.
252 asid = __TLBI_VADDR(0, ASID(mm)); in flush_tlb_mm()
265 addr = __TLBI_VADDR(uaddr, ASID(mm)); in __flush_tlb_page_nosync()
345 * @asid: The ASID of the task (0 for IPA instructions)
428 asid = ASID(vma->vm_mm); in __flush_tlb_range()
mmu.h
31 * We use atomic64_read() here because the ASID for an 'mm_struct' can
35 * may use a stale ASID. This is fine in principle as the new ASID is
44 * old = ASID(mm)
49 * // Hardware walk of pte using new ASID
56 #define ASID(mm) (atomic64_read(&(mm)->context.id) & 0xffff) macro
mmu_context.h
133 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI runtime
195 * free an ASID allocated in a future generation. We could workaround this by
196 * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
224 ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48; in update_saved_ttbr0()
270 * ASID has changed since the last run (following the context switch in switch_mm()
/kernel/linux/linux-5.10/arch/arm/mm/
tlb-v6.S
40 asid r3, r3 @ mask ASID
/kernel/linux/linux-6.6/arch/loongarch/include/asm/
hw_breakpoint.h
54 #define LOONGARCH_CSR_NAME_ASID ASID
/kernel/linux/linux-6.6/arch/arm/mm/
tlb-v6.S
42 asid r3, r3 @ mask ASID
/kernel/linux/linux-6.6/arch/loongarch/kernel/
hw_breakpoint.c
74 GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val); in read_wb_reg()
89 GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val); in write_wb_reg()
/kernel/linux/linux-5.10/arch/arm64/mm/
context.c
50 pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", in get_cpu_asid_bits()
70 * We cannot decrease the ASID size at runtime, so panic if we support in verify_cpu_asid_bits()
71 * fewer ASID bits than the boot CPU. in verify_cpu_asid_bits()
73 pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n", in verify_cpu_asid_bits()
85 * is set, then the ASID will map only userspace. Thus in set_kpti_asid_bits()
109 /* Update the list of reserved ASIDs and the ASID bitmap. */ in flush_context()
118 * ASID, as this is the only trace we have of in flush_context()
142 * (i.e. the same ASID in the current generation) but we can't in check_update_reserved_asid()
144 * of the old ASID are updated to reflect the mm. Failure to do in check_update_reserved_asid()
145 * so could result in us missing the reserved ASID i in check_update_reserved_asid()
[all...]
/kernel/linux/linux-6.6/arch/arm64/mm/
context.c
50 pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", in get_cpu_asid_bits()
70 * We cannot decrease the ASID size at runtime, so panic if we support in verify_cpu_asid_bits()
71 * fewer ASID bits than the boot CPU. in verify_cpu_asid_bits()
73 pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n", in verify_cpu_asid_bits()
85 * is set, then the ASID will map only userspace. Thus in set_kpti_asid_bits()
109 /* Update the list of reserved ASIDs and the ASID bitmap. */ in flush_context()
118 * ASID, as this is the only trace we have of in flush_context()
142 * (i.e. the same ASID in the current generation) but we can't in check_update_reserved_asid()
144 * of the old ASID are updated to reflect the mm. Failure to do in check_update_reserved_asid()
145 * so could result in us missing the reserved ASID i in check_update_reserved_asid()
[all...]
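
To put the context.c hits above in context, here is a much-simplified sketch of the generation-plus-bitmap ASID allocation scheme they belong to (locking, per-CPU active/reserved ASID tracking and the KPTI handling are all omitted, and none of the identifiers below are the kernel's). An mm's context id carries a generation number above the hardware ASID; when that generation is stale, a fresh ASID is taken from the bitmap, and exhausting the bitmap bumps the generation, a "rollover" that invalidates every previously handed-out ASID at once.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS   16
#define NUM_ASIDS   (1u << ASID_BITS)

static uint64_t generation = 1ULL << ASID_BITS;         /* upper bits of the id */
static bool     used[NUM_ASIDS];                        /* stand-in for the ASID bitmap */

/* Hand out a free ASID under the current generation, rolling over if needed. */
static uint64_t new_context(void)
{
        for (uint32_t a = 1; a < NUM_ASIDS; a++) {      /* ASID 0 stays reserved */
                if (!used[a]) {
                        used[a] = true;
                        return generation | a;
                }
        }
        /* Rollover: start a new generation and recycle the whole ASID space. */
        generation += 1ULL << ASID_BITS;
        memset(used, 0, sizeof(used));
        used[1] = true;
        return generation | 1;
}

/* On context switch: keep the id if its generation is current, else reallocate. */
static uint64_t check_and_switch(uint64_t id)
{
        if ((id & ~(uint64_t)(NUM_ASIDS - 1)) != generation)
                id = new_context();
        return id;
}

int main(void)
{
        uint64_t id = check_and_switch(0);

        printf("ASID = 0x%llx, generation = %llu\n",
               (unsigned long long)(id & (NUM_ASIDS - 1)),
               (unsigned long long)(id >> ASID_BITS));
        return 0;
}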
