Home
last modified time | relevance | path

Searched refs:tlb_gen (Results 1 - 8 of 8) sorted by relevance

/kernel/linux/linux-5.10/arch/x86/mm/
tlb.c:225 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
508 * Read the tlb_gen to check whether a flush is needed. in switch_mm_irqs_off()
510 * The barrier synchronizes with the tlb_gen increment in in switch_mm_irqs_off()
514 next_tlb_gen = atomic64_read(&next->context.tlb_gen); in switch_mm_irqs_off()
515 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) == in switch_mm_irqs_off()
545 * Start remote flushes and then read tlb_gen. in switch_mm_irqs_off()
549 next_tlb_gen = atomic64_read(&next->context.tlb_gen); in switch_mm_irqs_off()
560 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); in switch_mm_irqs_off()
621 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen); in initialize_tlbstate_and_flush() local
[all...]
/kernel/linux/linux-6.6/arch/x86/mm/
tlb.c:239 *need_flush = (this_cpu_read(cpu_tlbstate.ctxs[asid].tlb_gen) < in choose_new_asid()
590 * Read the tlb_gen to check whether a flush is needed. in switch_mm_irqs_off()
592 * The barrier synchronizes with the tlb_gen increment in in switch_mm_irqs_off()
596 next_tlb_gen = atomic64_read(&next->context.tlb_gen); in switch_mm_irqs_off()
597 if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) == in switch_mm_irqs_off()
626 * Start remote flushes and then read tlb_gen. in switch_mm_irqs_off()
630 next_tlb_gen = atomic64_read(&next->context.tlb_gen); in switch_mm_irqs_off()
642 this_cpu_write(cpu_tlbstate.ctxs[new_asid].tlb_gen, next_tlb_gen); in switch_mm_irqs_off()
703 u64 tlb_gen = atomic64_read(&init_mm.context.tlb_gen); in initialize_tlbstate_and_flush() local
[all...]
/kernel/linux/linux-5.10/arch/x86/include/asm/
tlbflush.h:64 u64 tlb_gen; member
141 * the tlb_gen in the list.
144 * flush the TLB without updating tlb_gen. This can happen
188 * TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
246 return atomic64_inc_return(&mm->context.tlb_gen); in inc_mm_tlb_gen()
mmu.h:23 * increment tlb_gen, then flush. This lets the low-level
28 atomic64_t tlb_gen; member
mmu_context.h:106 atomic64_set(&mm->context.tlb_gen, 0); in init_new_context()
/kernel/linux/linux-6.6/arch/x86/include/asm/
mmu.h:33 * increment tlb_gen, then flush. This lets the low-level
38 atomic64_t tlb_gen; member
tlbflush.h:69 u64 tlb_gen; member
139 * the tlb_gen in the list.
142 * flush the TLB without updating tlb_gen. This can happen
206 * TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
277 return atomic64_inc_return(&mm->context.tlb_gen); in inc_mm_tlb_gen()
mmu_context.h:147 atomic64_set(&mm->context.tlb_gen, 0); in init_new_context()

Completed in 6 milliseconds