// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                        7           0
 * +-------------------------+-----------+
 * |       process ID        |   ASID    |
 * +-------------------------+-----------+
 * |             context ID              |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread_task(thread)) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

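/*
 * Called on a generation rollover, with cpu_asid_lock held: rebuild
 * asid_map from the ASIDs still live on each CPU (recording them as
 * that CPU's reserved ASID) and queue a local TLB flush on every CPU.
 */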
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

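/*
 * Allocate an ASID for @mm under the current generation, with
 * cpu_asid_lock held. The mm's old ASID number is re-used when it is
 * still free or was reserved across a rollover; otherwise a fresh one
 * is taken from the bitmap, bumping the generation when the bitmap is
 * exhausted.
 */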
static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

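	/*
	 * Slow path: either our ASID is from an old generation or a
	 * concurrent rollover cleared this CPU's active_asids entry, so
	 * resolve it against the current generation under the lock.
	 */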
	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}