// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/context.c
 *
 *  Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 *  Copyright (C) 2012 ARM Limited
 *
 *  Author: Will Deacon <will.deacon@arm.com>
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>

#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/thread_notify.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 *
 * In big endian operation, the two 32 bit words are swapped if accessed
 * by non-64-bit operations.
 */
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION
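
/*
 * Illustrative example, assuming the usual ASID_BITS == 8: the hardware
 * ASID lives in the low 8 bits of the 64-bit context ID and the software
 * "generation" counter lives in the bits above.  asid_generation therefore
 * starts at 0x100 and advances by 0x100 on each rollover, so a context ID
 * of 0x305 encodes generation 0x300 with hardware ASID 0x05.
 */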

static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);

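/*
 * active_asids tracks the ASID currently installed on each CPU (cleared
 * at rollover so that stale entries are easy to spot), reserved_asids
 * preserves that value across a rollover for CPUs which have not run
 * another task since, and tlb_flush_pending marks CPUs that still need
 * a local TLB flush before running with a post-rollover ASID.
 */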
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

#ifdef CONFIG_ARM_ERRATA_798181
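/*
 * Build the mask of CPUs that may be running this mm's ASID, so that the
 * Cortex-A15 erratum 798181 workaround can restrict its extra TLB
 * maintenance to CPUs which might actually hold stale entries.  A CPU
 * whose active_asids slot reads zero has been through a rollover; its
 * reserved ASID is then the only record of what it is still running, so
 * compare against that instead.
 */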
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask)
{
	int cpu;
	unsigned long flags;
	u64 context_id, asid;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	context_id = mm->context.id.counter;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We only need to send an IPI if the other CPUs are
		 * running the same ASID as the one being invalidated.
		 */
		asid = per_cpu(active_asids, cpu).counter;
		if (asid == 0)
			asid = per_cpu(reserved_asids, cpu);
		if (context_id == asid)
			cpumask_set_cpu(cpu, mask);
	}
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
#endif

#ifdef CONFIG_ARM_LPAE
/*
 * With LPAE, the ASID and page tables are updated atomically, so there is
 * no need for a reserved set of tables (the active ASID tracking prevents
 * any issues across a rollover).
 */
#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
	u32 ttb;
	/*
	 * Copy TTBR1 into TTBR0.
	 * This points at swapper_pg_dir, which contains only global
	 * entries so any speculative walks are perfectly safe.
	 */
	asm volatile(
	"	mrc	p15, 0, %0, c2, c0, 1		@ read TTBR1\n"
	"	mcr	p15, 0, %0, c2, c0, 0		@ set TTBR0\n"
	: "=r" (ttb));
	isb();
}
#endif

#ifdef CONFIG_PID_IN_CONTEXTIDR
static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
			       void *t)
{
	u32 contextidr;
	pid_t pid;
	struct thread_info *thread = t;

	if (cmd != THREAD_NOTIFY_SWITCH)
		return NOTIFY_DONE;

	pid = task_pid_nr(thread->task) << ASID_BITS;
	asm volatile(
	"	mrc	p15, 0, %0, c13, c0, 1\n"
	"	and	%0, %0, %2\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c13, c0, 1\n"
	: "=r" (contextidr), "+r" (pid)
	: "I" (~ASID_MASK));
	isb();

	return NOTIFY_OK;
}

static struct notifier_block contextidr_notifier_block = {
	.notifier_call = contextidr_notifier,
};

static int __init contextidr_notifier_init(void)
{
	return thread_register_notifier(&contextidr_notifier_block);
}
arch_initcall(contextidr_notifier_init);
#endif

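/*
 * Called with cpu_asid_lock held when the ASID space is exhausted: record
 * each CPU's current ASID as reserved, repopulate the allocation bitmap
 * from those reserved values, and flag every CPU for a local TLB flush
 * before it runs with an ASID from the new generation.
 */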
static void flush_context(unsigned int cpu)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
	for_each_possible_cpu(i) {
		asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid & ~ASID_MASK, asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/* Queue a TLB invalidate and flush the I-cache if necessary. */
	cpumask_setall(&tlb_flush_pending);

	if (icache_is_vivt_asid_tagged())
		__flush_icache_all();
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes.
	 * We always count from ASID #1, as we reserve ASID #0 to switch
	 * via TTBR0 and to avoid speculative page table walks from hitting
	 * in any partial walk caches, which could be populated from
	 * overlapping level-1 descriptors used to map both the module
	 * area and the userspace stack.
	 */
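	/*
	 * The search below starts at cur_idx (the most recently allocated
	 * ASID) rather than at 1, so successive allocations walk the bitmap
	 * incrementally instead of rescanning the low bits every time; only
	 * after a rollover does the search restart from 1.
	 */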
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid == NUM_USER_ASIDS) {
		generation = atomic64_add_return(ASID_FIRST_VERSION,
						 &asid_generation);
		flush_context(cpu);
		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
	}

	__set_bit(asid, asid_map);
	cur_idx = asid;
	cpumask_clear(mm_cpumask(mm));
	return asid | generation;
}

void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long flags;
	unsigned int cpu = smp_processor_id();
	u64 asid;

	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
		__check_vmalloc_seq(mm);

	/*
	 * We cannot update the pgd and the ASID atomically with classic
	 * MMU, so switch exclusively to global mappings to avoid
	 * speculative page table walking with the wrong TTBR.
	 */
	cpu_set_reserved_ttbr0();

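	/*
	 * Fast path: if this mm's ASID belongs to the current generation
	 * and this CPU's active_asids slot was still non-zero when we
	 * installed it (i.e. no rollover has cleared it underneath us),
	 * we can switch without taking cpu_asid_lock.
	 */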
	asid = atomic64_read(&mm->context.id);
	if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
	    && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
		asid = new_context(mm, cpu);
		atomic64_set(&mm->context.id, asid);
	}

	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}

	atomic64_set(&per_cpu(active_asids, cpu), asid);
	cpumask_set_cpu(cpu, mm_cpumask(mm));
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:
	cpu_switch_mm(mm->pgd, mm);
}