// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define asid2idx(asid)		((asid) & ~ASID_MASK)
#define idx2asid(idx)		asid2idx(idx)
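
/*
 * Illustrative layout, assuming asid_bits == 16 (a worked example of the
 * macros above, not an additional definition):
 *
 *   mm->context.id = generation (bits 63:16) | ASID (bits 15:0)
 *
 *   ASID_MASK            == ~0xffffUL
 *   ASID_FIRST_VERSION   == 0x10000
 *   NUM_USER_ASIDS       == 65536
 *   asid2idx(0x0003002a) == 0x2a	(the generation is stripped)
 */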

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_ASID_SHIFT);

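	/*
	 * ID_AA64MMFR0_EL1.ASIDBits (per the Arm ARM): 0b0000 means 8-bit
	 * ASIDs and 0b0010 means 16-bit ASIDs; other values are reserved.
	 */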
	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
					smp_processor_id(), fld);
		fallthrough;
	case 0:
		asid = 8;
		break;
	case 2:
		asid = 16;
	}

	return asid;
}

/* Check whether the current CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel and user ASIDs are allocated in pairs; the
	 * bottom bit distinguishes the two: if it is set, the ASID maps
	 * only userspace. Mark every odd-numbered (user-only) ASID as in
	 * use, so the allocator only ever hands out the even (kernel)
	 * half of each pair; the user ASID is derived by setting the
	 * bottom bit. 0xaa is 0b10101010, i.e. all odd bits set.
	 */
	memset(map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
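
/*
 * Worked example (illustrative), assuming asid_bits == 16: with
 * asid_generation == 0x30000, asid 0x3002a matches the current generation
 * ((0x3002a ^ 0x30000) >> 16 == 0), while asid 0x2002a does not
 * ((0x2002a ^ 0x30000) >> 16 == 1).
 */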

static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(asid2idx(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = generation | (asid & ~ASID_MASK);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * If it is pinned, we can keep using it. Note that reserved
		 * takes priority, because even if the ASID is also pinned,
		 * we still need to update the generation in reserved_asids.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(asid2idx(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return idx2asid(asid) | generation;
}
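
/*
 * Illustrative rollover, assuming asid_bits == 16: when the bitmap fills,
 * asid_generation advances by ASID_FIRST_VERSION (e.g. 0x20000 -> 0x30000)
 * and flush_context() rebuilds the bitmap from the reserved and pinned
 * ASIDs. A task that held ASID 0x2002a may then get 0x3002a (same index,
 * new generation), or a fresh index if its old one could not be kept.
 */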

void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

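/*
 * Pin the ASID of @mm so that it keeps the same value across rollovers.
 * Returns the pinned ASID, with the equivalent of USER_ASID_BIT set under
 * KPTI, or 0 if pinning is unsupported or the pin limit has been reached.
 */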
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollovers since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(asid2idx(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid &= ~ASID_MASK;

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);

void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(asid2idx(asid), pinned_asid_map);
		nr_pinned_asids--;
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);
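
/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *	asid = arm64_mm_context_get(mm);	// pin; returns 0 on failure
 *	if (asid)
 *		program_device_asid(dev, asid);	// hypothetical helper
 *	...
 *	arm64_mm_context_put(mm);		// unpin when done
 */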

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	write_sysreg(ttbr1, ttbr1_el1);
	isb();
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
			set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);

	/*
	 * There must always be an ASID available after rollover. Ensure that,
	 * even if all CPUs have a reserved ASID and the maximum number of
	 * ASIDs are pinned, there is still at least one empty slot in the
	 * ASID map.
	 */
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);
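
/*
 * Worked example (illustrative): with 16-bit ASIDs and KPTI enabled,
 * num_available_asids is 65536 / 2 = 32768. On a machine with 8 possible
 * CPUs, max_pinned_asids = 32768 - 8 - 2 = 32758; the "- 2" covers ASID #0
 * (reserved for init_mm) plus one guaranteed-empty slot after rollover.
 */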

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
			   GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
				  sizeof(*pinned_asid_map), GFP_KERNEL);
	nr_pinned_asids = 0;

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);