// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/context.c
 *
 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
#include <asm/smp.h>
#include <asm/tlbflush.h>

static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);

static atomic64_t asid_generation;
static unsigned long *asid_map;

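/*
 * active_asids holds the ctxid each CPU is currently running with;
 * reserved_asids preserves it across a rollover so the running task
 * keeps its ASID; tlb_flush_pending marks CPUs that still owe a local
 * TLB flush on their next context switch after a rollover.
 */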
static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;

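/* ASIDs pinned via arm64_mm_context_get() are preserved across rollovers. */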
static unsigned long max_pinned_asids;
static unsigned long nr_pinned_asids;
static unsigned long *pinned_asid_map;

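/*
 * A context ID ("ctxid"), as stored in mm->context.id, packs the
 * rollover generation into the bits above asid_bits and the hardware
 * ASID into the low bits: with 16-bit ASIDs, ctxid 0x30042 means
 * generation 0x3, hardware ASID 0x0042.
 */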
#define ASID_MASK		(~GENMASK(asid_bits - 1, 0))
#define ASID_FIRST_VERSION	(1UL << asid_bits)

#define NUM_USER_ASIDS		ASID_FIRST_VERSION
#define ctxid2asid(asid)	((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid)	((asid) | (genid))

/* Get the ASIDBits supported by the current CPU */
static u32 get_cpu_asid_bits(void)
{
	u32 asid;
	int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1),
						ID_AA64MMFR0_EL1_ASIDBITS_SHIFT);

	switch (fld) {
	default:
		pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n",
			smp_processor_id(), fld);
		fallthrough;
	case ID_AA64MMFR0_EL1_ASIDBITS_8:
		asid = 8;
		break;
	case ID_AA64MMFR0_EL1_ASIDBITS_16:
		asid = 16;
	}

	return asid;
}

/* Check whether this CPU's ASIDBits is compatible with asid_bits */
void verify_cpu_asid_bits(void)
{
	u32 asid = get_cpu_asid_bits();

	if (asid < asid_bits) {
		/*
		 * We cannot decrease the ASID size at runtime, so panic if
		 * this CPU supports fewer ASID bits than the boot CPU.
		 */
		pr_crit("CPU%d: smaller ASID size (%u) than boot CPU (%u)\n",
				smp_processor_id(), asid, asid_bits);
		cpu_panic_kernel();
	}
}

static void set_kpti_asid_bits(unsigned long *map)
{
	unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long);
	/*
	 * With KPTI, kernel and user ASIDs come in pairs distinguished by
	 * the bottom bit: when it is set, the ASID maps only userspace.
	 * Setting every odd bit (0xaa per byte) keeps the odd, user-facing
	 * ASIDs out of the allocator, so each mm is handed an even ASID
	 * and its odd sibling is implicitly reserved for its userspace
	 * mappings.
	 */
	memset(map, 0xaa, len);
}

static void set_reserved_asid_bits(void)
{
	if (pinned_asid_map)
		bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS);
	else if (arm64_kernel_unmapped_at_el0())
		set_kpti_asid_bits(asid_map);
	else
		bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

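/*
 * True if @asid carries the current generation in its upper bits, i.e.
 * it survived (or postdates) the most recent rollover.
 */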
#define asid_gen_match(asid) \
	(!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))

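/* Rollover: called from new_context() with cpu_asid_lock held. */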
static void flush_context(void)
{
	int i;
	u64 asid;

	/* Update the list of reserved ASIDs and the ASID bitmap. */
	set_reserved_asid_bits();

	for_each_possible_cpu(i) {
		asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0);
		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * ASID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (asid == 0)
			asid = per_cpu(reserved_asids, i);
		__set_bit(ctxid2asid(asid), asid_map);
		per_cpu(reserved_asids, i) = asid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on the next
	 * context switch.
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_asid(u64 asid, u64 newasid)
{
	int cpu;
	bool hit = false;

	/*
	 * Iterate over the set of reserved ASIDs looking for a match.
	 * If we find one, then we can update our mm to use newasid
	 * (i.e. the same ASID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old ASID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved ASID in a future
	 * generation.
	 */
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_asids, cpu) == asid) {
			hit = true;
			per_cpu(reserved_asids, cpu) = newasid;
		}
	}

	return hit;
}

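/* Allocate a ctxid in the current generation. cpu_asid_lock must be held. */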
static u64 new_context(struct mm_struct *mm)
{
	static u32 cur_idx = 1;
	u64 asid = atomic64_read(&mm->context.id);
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
		u64 newasid = asid2ctxid(ctxid2asid(asid), generation);

		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_asid(asid, newasid))
			return newasid;

		/*
		 * If it is pinned, we can keep using it. Note that the
		 * reserved check above takes priority: even if the ASID
		 * is also pinned, the generation in reserved_asids must
		 * be updated.
		 */
		if (refcount_read(&mm->context.pinned))
			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
			return newasid;
	}

	/*
	 * Allocate a free ASID. If we can't find one, take a note of the
	 * currently active ASIDs and mark the TLBs as requiring flushes. We
	 * always count from ASID #2 (index 1), as we use ASID #0 when setting
	 * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd
	 * pairs.
	 */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
	if (asid != NUM_USER_ASIDS)
		goto set_asid;

	/* We're out of ASIDs, so increment the global generation count */
	generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION,
						 &asid_generation);
	flush_context();

	/* We have more ASIDs than CPUs, so this will always succeed */
	asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);

set_asid:
	__set_bit(asid, asid_map);
	cur_idx = asid;
	return asid2ctxid(asid, generation);
}

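/*
 * Make @mm's ASID current on this CPU. The fast path publishes the
 * existing ctxid in active_asids with a relaxed cmpxchg; the slow path
 * takes cpu_asid_lock, allocates a fresh ctxid if a rollover occurred,
 * and performs any deferred local TLB flush before activating it.
 */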
void check_and_switch_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int cpu;
	u64 asid, old_active_asid;

	if (system_supports_cnp())
		cpu_set_reserved_ttbr0();

	asid = atomic64_read(&mm->context.id);

	/*
	 * The memory ordering here is subtle.
	 * If our active_asids is non-zero and the ASID matches the current
	 * generation, then we update the active_asids entry with a relaxed
	 * cmpxchg. Racing with a concurrent rollover means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on the
	 *   lock. Taking the lock synchronises with the rollover and so
	 *   we are forced to see the updated generation.
	 *
	 * - We get a valid ASID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	old_active_asid = atomic64_read(this_cpu_ptr(&active_asids));
	if (old_active_asid && asid_gen_match(asid) &&
	    atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids),
				     old_active_asid, asid))
		goto switch_mm_fastpath;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	/* Check that our ASID belongs to the current generation. */
	asid = atomic64_read(&mm->context.id);
	if (!asid_gen_match(asid)) {
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	cpu = smp_processor_id();
	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
		local_flush_tlb_all();

	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

switch_mm_fastpath:

	arm64_apply_bp_hardening();

	/*
	 * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when
	 * emulating PAN.
	 */
	if (!system_uses_ttbr0_pan())
		cpu_switch_mm(mm->pgd, mm);
}

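/*
 * arm64_mm_context_get() - pin the ASID of an mm
 *
 * Pinned ASIDs keep their value across rollovers, so a caller (an IOMMU
 * sharing page tables with the CPU, for example) can program the
 * returned value into hardware the allocator knows nothing about.
 * Returns the hardware ASID (with the user bit set under KPTI), or 0 if
 * the ASID could not be pinned. Release with arm64_mm_context_put().
 */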
unsigned long arm64_mm_context_get(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid;

	if (!pinned_asid_map)
		return 0;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	asid = atomic64_read(&mm->context.id);

	if (refcount_inc_not_zero(&mm->context.pinned))
		goto out_unlock;

	if (nr_pinned_asids >= max_pinned_asids) {
		asid = 0;
		goto out_unlock;
	}

	if (!asid_gen_match(asid)) {
		/*
		 * We went through one or more rollovers since that ASID was
		 * used. Ensure that it is still valid, or generate a new one.
		 */
		asid = new_context(mm);
		atomic64_set(&mm->context.id, asid);
	}

	nr_pinned_asids++;
	__set_bit(ctxid2asid(asid), pinned_asid_map);
	refcount_set(&mm->context.pinned, 1);

out_unlock:
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	asid = ctxid2asid(asid);

	/* Set the equivalent of USER_ASID_BIT */
	if (asid && arm64_kernel_unmapped_at_el0())
		asid |= 1;

	return asid;
}
EXPORT_SYMBOL_GPL(arm64_mm_context_get);

void arm64_mm_context_put(struct mm_struct *mm)
{
	unsigned long flags;
	u64 asid = atomic64_read(&mm->context.id);

	if (!pinned_asid_map)
		return;

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);

	if (refcount_dec_and_test(&mm->context.pinned)) {
		__clear_bit(ctxid2asid(asid), pinned_asid_map);
		nr_pinned_asids--;
	}

	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
}
EXPORT_SYMBOL_GPL(arm64_mm_context_put);

/* Errata workaround post TTBRx_EL1 update. */
asmlinkage void post_ttbr_update_workaround(void)
{
	if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456))
		return;

	asm(ALTERNATIVE("nop; nop; nop",
			"ic iallu; dsb nsh; isb",
			ARM64_WORKAROUND_CAVIUM_27456));
}

void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm)
{
	unsigned long ttbr1 = read_sysreg(ttbr1_el1);
	unsigned long asid = ASID(mm);
	unsigned long ttbr0 = phys_to_ttbr(pgd_phys);

	/* Skip CNP for the reserved ASID */
	if (system_supports_cnp() && asid)
		ttbr0 |= TTBR_CNP_BIT;

	/* SW PAN needs a copy of the ASID in TTBR0 for entry */
	if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN))
		ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid);

	/* Set ASID in TTBR1 since TCR.A1 is set */
	ttbr1 &= ~TTBR_ASID_MASK;
	ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid);

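	/*
	 * Install the reserved (zero-page) TTBR0 first: the new ASID then
	 * goes live via TTBR1 with no user mappings reachable, and the
	 * real TTBR0 is only written once both registers are consistent.
	 */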
	cpu_set_reserved_ttbr0_nosync();
	write_sysreg(ttbr1, ttbr1_el1);
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	post_ttbr_update_workaround();
}

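/*
 * Runs as an arch_initcall, once CPU features (and thus the KPTI
 * decision) are finalized: halve the usable ASID space under KPTI and
 * size the pinned-ASID limit.
 */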
static int asids_update_limit(void)
{
	unsigned long num_available_asids = NUM_USER_ASIDS;

	if (arm64_kernel_unmapped_at_el0()) {
		num_available_asids /= 2;
		if (pinned_asid_map)
			set_kpti_asid_bits(pinned_asid_map);
	}
	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more ASID than CPUs. ASID #0 is reserved for init_mm.
	 */
	WARN_ON(num_available_asids - 1 <= num_possible_cpus());
	pr_info("ASID allocator initialised with %lu entries\n",
		num_available_asids);

	/*
	 * There must always be an ASID available after rollover. Ensure
	 * that, even if all CPUs have a reserved ASID and the maximum
	 * number of ASIDs are pinned, there still is at least one empty
	 * slot in the ASID map. The "- 2" accounts for ASID #0 (reserved
	 * for init_mm) plus that one guaranteed-free slot.
	 */
	max_pinned_asids = num_available_asids - num_possible_cpus() - 2;
	return 0;
}
arch_initcall(asids_update_limit);

static int asids_init(void)
{
	asid_bits = get_cpu_asid_bits();
	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
	asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	if (!asid_map)
		panic("Failed to allocate bitmap for %lu ASIDs\n",
		      NUM_USER_ASIDS);

	pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
	nr_pinned_asids = 0;

	/*
	 * We cannot call set_reserved_asid_bits() here because CPU
	 * caps are not finalized yet, so it is safer to assume KPTI
	 * and reserve the kernel ASIDs from the beginning.
	 */
	if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
		set_kpti_asid_bits(asid_map);
	return 0;
}
early_initcall(asids_init);