// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

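/*
 * Compare the version fields of two ASID/MMID values for @cpu; the
 * ASID/MMID bits themselves are masked off by asid_version_mask().
 */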
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

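/*
 * Assign a new ASID to @mm on the local CPU. When the ASID counter wraps
 * into a new version cycle we flush the local TLB (and the icache, if it
 * is virtually tagged) so that stale translations from the previous cycle
 * cannot be reused.
 */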
void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

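/*
 * Check that @mm's ASID on the local CPU belongs to the current version,
 * and allocate a fresh one via get_new_mmu_context() if it has been
 * invalidated by a version rollover.
 */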
void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

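/*
 * Called on MMID version rollover, with cpu_mmid_lock held: rebuild the
 * MMID bitmap from the MMID active on each CPU (recording it as reserved
 * so the running task keeps a valid context) and queue a TLB flush for
 * every CPU to perform at its next context switch.
 */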
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_zero(mmid_map, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a
		 * rollover, but hasn't run another task in
		 * the meantime, we must preserve its reserved
		 * MMID, as this is the only trace we have of
		 * the process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

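/*
 * Search the reserved MMIDs for @mmid and, if found, rewrite every copy
 * to @newmmid so the reservation follows the mm into the new generation.
 * Returns true if any CPU had @mmid reserved. Called with cpu_mmid_lock
 * held.
 */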
static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

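/*
 * Allocate an MMID for @mm, called with cpu_mmid_lock held. Prefer
 * reusing the mm's previous MMID where it is reserved or still free in
 * the bitmap; otherwise take the next free bit, bumping the global
 * version via flush_context() if the MMID space is exhausted.
 */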
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

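/*
 * Make @mm the current context on this CPU. On ASID systems this
 * validates the context and writes EntryHi; on MMID systems it takes the
 * lock-free fast path described below where possible, performs any
 * pending local TLB invalidation, and writes the MemoryMapID register.
 * Both paths finish by installing the mm's PGD for the TLB refill
 * handler.
 */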
void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_switch_mmu_context(). Taking the lock
	 *   synchronises with the rollover and so we are forced to see the
	 *   updated generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);

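/*
 * Initialise the MMID allocator. num_mmids equals asid_first_version(0),
 * i.e. the number of distinct MMID values the hardware supports, and one
 * value (MMID_KERNEL_WIRED) is reserved for kernel wired/kmap entries.
 */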
static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = bitmap_zalloc(num_mmids, GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);