// SPDX-License-Identifier: GPL-2.0
#include <linux/atomic.h>
#include <linux/mmu_context.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(cpu_mmid_lock);

static atomic64_t mmid_version;
static unsigned int num_mmids;
static unsigned long *mmid_map;

static DEFINE_PER_CPU(u64, reserved_mmids);
static cpumask_t tlb_flush_pending;

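/*
 * Compare only the version (generation) bits of two context values,
 * ignoring the ASID field itself. As a worked example, assuming an 8-bit
 * ASID field: a = 0x2f0 and b = 0x242 share version 0x2 and compare equal,
 * whereas a = 0x2f0 and b = 0x342 differ in version and do not.
 */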
static bool asid_versions_eq(int cpu, u64 a, u64 b)
{
	return ((a ^ b) & asid_version_mask(cpu)) == 0;
}

void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu;
	u64 asid;

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	cpu = smp_processor_id();
	asid = asid_cache(cpu);

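	/*
	 * Bump the ASID and check whether the ASID field has wrapped to
	 * zero, meaning this generation is exhausted and a new cycle must
	 * begin. A sketch assuming an 8-bit ASID field and an increment of
	 * 1: 0x1ff + 1 = 0x200, whose low 8 bits are zero, so the flush
	 * below is taken.
	 */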
	if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();	/* start new asid cycle */
	}

	set_cpu_context(cpu, mm, asid);
	asid_cache(cpu) = asid;
}
EXPORT_SYMBOL_GPL(get_new_mmu_context);

void check_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * This function is specific to ASIDs, and should not be called when
	 * MMIDs are in use.
	 */
	if (WARN_ON(IS_ENABLED(CONFIG_DEBUG_VM) && cpu_has_mmid))
		return;

	/* Check if our ASID is of an older version and thus invalid */
	if (!asid_versions_eq(cpu, cpu_context(cpu, mm), asid_cache(cpu)))
		get_new_mmu_context(mm);
}
EXPORT_SYMBOL_GPL(check_mmu_context);

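/*
 * Begin a new MMID generation: clear the allocation bitmap, but keep the
 * MMID currently live on each CPU marked as reserved. Called from
 * get_new_mmid() below with cpu_mmid_lock held.
 */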
static void flush_context(void)
{
	u64 mmid;
	int cpu;

	/* Update the list of reserved MMIDs and the MMID bitmap */
	bitmap_clear(mmid_map, 0, num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	for_each_possible_cpu(cpu) {
		mmid = xchg_relaxed(&cpu_data[cpu].asid_cache, 0);

		/*
		 * If this CPU has already been through a rollover, but hasn't
		 * run another task in the meantime, we must preserve its
		 * reserved MMID, as this is the only trace we have of the
		 * process it is still running.
		 */
		if (mmid == 0)
			mmid = per_cpu(reserved_mmids, cpu);

		__set_bit(mmid & cpu_asid_mask(&cpu_data[cpu]), mmid_map);
		per_cpu(reserved_mmids, cpu) = mmid;
	}

	/*
	 * Queue a TLB invalidation for each CPU to perform on next
	 * context-switch
	 */
	cpumask_setall(&tlb_flush_pending);
}

static bool check_update_reserved_mmid(u64 mmid, u64 newmmid)
{
	bool hit;
	int cpu;

	/*
	 * Iterate over the set of reserved MMIDs looking for a match.
	 * If we find one, then we can update our mm to use newmmid
	 * (i.e. the same MMID in the current generation) but we can't
	 * exit the loop early, since we need to ensure that all copies
	 * of the old MMID are updated to reflect the mm. Failure to do
	 * so could result in us missing the reserved MMID in a future
	 * generation.
	 */
	hit = false;
	for_each_possible_cpu(cpu) {
		if (per_cpu(reserved_mmids, cpu) == mmid) {
			hit = true;
			per_cpu(reserved_mmids, cpu) = newmmid;
		}
	}

	return hit;
}

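/*
 * Allocate a new MMID for @mm in the current generation, or revalidate the
 * one it already holds. Runs under cpu_mmid_lock (taken by the slow path in
 * check_switch_mmu_context() below), which serialises allocation against
 * concurrent rollovers. The static cur_idx remembers where the previous
 * search ended, so successive allocations scan the bitmap round-robin
 * instead of rescanning from the bottom each time.
 */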
static u64 get_new_mmid(struct mm_struct *mm)
{
	static u32 cur_idx = MMID_KERNEL_WIRED + 1;
	u64 mmid, version, mmid_mask;

	mmid = cpu_context(0, mm);
	version = atomic64_read(&mmid_version);
	mmid_mask = cpu_asid_mask(&boot_cpu_data);

	if (!asid_versions_eq(0, mmid, 0)) {
		u64 newmmid = version | (mmid & mmid_mask);

		/*
		 * If our current MMID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
		if (check_update_reserved_mmid(mmid, newmmid)) {
			mmid = newmmid;
			goto set_context;
		}

		/*
		 * We had a valid MMID in a previous life, so try to re-use
		 * it if possible.
		 */
		if (!__test_and_set_bit(mmid & mmid_mask, mmid_map)) {
			mmid = newmmid;
			goto set_context;
		}
	}

	/* Allocate a free MMID */
	mmid = find_next_zero_bit(mmid_map, num_mmids, cur_idx);
	if (mmid != num_mmids)
		goto reserve_mmid;

	/* We're out of MMIDs, so increment the global version */
	version = atomic64_add_return_relaxed(asid_first_version(0),
					      &mmid_version);

	/* Note currently active MMIDs & mark TLBs as requiring flushes */
	flush_context();

	/* We have more MMIDs than CPUs, so this will always succeed */
	mmid = find_first_zero_bit(mmid_map, num_mmids);

reserve_mmid:
	__set_bit(mmid, mmid_map);
	cur_idx = mmid;
	mmid |= version;
set_context:
	set_cpu_context(0, mm, mmid);
	return mmid;
}

void check_switch_mmu_context(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	u64 ctx, old_active_mmid;
	unsigned long flags;

	if (!cpu_has_mmid) {
		check_mmu_context(mm);
		write_c0_entryhi(cpu_asid(cpu, mm));
		goto setup_pgd;
	}

	/*
	 * MMID switch fast-path, to avoid acquiring cpu_mmid_lock when it's
	 * unnecessary.
	 *
	 * The memory ordering here is subtle. If our active_mmids is non-zero
	 * and the MMID matches the current version, then we update the CPU's
	 * asid_cache with a relaxed cmpxchg. Racing with a concurrent rollover
	 * means that either:
	 *
	 * - We get a zero back from the cmpxchg and end up waiting on
	 *   cpu_mmid_lock in check_mmu_context(). Taking the lock synchronises
	 *   with the rollover and so we are forced to see the updated
	 *   generation.
	 *
	 * - We get a valid MMID back from the cmpxchg, which means the
	 *   relaxed xchg in flush_context will treat us as reserved
	 *   because atomic RmWs are totally ordered for a given location.
	 */
	ctx = cpu_context(cpu, mm);
	old_active_mmid = READ_ONCE(cpu_data[cpu].asid_cache);
	if (!old_active_mmid ||
	    !asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)) ||
	    !cmpxchg_relaxed(&cpu_data[cpu].asid_cache, old_active_mmid, ctx)) {
		raw_spin_lock_irqsave(&cpu_mmid_lock, flags);

		ctx = cpu_context(cpu, mm);
		if (!asid_versions_eq(cpu, ctx, atomic64_read(&mmid_version)))
			ctx = get_new_mmid(mm);

		WRITE_ONCE(cpu_data[cpu].asid_cache, ctx);
		raw_spin_unlock_irqrestore(&cpu_mmid_lock, flags);
	}

	/*
	 * Invalidate the local TLB if needed. Note that we must only clear our
	 * bit in tlb_flush_pending after this is complete, so that the
	 * cpu_has_shared_ftlb_entries case below isn't misled.
	 */
	if (cpumask_test_cpu(cpu, &tlb_flush_pending)) {
		if (cpu_has_vtag_icache)
			flush_icache_all();
		local_flush_tlb_all();
		cpumask_clear_cpu(cpu, &tlb_flush_pending);
	}

	write_c0_memorymapid(ctx & cpu_asid_mask(&boot_cpu_data));

	/*
	 * If this CPU shares FTLB entries with its siblings and one or more of
	 * those siblings hasn't yet invalidated its TLB following a version
	 * increase then we need to invalidate any TLB entries for our MMID
	 * that we might otherwise pick up from a sibling.
	 *
	 * We ifdef on CONFIG_SMP because cpu_sibling_map isn't defined in
	 * CONFIG_SMP=n kernels.
	 */
#ifdef CONFIG_SMP
	if (cpu_has_shared_ftlb_entries &&
	    cpumask_intersects(&tlb_flush_pending, &cpu_sibling_map[cpu])) {
		/* Ensure we operate on the new MMID */
		mtc0_tlbw_hazard();

		/*
		 * Invalidate all TLB entries associated with the new
		 * MMID, and wait for the invalidation to complete.
		 */
		ginvt_mmid();
		sync_ginv();
	}
#endif

setup_pgd:
	TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
}
EXPORT_SYMBOL_GPL(check_switch_mmu_context);
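
/*
 * For illustration, a minimal sketch of how a context-switch path is
 * expected to call into this file. example_switch() is hypothetical, but
 * the real MIPS switch_mm() does run this with interrupts disabled:
 *
 *	static void example_switch(struct mm_struct *next)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		check_switch_mmu_context(next);	// picks the ASID or MMID
 *						// path, programs EntryHi or
 *						// MemoryMapID, and sets the
 *						// TLB-miss handler's PGD
 *		local_irq_restore(flags);
 *	}
 */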

static int mmid_init(void)
{
	if (!cpu_has_mmid)
		return 0;

	/*
	 * Expect allocation after rollover to fail if we don't have at least
	 * one more MMID than CPUs.
	 */
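	/*
	 * Worked example, assuming a 16-bit MemoryMapID field:
	 * asid_first_version(0) == 1 << 16, giving num_mmids = 65536,
	 * comfortably above any realistic CPU count.
	 */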
	num_mmids = asid_first_version(0);
	WARN_ON(num_mmids <= num_possible_cpus());

	atomic64_set(&mmid_version, asid_first_version(0));
	mmid_map = kcalloc(BITS_TO_LONGS(num_mmids), sizeof(*mmid_map),
			   GFP_KERNEL);
	if (!mmid_map)
		panic("Failed to allocate bitmap for %u MMIDs\n", num_mmids);

	/* Reserve an MMID for kmap/wired entries */
	__set_bit(MMID_KERNEL_WIRED, mmid_map);

	pr_info("MMID allocator initialised with %u entries\n", num_mmids);
	return 0;
}
early_initcall(mmid_init);