// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/mm.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/*
 * When necessary, performs a deferred icache flush for the given MM context
 * on the local CPU.  RISC-V has no direct mechanism for instruction cache
 * shootdowns, so instead we send an IPI that informs the remote harts that
 * they need to flush their local instruction caches.  To avoid pathologically
 * slow behavior in a common case (a bunch of single-hart processes on a
 * many-hart machine, i.e. 'make -j'), we skip the IPIs for harts that are not
 * currently executing the MM context and instead schedule a deferred local
 * instruction cache flush to be performed before execution resumes on each
 * hart.  This function actually performs that local instruction cache flush,
 * which implicitly only refers to the current hart.
 */
static inline void flush_icache_deferred(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}
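
/*
 * For context, a hedged sketch of the sending side that pairs with
 * flush_icache_deferred() above.  The real implementation is
 * flush_icache_mm() in arch/riscv/mm/cacheflush.c; the simplified,
 * non-compiled version below only illustrates the protocol: mark every
 * hart's icache as stale, flush the local icache immediately, and fence
 * only the harts currently running this MM.  Harts that are not running
 * it will notice their stale bit in flush_icache_deferred() before they
 * resume execution.  The barrier here is the one that pairs with the
 * smp_mb() in flush_icache_deferred().  Note this sketch omits the
 * SBI-based remote fence path, and ipi_remote_fence_i is referenced only
 * illustratively (in the real code it is a static helper in cacheflush.c
 * that calls local_flush_icache_all()).
 */
#if 0	/* illustrative sketch only, not part of the build */
static void flush_icache_mm_sketch(struct mm_struct *mm)
{
	unsigned int cpu;
	cpumask_t others;

	preempt_disable();

	/* Every hart now owes this MM an icache flush... */
	cpumask_setall(&mm->context.icache_stale_mask);

	/* ...except this one, since we flush it right here. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, &mm->context.icache_stale_mask);
	local_flush_icache_all();

	/*
	 * Harts currently executing this MM cannot defer: make the stale
	 * bits and the new instructions visible (pairs with the smp_mb()
	 * in flush_icache_deferred()) and ask them to flush now.
	 */
	smp_mb();
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others))
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);

	preempt_enable();
}
#endif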

void switch_mm(struct mm_struct *prev, struct mm_struct *next,
	struct task_struct *task)
{
	unsigned int cpu;

	if (unlikely(prev == next))
		return;

	/*
	 * Mark the current MM context as inactive, and the next as
	 * active.  This is at least used by the icache flushing
	 * routines in order to determine who should be flushed.
	 */
	cpu = smp_processor_id();

	cpumask_clear_cpu(cpu, mm_cpumask(prev));
	cpumask_set_cpu(cpu, mm_cpumask(next));

#ifdef CONFIG_MMU
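	/*
	 * Point satp at the next MM's root page table (its PFN combined
	 * with the paging mode), then discard any translations this hart
	 * may still have cached for the previous context.
	 */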
	csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
	local_flush_tlb_all();
#endif

	flush_icache_deferred(next);
}