// SPDX-License-Identifier: GPL-2.0-only
/*
 * TLB Management (flush/create/diagnostics) for MMUv3 and MMUv4
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 */

#include <linux/module.h>
#include <linux/bug.h>
#include <linux/mm_types.h>

#include <asm/arcregs.h>
#include <asm/setup.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>

/* A copy of the ASID from the PID reg is kept in asid_cache */
DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

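/* MMU geometry and features, decoded from the MMU BCR by arc_mmu_mumbojumbo() */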
static struct cpuinfo_arc_mmu {
	unsigned int ver, pg_sz_k, s_pg_sz_m, pae, sets, ways;
} mmuinfo;

/*
 * Utility Routine to erase a J-TLB entry
 * Caller needs to setup Index Reg (manually or via getIndex)
 */
static inline void __tlb_entry_erase(void)
{
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

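/*
 * Invalidate the uTLBs (the small micro I/D-TLBs in front of the joint TLB);
 * the joint TLB itself is left untouched.
 */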
static void utlb_invalidate(void)
{
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBIVUTLB);
}

#ifdef CONFIG_ARC_MMU_V3

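/*
 * Probe the joint TLB for an entry matching @vaddr_n_asid (PD0 format).
 * Returns the matching index, or a value with TLB_LKUP_ERR set if not found.
 */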
static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
{
	unsigned int idx;

	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
	idx = read_aux_reg(ARC_REG_TLBINDEX);

	return idx;
}

static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	unsigned int idx;

	/* Locate the TLB entry for this vaddr + ASID */
	idx = tlb_entry_lkup(vaddr_n_asid);

	/* No error means entry found, zero it out */
	if (likely(!(idx & TLB_LKUP_ERR))) {
		__tlb_entry_erase();
	} else {
		/* Duplicate entry error */
		WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
					   vaddr_n_asid);
	}
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	unsigned int idx;

	/*
	 * First verify if entry for this vaddr+ASID already exists
	 * This also sets up PD0 (vaddr, ASID..) for final commit
	 */
	idx = tlb_entry_lkup(pd0);

	/*
	 * If Not already present get a free slot from MMU.
	 * Otherwise, Probe would have located the entry and set INDEX Reg
	 * with existing location. This will cause Write CMD to over-write
	 * existing entry with new PD0 and PD1
	 */
	if (likely(idx & TLB_LKUP_ERR))
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);

	/* setup the other half of TLB entry (pfn, rwx..) */
	write_aux_reg(ARC_REG_TLBPD1, pd1);

	/*
	 * Commit the Entry to MMU
	 * It doesn't sound safe to use the TLBWriteNI cmd here
	 * which doesn't flush uTLBs. I'd rather be safe than sorry.
	 */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
}

#else	/* MMUv4 */

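/*
 * MMUv4 provides dedicated Insert/Delete TLB commands which perform the
 * lookup internally, so no explicit Probe/GetIndex sequence is needed here.
 */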
static void tlb_entry_erase(unsigned int vaddr_n_asid)
{
	write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid | _PAGE_PRESENT);
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBDeleteEntry);
}

static void tlb_entry_insert(unsigned int pd0, phys_addr_t pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);

	if (!is_pae40_enabled()) {
		write_aux_reg(ARC_REG_TLBPD1, pd1);
	} else {
		write_aux_reg(ARC_REG_TLBPD1, pd1 & 0xFFFFFFFF);
		write_aux_reg(ARC_REG_TLBPD1HI, (u64)pd1 >> 32);
	}

	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}

#endif

/*
 * Unconditionally (without lookup) erase the entire MMU contents
 */

noinline void local_flush_tlb_all(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	unsigned int entry;
	int num_tlb = mmu->sets * mmu->ways;

	local_irq_save(flags);

	/* Load PD0 and PD1 with template for a Blank Entry */
	write_aux_reg(ARC_REG_TLBPD1, 0);

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);

	write_aux_reg(ARC_REG_TLBPD0, 0);

	for (entry = 0; entry < num_tlb; entry++) {
		/* write this entry to the TLB */
		write_aux_reg(ARC_REG_TLBINDEX, entry);
		write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		const int stlb_idx = 0x800;

		/* Blank sTLB entry */
		write_aux_reg(ARC_REG_TLBPD0, _PAGE_HW_SZ);

		for (entry = stlb_idx; entry < stlb_idx + 16; entry++) {
			write_aux_reg(ARC_REG_TLBINDEX, entry);
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBWriteNI);
		}
	}

	utlb_invalidate();

	local_irq_restore(flags);
}

/*
 * Flush the entire MM for userland. The fastest way is to move to Next ASID
 */
noinline void local_flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Small optimisation courtesy IA64
	 * flush_tlb_mm() is called during fork, exit, munmap etc., possibly
	 * multiple times. Only for fork() do we need to move the parent to
	 * a new MMU ctxt; all other cases are NOPs, hence this check.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	/*
	 * - Move to a new ASID, but only if the mm is still wired in
	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
	 *    causing h/w - s/w ASID to get out of sync)
	 * - Also get_new_mmu_context() new implementation allocates a new
	 *   ASID only if it is not allocated already - so deallocate first
	 */
	destroy_context(mm);
	if (current->mm == mm)
		get_new_mmu_context(mm);
}

/*
 * Flush a Range of TLB entries for userland.
 * @start is inclusive, while @end is exclusive
 * Difference between this and Kernel Range Flush is
 *  -Here the fastest way (if range is too large) is to move to next ASID
 *      without doing any explicit Shootdown
 *  -In case of kernel Flush, entry has to be shot down explicitly
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* If range @start to @end is more than 32 TLB entries deep,
	 * it's better to move to a new ASID rather than searching for
	 * individual entries and then shooting them down
	 * (e.g. 256K of address space with an 8K page).
	 *
	 * The calculation is rough and doesn't account for unaligned parts,
	 * since this is heuristics based anyways
	 */
	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_mm(vma->vm_mm);
		return;
	}

	/*
	 * @start moved to page start: this alone suffices for checking
	 * loop end condition below, w/o need for aligning @end to end
	 * e.g. 2000 to 4001 will anyhow loop twice
	 */
	start &= PAGE_MASK;

	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		while (start < end) {
			tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
			start += PAGE_SIZE;
		}
	}

	local_irq_restore(flags);
}

/* Flush the kernel TLB entries - vmalloc/modules (Global from MMU perspective)
 *  @start, @end interpreted as kvaddr
 * Interestingly, shared TLB entries can also be flushed using just
 * @start,@end alone (interpreted as user vaddr), although technically SASID
 * is also needed. However our smart TLBProbe lookup takes care of that.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;

	/* exactly same as above, except for TLB entry not taking ASID */

	if (unlikely((end - start) >= PAGE_SIZE * 32)) {
		local_flush_tlb_all();
		return;
	}

	start &= PAGE_MASK;

	local_irq_save(flags);
	while (start < end) {
		tlb_entry_erase(start);
		start += PAGE_SIZE;
	}

	local_irq_restore(flags);
}

/*
 * Delete TLB entry in MMU for a given page (??? address)
 * NOTE One TLB entry contains translation for single PAGE
 */

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	const unsigned int cpu = smp_processor_id();
	unsigned long flags;

	/* Note that it is critical that interrupts are DISABLED between
	 * checking the ASID and using it to flush the TLB entry
	 */
	local_irq_save(flags);

	if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
		tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
	}

	local_irq_restore(flags);
}

#ifdef CONFIG_SMP

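/* Arguments marshalled for the cross-CPU TLB flush IPIs below */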
struct tlb_args {
	struct vm_area_struct *ta_vma;
	unsigned long ta_start;
	unsigned long ta_end;
};

static inline void ipi_flush_tlb_page(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
}

static inline void ipi_flush_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void ipi_flush_pmd_tlb_range(void *arg)
{
	struct tlb_args *ta = arg;

	local_flush_pmd_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
}
#endif

static inline void ipi_flush_tlb_kernel_range(void *arg)
{
	struct tlb_args *ta = (struct tlb_args *)arg;

	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
}

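/*
 * SMP wrappers: run the corresponding local_* flush on every CPU which may
 * hold entries for the mm (mm_cpumask), or on all CPUs for global flushes.
 */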
void flush_tlb_all(void)
{
	on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
			 mm, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = uaddr
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			 unsigned long end)
{
	struct tlb_args ta = {
		.ta_vma = vma,
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_pmd_tlb_range, &ta, 1);
}
#endif

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct tlb_args ta = {
		.ta_start = start,
		.ta_end = end
	};

	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
}
#endif

/*
 * Routine to create a TLB entry
 */
static void create_tlb(struct vm_area_struct *vma, unsigned long vaddr, pte_t *ptep)
{
	unsigned long flags;
	unsigned int asid_or_sasid, rwx;
	unsigned long pd0;
	phys_addr_t pd1;

	/*
	 * create_tlb() assumes that current->mm == vma->mm, since
	 * -the ASID for the TLB entry is fetched from the MMU ASID reg (valid for curr)
	 * -it completes the lazy write to the SASID reg (again valid for curr tsk)
	 *
	 * Removing the assumption involves
	 * -Using vma->mm->context{ASID,SASID}, as opposed to MMU reg.
	 * -More importantly it makes this handler inconsistent with fast-path
	 *  TLB Refill handler which always deals with "current"
	 *
	 * Let's see the use cases when current->mm != vma->mm and we land here
	 *  1. execve->copy_strings()->__get_user_pages->handle_mm_fault
	 *     Here VM wants to pre-install a TLB entry for user stack while
	 *     current->mm still points to pre-execve mm (hence the condition).
	 *     However the stack vaddr is soon relocated (randomization) and
	 *     move_page_tables() tries to undo that TLB entry.
	 *     Thus not creating TLB entry is not any worse.
	 *
	 *  2. ptrace(POKETEXT) causes a CoW - debugger(current) inserting a
	 *     breakpoint in debugged task. Not creating a TLB now is not
	 *     performance critical.
	 *
	 * Both the cases above are not good enough for code churn.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	vaddr &= PAGE_MASK;

	/* update this PTE credentials */
	pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

	/* Create HW TLB(PD0,PD1) from PTE  */

	/* ASID for this task */
	asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

	pd0 = vaddr | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

	/*
	 * ARC MMU provides fully orthogonal access bits for K/U mode,
	 * however Linux only saves 1 set to save PTE real-estate
	 * Here we convert 3 PTE bits into 6 MMU bits:
	 * -Kernel only entries have Kr Kw Kx 0 0 0
	 * -User entries have mirrored K and U bits
	 */
	rwx = pte_val(*ptep) & PTE_BITS_RWX;

	if (pte_val(*ptep) & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
	else
		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */

	pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);

	tlb_entry_insert(pd0, pd1);

	local_irq_restore(flags);
}

/*
 * Called at the end of pagefault, for a userspace mapped page
 *  -pre-install the corresponding TLB entry into MMU
 *  -Finalize the delayed D-cache flush of kernel mapping of page due to
 *	flush_dcache_page(), copy_user_page()
 *
 * Note that flush (when done) involves both WBACK - so physical page is
 * in sync as well as INV - so any non-congruent aliases don't remain
 */
void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
		unsigned long vaddr_unaligned, pte_t *ptep, unsigned int nr)
{
	unsigned long vaddr = vaddr_unaligned & PAGE_MASK;
	phys_addr_t paddr = pte_val(*ptep) & PAGE_MASK_PHYS;
	struct page *page = pfn_to_page(pte_pfn(*ptep));

	create_tlb(vma, vaddr, ptep);

	if (page == ZERO_PAGE(0)) {
		return;
	}

	/*
	 * Exec page : Independent of aliasing/page-color considerations,
	 *	       since icache doesn't snoop dcache on ARC, any dirty
	 *	       K-mapping of a code page needs to be wback+inv so that
	 *	       icache fetch by userspace sees code correctly.
	 * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it
	 *	       so userspace sees the right data.
	 *  (Avoids the flush for Non-exec + congruent mapping case)
	 */
	if ((vma->vm_flags & VM_EXEC) ||
	     addr_not_cache_congruent(paddr, vaddr)) {
		struct folio *folio = page_folio(page);
		int dirty = !test_and_set_bit(PG_dc_clean, &folio->flags);
		if (dirty) {
			unsigned long offset = offset_in_folio(folio, paddr);
			nr = folio_nr_pages(folio);
			paddr -= offset;
			vaddr -= offset;
			/* wback + inv dcache lines (K-mapping) */
			__flush_dcache_pages(paddr, paddr, nr);

			/* invalidate any existing icache lines (U-mapping) */
			if (vma->vm_flags & VM_EXEC)
				__inv_icache_pages(paddr, vaddr, nr);
		}
	}
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * MMUv4 in HS38x cores supports Super Pages which are the basis for Linux THP
 * support.
 *
 * Normal and Super pages can co-exist (of course not overlap) in TLB with a
 * new bit "SZ" in TLB page descriptor to distinguish between them.
 * Super Page size is configurable in hardware (4K to 16M), but fixed once
 * the RTL is built.
 *
 * The exact THP size a Linux configuration will support is a function of:
 *  - MMU page size (typical 8K, RTL fixed)
 *  - software page walker address split between PGD:PTE:PFN (typical
 *    11:8:13, but can be changed with 1 line)
 * So for above default, THP size supported is 8K * (2^8) = 2M
 *
 * Default Page Walker is 2 levels, PGD:PTE:PFN, which in THP regime
 * reduces to 1 level (as PTE is folded into PGD and canonically referred
 * to as PMD).
 * Thus THP PMD accessors are implemented in terms of PTE (just like sparc)
 */

void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
				 pmd_t *pmd)
{
	pte_t pte = __pte(pmd_val(*pmd));
	update_mmu_cache_range(NULL, vma, addr, &pte, HPAGE_PMD_NR);
}

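/* Shoot down the single super-page TLB entry covering a huge page mapping */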
void local_flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
			       unsigned long end)
{
	unsigned int cpu;
	unsigned long flags;

	local_irq_save(flags);

	cpu = smp_processor_id();

	if (likely(asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID)) {
		unsigned int asid = hw_pid(vma->vm_mm, cpu);

		/* No need to loop here: this will always be for 1 Huge Page */
		tlb_entry_erase(start | _PAGE_HW_SZ | asid);
	}

	local_irq_restore(flags);
}

#endif

/* Read the MMU Build Configuration Register, decode it and save into
 * the cpuinfo structure for later use.
 * No validation is done here, simply read/convert the BCR
 */
int arc_mmu_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned int bcr, u_dtlb, u_itlb, sasid;
	struct bcr_mmu_3 *mmu3;
	struct bcr_mmu_4 *mmu4;
	char super_pg[64] = "";
	int n = 0;

	bcr = read_aux_reg(ARC_REG_MMU_BCR);
	mmu->ver = (bcr >> 24);

	if (is_isa_arcompact() && mmu->ver == 3) {
		mmu3 = (struct bcr_mmu_3 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
		mmu->sets = 1 << mmu3->sets;
		mmu->ways = 1 << mmu3->ways;
		u_dtlb = mmu3->u_dtlb;
		u_itlb = mmu3->u_itlb;
		sasid = mmu3->sasid;
	} else {
		mmu4 = (struct bcr_mmu_4 *)&bcr;
		mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
		mmu->s_pg_sz_m = 1 << (mmu4->sz1 - 11);
		mmu->sets = 64 << mmu4->n_entry;
		mmu->ways = mmu4->n_ways * 2;
		u_dtlb = mmu4->u_dtlb * 4;
		u_itlb = mmu4->u_itlb * 4;
		sasid = mmu4->sasid;
		mmu->pae = mmu4->pae;
	}

	if (mmu->s_pg_sz_m)
		scnprintf(super_pg, 64, "/%dM%s",
			  mmu->s_pg_sz_m,
			  IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) ? " (THP enabled)":"");

	n += scnprintf(buf + n, len - n,
		      "MMU [v%x]\t: %dk%s, swalk %d lvl, JTLB %dx%d, uDTLB %d, uITLB %d%s%s%s\n",
		       mmu->ver, mmu->pg_sz_k, super_pg, CONFIG_PGTABLE_LEVELS,
		       mmu->sets, mmu->ways,
		       u_dtlb, u_itlb,
		       IS_AVAIL1(sasid, ", SASID"),
		       IS_AVAIL2(mmu->pae, ", PAE40 ", CONFIG_ARC_HAS_PAE40));

	return n;
}

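/* Hardware supports PAE40 but the kernel is not using it (CONFIG_ARC_HAS_PAE40 off) */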
int pae40_exist_but_not_enab(void)
{
	return mmuinfo.pae && !is_pae40_enabled();
}

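/*
 * Boot-time MMU bring-up: sanity check the hardware geometry against the
 * kernel configuration, then program the initial ASID and pgd pointer.
 */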
void arc_mmu_init(void)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	int compat = 0;

	/*
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED((CONFIG_ARC_KVADDR_SIZE << 20), PMD_SIZE));

	/*
	 * stack top size sanity check,
	 * Can't be done in processor.h due to header include dependencies
	 */
	BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));

	/*
	 * Ensure that MMU features assumed by kernel exist in hardware.
	 *  - For older ARC700 cpus, only v3 supported
	 *  - For HS cpus, v4 was baseline and v5 is backwards compatible
	 *    (will run older software).
	 */
	if (is_isa_arcompact() && mmu->ver == 3)
		compat = 1;
	else if (is_isa_arcv2() && mmu->ver >= 4)
		compat = 1;

	if (!compat)
		panic("MMU ver %d doesn't match kernel built for\n", mmu->ver);

	if (mmu->pg_sz_k != TO_KB(PAGE_SIZE))
		panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    mmu->s_pg_sz_m != TO_MB(HPAGE_PMD_SIZE))
		panic("MMU Super pg size != Linux HPAGE_PMD_SIZE (%luM)\n",
		      (unsigned long)TO_MB(HPAGE_PMD_SIZE));

	if (IS_ENABLED(CONFIG_ARC_HAS_PAE40) && !mmu->pae)
		panic("Hardware doesn't support PAE40\n");

	/* Enable the MMU with ASID 0 */
	mmu_setup_asid(NULL, 0);

	/* cache the pgd pointer in MMU SCRATCH reg (ARCv2 only) */
	mmu_setup_pgd(NULL, swapper_pg_dir);

	if (pae40_exist_but_not_enab())
		write_aux_reg(ARC_REG_TLBPD1HI, 0);
}

/*
 * TLB Programmer's Model uses Linear Indexes: 0 to {255, 511} for 128 x {2,4}
 * The mapping is Column-first.
 *		---------------------	-----------
 *		|way0|way1|way2|way3|	|way0|way1|
 *		---------------------	-----------
 * [set0]	|  0 |  1 |  2 |  3 |	|  0 |  1 |
 * [set1]	|  4 |  5 |  6 |  7 |	|  2 |  3 |
 *		~		    ~	~	  ~
 * [set127]	| 508| 509| 510| 511|	| 254| 255|
 *		---------------------	-----------
 * For normal operations we don't (must not) care how above works since
 * MMU cmd getIndex(vaddr) abstracts that out.
 * However for walking WAYS of a SET, we need to know this
 */
#define SET_WAY_TO_IDX(mmu, set, way)  ((set) * mmu->ways + (way))
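/* e.g. for a 4-way MMU: (set 2, way 3) maps to linear index 2 * 4 + 3 = 11 */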

/* Handling of Duplicate PD (TLB entry) in MMU.
 * -Could be due to buggy customer tapeouts or obscure kernel bugs
 * -MMU complains not at the time of duplicate PD installation, but at the
 *      time of lookup matching multiple ways.
 * -Ideally these should never happen - but if they do - workaround by deleting
 *      the duplicate one.
 * -Knob to be verbose about it. (TODO: hook them up to debugfs)
 */
volatile int dup_pd_silent; /* Be silent about it or complain (default) */

void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
			  struct pt_regs *regs)
{
	struct cpuinfo_arc_mmu *mmu = &mmuinfo;
	unsigned long flags;
	int set, n_ways = mmu->ways;

	n_ways = min(n_ways, 4);
	BUG_ON(mmu->ways > 4);

	local_irq_save(flags);

	/* loop thru all sets of TLB */
	for (set = 0; set < mmu->sets; set++) {

		int is_valid, way;
		unsigned int pd0[4];

		/* read out all the ways of current set */
		for (way = 0, is_valid = 0; way < n_ways; way++) {
			write_aux_reg(ARC_REG_TLBINDEX,
					  SET_WAY_TO_IDX(mmu, set, way));
			write_aux_reg(ARC_REG_TLBCOMMAND, TLBRead);
			pd0[way] = read_aux_reg(ARC_REG_TLBPD0);
			is_valid |= pd0[way] & _PAGE_PRESENT;
			pd0[way] &= PAGE_MASK;
		}

		/* If all the WAYS in SET are empty, skip to next SET */
		if (!is_valid)
			continue;

		/* Scan the set for duplicate ways: needs a nested loop */
		for (way = 0; way < n_ways - 1; way++) {

			int n;

			if (!pd0[way])
				continue;

			for (n = way + 1; n < n_ways; n++) {
				if (pd0[way] != pd0[n])
					continue;

				if (!dup_pd_silent)
					pr_info("Dup TLB PD0 %08x @ set %d ways %d,%d\n",
						pd0[way], set, way, n);

				/*
				 * clear entry @way and not @n.
				 * This is critical to our optimised loop
				 */
				pd0[way] = 0;
				write_aux_reg(ARC_REG_TLBINDEX,
						SET_WAY_TO_IDX(mmu, set, way));
				__tlb_entry_erase();
			}
		}
	}

	local_irq_restore(flags);
}