162306a36Sopenharmony_ci/* SPDX-License-Identifier: GPL-2.0-only */
262306a36Sopenharmony_ci/*
362306a36Sopenharmony_ci * Based on arch/arm/include/asm/tlbflush.h
462306a36Sopenharmony_ci *
562306a36Sopenharmony_ci * Copyright (C) 1999-2003 Russell King
662306a36Sopenharmony_ci * Copyright (C) 2012 ARM Ltd.
762306a36Sopenharmony_ci */
862306a36Sopenharmony_ci#ifndef __ASM_TLBFLUSH_H
962306a36Sopenharmony_ci#define __ASM_TLBFLUSH_H
1062306a36Sopenharmony_ci
1162306a36Sopenharmony_ci#ifndef __ASSEMBLY__
1262306a36Sopenharmony_ci
1362306a36Sopenharmony_ci#include <linux/bitfield.h>
1462306a36Sopenharmony_ci#include <linux/mm_types.h>
1562306a36Sopenharmony_ci#include <linux/sched.h>
1662306a36Sopenharmony_ci#include <linux/mmu_notifier.h>
1762306a36Sopenharmony_ci#include <asm/cputype.h>
1862306a36Sopenharmony_ci#include <asm/mmu.h>
1962306a36Sopenharmony_ci
2062306a36Sopenharmony_ci/*
2162306a36Sopenharmony_ci * Raw TLBI operations.
2262306a36Sopenharmony_ci *
2362306a36Sopenharmony_ci * Where necessary, use the __tlbi() macro to avoid asm()
2462306a36Sopenharmony_ci * boilerplate. Drivers and most kernel code should use the TLB
2562306a36Sopenharmony_ci * management routines in preference to the macro below.
2662306a36Sopenharmony_ci *
2762306a36Sopenharmony_ci * The macro can be used as __tlbi(op) or __tlbi(op, arg), depending
2862306a36Sopenharmony_ci * on whether a particular TLBI operation takes an argument or
2962306a36Sopenharmony_ci * not. The macros handles invoking the asm with or without the
3062306a36Sopenharmony_ci * register argument as appropriate.
3162306a36Sopenharmony_ci */
/*
 * __TLBI_0 - emit a TLBI instruction that takes no register operand.
 *
 * The ALTERNATIVE is patched at boot: on CPUs affected by the
 * REPEAT_TLBI erratum (and with the workaround compiled in), the two
 * nops become "dsb ish; tlbi", i.e. the invalidation is issued a
 * second time after a full barrier.
 */
#define __TLBI_0(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op "\n"				       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op,	       \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : )
3962306a36Sopenharmony_ci
/*
 * __TLBI_1 - emit a TLBI instruction that takes one register operand
 * (the encoded VA/ASID in 'arg'). Same REPEAT_TLBI erratum patching
 * as __TLBI_0, repeating the TLBI with the same operand.
 */
#define __TLBI_1(op, arg) asm (ARM64_ASM_PREAMBLE			       \
			       "tlbi " #op ", %0\n"			       \
		   ALTERNATIVE("nop\n			nop",		       \
			       "dsb ish\n		tlbi " #op ", %0",     \
			       ARM64_WORKAROUND_REPEAT_TLBI,		       \
			       CONFIG_ARM64_WORKAROUND_REPEAT_TLBI)	       \
			    : : "r" (arg))
4762306a36Sopenharmony_ci
/* Dispatch helper: token-pastes to __TLBI_0 or __TLBI_1 depending on 'n'. */
#define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg)

/*
 * __tlbi(op) or __tlbi(op, arg): the trailing ", 1, 0" makes 'n' in
 * __TLBI_N() resolve to 1 when a register argument is supplied and to
 * 0 when it is not.
 */
#define __tlbi(op, ...)		__TLBI_N(op, ##__VA_ARGS__, 1, 0)
5162306a36Sopenharmony_ci
/*
 * As __tlbi(), but targets the userspace (KPTI) companion ASID by
 * setting USER_ASID_FLAG in the operand. No-op when the kernel is
 * mapped at EL0 (no separate user ASID exists).
 */
#define __tlbi_user(op, arg) do {						\
	if (arm64_kernel_unmapped_at_el0())					\
		__tlbi(op, (arg) | USER_ASID_FLAG);				\
} while (0)
5662306a36Sopenharmony_ci
/*
 * Build the operand for an address-based TLBI: bits [43:0] carry the
 * virtual address shifted right by 12, bits [63:48] carry the ASID.
 */
#define __TLBI_VADDR(addr, asid)				\
	({							\
		unsigned long __tlbi_op =			\
			((addr) >> 12) & GENMASK_ULL(43, 0);	\
		__tlbi_op | ((unsigned long)(asid) << 48);	\
	})
6562306a36Sopenharmony_ci
6662306a36Sopenharmony_ci/*
6762306a36Sopenharmony_ci * Get translation granule of the system, which is decided by
6862306a36Sopenharmony_ci * PAGE_SIZE.  Used by TTL.
6962306a36Sopenharmony_ci *  - 4KB	: 1
7062306a36Sopenharmony_ci *  - 16KB	: 2
7162306a36Sopenharmony_ci *  - 64KB	: 3
7262306a36Sopenharmony_ci */
#define TLBI_TTL_TG_4K		1	/* TG encoding for a 4KB granule */
#define TLBI_TTL_TG_16K		2	/* TG encoding for a 16KB granule */
#define TLBI_TTL_TG_64K		3	/* TG encoding for a 64KB granule */
7662306a36Sopenharmony_ci
7762306a36Sopenharmony_cistatic inline unsigned long get_trans_granule(void)
7862306a36Sopenharmony_ci{
7962306a36Sopenharmony_ci	switch (PAGE_SIZE) {
8062306a36Sopenharmony_ci	case SZ_4K:
8162306a36Sopenharmony_ci		return TLBI_TTL_TG_4K;
8262306a36Sopenharmony_ci	case SZ_16K:
8362306a36Sopenharmony_ci		return TLBI_TTL_TG_16K;
8462306a36Sopenharmony_ci	case SZ_64K:
8562306a36Sopenharmony_ci		return TLBI_TTL_TG_64K;
8662306a36Sopenharmony_ci	default:
8762306a36Sopenharmony_ci		return 0;
8862306a36Sopenharmony_ci	}
8962306a36Sopenharmony_ci}
9062306a36Sopenharmony_ci
/*
 * Level-based TLBI operations.
 *
 * When ARMv8.4-TTL exists, TLBI operations take an additional hint for
 * the level at which the invalidation must take place. If the level is
 * wrong, no invalidation may take place. In the case where the level
 * cannot be easily determined, a 0 value for the level parameter will
 * perform a non-hinted invalidation.
 *
 * For Stage-2 invalidation, use the level values provided to that effect
 * in asm/stage2_pgtable.h.
 */
#define TLBI_TTL_MASK		GENMASK_ULL(47, 44)

/*
 * Issue __tlbi(op, addr), folding the TTL hint (granule + level) into
 * bits [47:44] of the operand when the CPU supports ARMv8.4-TTL and a
 * non-zero level was supplied.
 */
#define __tlbi_level(op, addr, level) do {				\
	u64 arg = addr;							\
									\
	if (cpus_have_const_cap(ARM64_HAS_ARMv8_4_TTL) &&		\
	    level) {							\
		u64 ttl = level & 3;					\
		ttl |= get_trans_granule() << 2;			\
		arg &= ~TLBI_TTL_MASK;					\
		arg |= FIELD_PREP(TLBI_TTL_MASK, ttl);			\
	}								\
									\
	__tlbi(op, arg);						\
} while (0)
11862306a36Sopenharmony_ci
/* Level-hinted counterpart of __tlbi_user(): targets the KPTI user ASID. */
#define __tlbi_user_level(op, arg, level) do {				\
	if (arm64_kernel_unmapped_at_el0())				\
		__tlbi_level(op, (arg | USER_ASID_FLAG), level);	\
} while (0)
12362306a36Sopenharmony_ci
/*
 * This macro creates a properly formatted VA operand for the TLB RANGE.
 * The value bit assignments are:
 *
 * +----------+------+-------+-------+-------+----------------------+
 * |   ASID   |  TG  | SCALE |  NUM  |  TTL  |        BADDR         |
 * +----------+------+-------+-------+-------+----------------------+
 * |63      48|47  46|45   44|43   39|38   37|36                   0|
 *
 * The address range is determined by below formula:
 * [BADDR, BADDR + (NUM + 1) * 2^(5*SCALE + 1) * PAGESIZE)
 *
 */
#define __TLBI_VADDR_RANGE(addr, asid, scale, num, ttl)		\
	({							\
		unsigned long __ta = (addr) >> PAGE_SHIFT;	\
		__ta &= GENMASK_ULL(36, 0);			\
		__ta |= (unsigned long)(ttl) << 37;		\
		__ta |= (unsigned long)(num) << 39;		\
		__ta |= (unsigned long)(scale) << 44;		\
		__ta |= get_trans_granule() << 46;		\
		__ta |= (unsigned long)(asid) << 48;		\
		__ta;						\
	})
14862306a36Sopenharmony_ci
/* These macros are used by the TLBI RANGE feature. */
/* Pages covered by one range op: (num + 1) * 2^(5*scale + 1). */
#define __TLBI_RANGE_PAGES(num, scale)	\
	((unsigned long)((num) + 1) << (5 * (scale) + 1))
/* Largest range one TLBI RANGE instruction can express (num=31, scale=3). */
#define MAX_TLBI_RANGE_PAGES		__TLBI_RANGE_PAGES(31, 3)
15362306a36Sopenharmony_ci
/*
 * Generate 'num' values from -1 to 30 with -1 rejected by the
 * __flush_tlb_range() loop below.  A result of -1 means "no range op
 * possible at this scale"; the caller then bumps 'scale' and retries.
 */
#define TLBI_RANGE_MASK			GENMASK_ULL(4, 0)
#define __TLBI_RANGE_NUM(pages, scale)	\
	((((pages) >> (5 * (scale) + 1)) & TLBI_RANGE_MASK) - 1)
16162306a36Sopenharmony_ci
16262306a36Sopenharmony_ci/*
16362306a36Sopenharmony_ci *	TLB Invalidation
16462306a36Sopenharmony_ci *	================
16562306a36Sopenharmony_ci *
16662306a36Sopenharmony_ci * 	This header file implements the low-level TLB invalidation routines
16762306a36Sopenharmony_ci *	(sometimes referred to as "flushing" in the kernel) for arm64.
16862306a36Sopenharmony_ci *
16962306a36Sopenharmony_ci *	Every invalidation operation uses the following template:
17062306a36Sopenharmony_ci *
17162306a36Sopenharmony_ci *	DSB ISHST	// Ensure prior page-table updates have completed
17262306a36Sopenharmony_ci *	TLBI ...	// Invalidate the TLB
17362306a36Sopenharmony_ci *	DSB ISH		// Ensure the TLB invalidation has completed
17462306a36Sopenharmony_ci *      if (invalidated kernel mappings)
17562306a36Sopenharmony_ci *		ISB	// Discard any instructions fetched from the old mapping
17662306a36Sopenharmony_ci *
17762306a36Sopenharmony_ci *
17862306a36Sopenharmony_ci *	The following functions form part of the "core" TLB invalidation API,
17962306a36Sopenharmony_ci *	as documented in Documentation/core-api/cachetlb.rst:
18062306a36Sopenharmony_ci *
18162306a36Sopenharmony_ci *	flush_tlb_all()
18262306a36Sopenharmony_ci *		Invalidate the entire TLB (kernel + user) on all CPUs
18362306a36Sopenharmony_ci *
18462306a36Sopenharmony_ci *	flush_tlb_mm(mm)
18562306a36Sopenharmony_ci *		Invalidate an entire user address space on all CPUs.
18662306a36Sopenharmony_ci *		The 'mm' argument identifies the ASID to invalidate.
18762306a36Sopenharmony_ci *
18862306a36Sopenharmony_ci *	flush_tlb_range(vma, start, end)
18962306a36Sopenharmony_ci *		Invalidate the virtual-address range '[start, end)' on all
19062306a36Sopenharmony_ci *		CPUs for the user address space corresponding to 'vma->mm'.
19162306a36Sopenharmony_ci *		Note that this operation also invalidates any walk-cache
19262306a36Sopenharmony_ci *		entries associated with translations for the specified address
19362306a36Sopenharmony_ci *		range.
19462306a36Sopenharmony_ci *
19562306a36Sopenharmony_ci *	flush_tlb_kernel_range(start, end)
19662306a36Sopenharmony_ci *		Same as flush_tlb_range(..., start, end), but applies to
19762306a36Sopenharmony_ci * 		kernel mappings rather than a particular user address space.
19862306a36Sopenharmony_ci *		Whilst not explicitly documented, this function is used when
19962306a36Sopenharmony_ci *		unmapping pages from vmalloc/io space.
20062306a36Sopenharmony_ci *
20162306a36Sopenharmony_ci *	flush_tlb_page(vma, addr)
20262306a36Sopenharmony_ci *		Invalidate a single user mapping for address 'addr' in the
20362306a36Sopenharmony_ci *		address space corresponding to 'vma->mm'.  Note that this
20462306a36Sopenharmony_ci *		operation only invalidates a single, last-level page-table
20562306a36Sopenharmony_ci *		entry and therefore does not affect any walk-caches.
20662306a36Sopenharmony_ci *
20762306a36Sopenharmony_ci *
20862306a36Sopenharmony_ci *	Next, we have some undocumented invalidation routines that you probably
20962306a36Sopenharmony_ci *	don't want to call unless you know what you're doing:
21062306a36Sopenharmony_ci *
21162306a36Sopenharmony_ci *	local_flush_tlb_all()
21262306a36Sopenharmony_ci *		Same as flush_tlb_all(), but only applies to the calling CPU.
21362306a36Sopenharmony_ci *
21462306a36Sopenharmony_ci *	__flush_tlb_kernel_pgtable(addr)
21562306a36Sopenharmony_ci *		Invalidate a single kernel mapping for address 'addr' on all
21662306a36Sopenharmony_ci *		CPUs, ensuring that any walk-cache entries associated with the
21762306a36Sopenharmony_ci *		translation are also invalidated.
21862306a36Sopenharmony_ci *
21962306a36Sopenharmony_ci *	__flush_tlb_range(vma, start, end, stride, last_level)
22062306a36Sopenharmony_ci *		Invalidate the virtual-address range '[start, end)' on all
22162306a36Sopenharmony_ci *		CPUs for the user address space corresponding to 'vma->mm'.
22262306a36Sopenharmony_ci *		The invalidation operations are issued at a granularity
22362306a36Sopenharmony_ci *		determined by 'stride' and only affect any walk-cache entries
22462306a36Sopenharmony_ci *		if 'last_level' is equal to false.
22562306a36Sopenharmony_ci *
22662306a36Sopenharmony_ci *
22762306a36Sopenharmony_ci *	Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented
22862306a36Sopenharmony_ci *	on top of these routines, since that is our interface to the mmu_gather
22962306a36Sopenharmony_ci *	API as used by munmap() and friends.
23062306a36Sopenharmony_ci */
/* Invalidate the entire TLB on the calling CPU only (non-shareable domain). */
static inline void local_flush_tlb_all(void)
{
	dsb(nshst);	/* prior page-table updates visible (this CPU) */
	__tlbi(vmalle1);
	dsb(nsh);	/* wait for the invalidation to complete */
	isb();		/* discard instructions fetched via old mappings */
}
23862306a36Sopenharmony_ci
/* Invalidate the entire TLB (kernel + user) on all CPUs (inner-shareable). */
static inline void flush_tlb_all(void)
{
	dsb(ishst);	/* prior page-table updates visible to all CPUs */
	__tlbi(vmalle1is);
	dsb(ish);	/* wait for the broadcast invalidation to complete */
	isb();		/* discard instructions fetched via old mappings */
}
24662306a36Sopenharmony_ci
/*
 * Invalidate all TLB entries for the address space 'mm' (by ASID) on
 * all CPUs, including the KPTI userspace companion ASID, and notify
 * any secondary TLBs (e.g. IOMMU) via the mmu_notifier.
 */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned long asid;

	dsb(ishst);
	/* Address of 0: only the ASID field of the operand is relevant. */
	asid = __TLBI_VADDR(0, ASID(mm));
	__tlbi(aside1is, asid);
	__tlbi_user(aside1is, asid);
	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
25862306a36Sopenharmony_ci
/*
 * Invalidate the last-level TLB entry for user address 'uaddr' in 'mm'
 * on all CPUs.  "nosync": no trailing dsb(ish) is issued, so the caller
 * is responsible for waiting for completion.
 */
static inline void __flush_tlb_page_nosync(struct mm_struct *mm,
					   unsigned long uaddr)
{
	unsigned long addr;

	dsb(ishst);
	addr = __TLBI_VADDR(uaddr, ASID(mm));
	/* 'vale1is': leaf-only invalidation; walk caches are preserved. */
	__tlbi(vale1is, addr);
	__tlbi_user(vale1is, addr);
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, uaddr & PAGE_MASK,
						(uaddr & PAGE_MASK) + PAGE_SIZE);
}
27162306a36Sopenharmony_ci
27262306a36Sopenharmony_cistatic inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
27362306a36Sopenharmony_ci					 unsigned long uaddr)
27462306a36Sopenharmony_ci{
27562306a36Sopenharmony_ci	return __flush_tlb_page_nosync(vma->vm_mm, uaddr);
27662306a36Sopenharmony_ci}
27762306a36Sopenharmony_ci
/*
 * Invalidate a single last-level user mapping for 'uaddr' in
 * 'vma->vm_mm' and wait for the invalidation to complete on all CPUs.
 */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long uaddr)
{
	flush_tlb_page_nosync(vma, uaddr);
	dsb(ish);
}
28462306a36Sopenharmony_ci
/*
 * Decide whether TLB flushes for 'mm' may be batched and deferred to
 * arch_tlbbatch_flush().  Returns true unless the REPEAT_TLBI erratum
 * workaround would negate the benefit.
 */
static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
{
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	/*
	 * TLB flush deferral is not required on systems which are affected by
	 * ARM64_WORKAROUND_REPEAT_TLBI, as __tlbi()/__tlbi_user() implementation
	 * will have two consecutive TLBI instructions with a dsb(ish) in between
	 * defeating the purpose (i.e save overall 'dsb ish' cost).
	 */
	if (unlikely(cpus_have_const_cap(ARM64_WORKAROUND_REPEAT_TLBI)))
		return false;
#endif
	return true;
}
29962306a36Sopenharmony_ci
/*
 * Queue a page invalidation into the unmap batch: the TLBI is issued
 * immediately but without a completion barrier; 'batch' needs no
 * per-arch state here since arch_tlbbatch_flush() only issues a DSB.
 */
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
					     struct mm_struct *mm,
					     unsigned long uaddr)
{
	__flush_tlb_page_nosync(mm, uaddr);
}
30662306a36Sopenharmony_ci
30762306a36Sopenharmony_ci/*
30862306a36Sopenharmony_ci * If mprotect/munmap/etc occurs during TLB batched flushing, we need to
30962306a36Sopenharmony_ci * synchronise all the TLBI issued with a DSB to avoid the race mentioned in
31062306a36Sopenharmony_ci * flush_tlb_batched_pending().
31162306a36Sopenharmony_ci */
31262306a36Sopenharmony_cistatic inline void arch_flush_tlb_batched_pending(struct mm_struct *mm)
31362306a36Sopenharmony_ci{
31462306a36Sopenharmony_ci	dsb(ish);
31562306a36Sopenharmony_ci}
31662306a36Sopenharmony_ci
31762306a36Sopenharmony_ci/*
31862306a36Sopenharmony_ci * To support TLB batched flush for multiple pages unmapping, we only send
31962306a36Sopenharmony_ci * the TLBI for each page in arch_tlbbatch_add_pending() and wait for the
32062306a36Sopenharmony_ci * completion at the end in arch_tlbbatch_flush(). Since we've already issued
32162306a36Sopenharmony_ci * TLBI for each page so only a DSB is needed to synchronise its effect on the
32262306a36Sopenharmony_ci * other CPUs.
32362306a36Sopenharmony_ci *
32462306a36Sopenharmony_ci * This will save the time waiting on DSB comparing issuing a TLBI;DSB sequence
32562306a36Sopenharmony_ci * for each page.
32662306a36Sopenharmony_ci */
32762306a36Sopenharmony_cistatic inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
32862306a36Sopenharmony_ci{
32962306a36Sopenharmony_ci	dsb(ish);
33062306a36Sopenharmony_ci}
33162306a36Sopenharmony_ci
/*
 * This is meant to avoid soft lock-ups on large TLB flushing ranges and not
 * necessarily a performance improvement.  Ranges needing more individual
 * TLBIs than this fall back to a full-ASID (or full-TLB) flush.
 */
#define MAX_TLBI_OPS	PTRS_PER_PTE
33762306a36Sopenharmony_ci
/*
 * __flush_tlb_range_op - Perform TLBI operation upon a range
 *
 * @op:	TLBI instruction that operates on a range (has 'r' prefix)
 * @start:	The start address of the range
 * @pages:	Range as the number of pages from 'start'
 * @stride:	Flush granularity
 * @asid:	The ASID of the task (0 for IPA instructions)
 * @tlb_level:	Translation Table level hint, if known
 * @tlbi_user:	If 'true', call an additional __tlbi_user()
 *              (typically for user ASIDs). 'false' for IPA instructions
 *
 * When the CPU does not support TLB range operations, flush the TLB
 * entries one by one at the granularity of 'stride'. If the TLB
 * range ops are supported, then:
 *
 * 1. If 'pages' is odd, flush the first page through non-range
 *    operations;
 *
 * 2. For remaining pages: the minimum range granularity is decided
 *    by 'scale', so multiple range TLBI operations may be required.
 *    Start from scale = 0, flush the corresponding number of pages
 *    ((num+1)*2^(5*scale+1) starting from 'addr'), then increase it
 *    until no pages left.
 *
 * Note that certain ranges can be represented by either num = 31 and
 * scale or num = 0 and scale + 1. The loop below favours the latter
 * since num is limited to 30 by the __TLBI_RANGE_NUM() macro.
 */
#define __flush_tlb_range_op(op, start, pages, stride,			\
				asid, tlb_level, tlbi_user)		\
do {									\
	int num = 0;							\
	int scale = 0;							\
	unsigned long addr;						\
									\
	while (pages > 0) {						\
		if (!system_supports_tlb_range() ||			\
		    pages % 2 == 1) {					\
			addr = __TLBI_VADDR(start, asid);		\
			__tlbi_level(op, addr, tlb_level);		\
			if (tlbi_user)					\
				__tlbi_user_level(op, addr, tlb_level);	\
			start += stride;				\
			pages -= stride >> PAGE_SHIFT;			\
			continue;					\
		}							\
									\
		num = __TLBI_RANGE_NUM(pages, scale);			\
		if (num >= 0) {						\
			addr = __TLBI_VADDR_RANGE(start, asid, scale,	\
						  num, tlb_level);	\
			__tlbi(r##op, addr);				\
			if (tlbi_user)					\
				__tlbi_user(r##op, addr);		\
			start += __TLBI_RANGE_PAGES(num, scale) << PAGE_SHIFT; \
			pages -= __TLBI_RANGE_PAGES(num, scale);	\
		}							\
		scale++;						\
	}								\
} while (0)
39962306a36Sopenharmony_ci
/* Stage-2 (IPA) range flush: no ASID and no user-ASID companion TLBI. */
#define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \
	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false)
40262306a36Sopenharmony_ci
/*
 * Invalidate the range '[start, end)' for 'vma->vm_mm' on all CPUs.
 *
 * @stride:	granularity at which individual TLBIs are issued
 * @last_level:	true to invalidate only leaf entries (walk caches kept)
 * @tlb_level:	ARMv8.4-TTL level hint, 0 when unknown
 */
static inline void __flush_tlb_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end,
				     unsigned long stride, bool last_level,
				     int tlb_level)
{
	unsigned long asid, pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/*
	 * Without TLB range ops we can handle up to
	 * (MAX_TLBI_OPS - 1) pages;
	 * with TLB range ops we can handle up to
	 * (MAX_TLBI_RANGE_PAGES - 1) pages.
	 * Beyond that, flushing the whole ASID is cheaper.
	 */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_TLBI_OPS * stride)) ||
	    pages >= MAX_TLBI_RANGE_PAGES) {
		flush_tlb_mm(vma->vm_mm);
		return;
	}

	dsb(ishst);
	asid = ASID(vma->vm_mm);

	if (last_level)
		__flush_tlb_range_op(vale1is, start, pages, stride, asid, tlb_level, true);
	else
		__flush_tlb_range_op(vae1is, start, pages, stride, asid, tlb_level, true);

	dsb(ish);
	mmu_notifier_arch_invalidate_secondary_tlbs(vma->vm_mm, start, end);
}
43862306a36Sopenharmony_ci
/* Core-API range flush: non-leaf-only, no level hint, page-sized stride. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	/*
	 * We cannot use leaf-only invalidation here, since we may be invalidating
	 * table entries as part of collapsing hugepages or moving page tables.
	 * Set the tlb_level to 0 because we can not get enough information here.
	 */
	__flush_tlb_range(vma, start, end, PAGE_SIZE, false, 0);
}
44962306a36Sopenharmony_ci
/*
 * Invalidate kernel mappings in '[start, end)' on all CPUs, falling
 * back to a full flush for very large ranges (soft-lockup avoidance).
 */
static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) {
		flush_tlb_all();
		return;
	}

	/* Convert to TLBI operand encoding (VA >> 12, ASID 0). */
	start = __TLBI_VADDR(start, 0);
	end = __TLBI_VADDR(end, 0);

	dsb(ishst);
	/* Step is one page in operand units: PAGE_SIZE >> 12. */
	for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12))
		__tlbi(vaale1is, addr);
	dsb(ish);
	isb();
}
46862306a36Sopenharmony_ci
/*
 * Used to invalidate the TLB (walk caches) corresponding to intermediate page
 * table levels (pgd/pud/pmd).  'vaae1is' is not leaf-only, so walk-cache
 * entries for the translation are invalidated as well.
 */
static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
	isb();
}
48262306a36Sopenharmony_ci#endif
48362306a36Sopenharmony_ci
48462306a36Sopenharmony_ci#endif
485