xref: /kernel/linux/linux-5.10/arch/arm/include/asm/tlb.h (revision 8c2ecf20)
1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 *  arch/arm/include/asm/tlb.h
4 *
5 *  Copyright (C) 2002 Russell King
6 *
7 *  Experimentation shows that on a StrongARM, it appears to be faster
8 *  to use the "invalidate whole tlb" rather than "invalidate single
9 *  tlb" for this.
10 *
11 *  This appears true for both the process fork+exit case, as well as
12 *  the munmap-large-area case.
13 */
14#ifndef __ASMARM_TLB_H
15#define __ASMARM_TLB_H
16
17#include <asm/cacheflush.h>
18
19#ifndef CONFIG_MMU
20
21#include <linux/pagemap.h>
22
23#define tlb_flush(tlb)	((void) tlb)
24
25#include <asm-generic/tlb.h>
26
27#else /* !CONFIG_MMU */
28
29#include <linux/swap.h>
30#include <asm/tlbflush.h>
31
/*
 * Called by the generic mmu_gather code once it is safe to free a
 * page-table page that was queued via tlb_remove_table(); the opaque
 * table pointer is really the struct page of the page-table page.
 */
static inline void __tlb_remove_table(void *_table)
{
	struct page *page = _table;

	free_page_and_swap_cache(page);
}
36
37#include <asm-generic/tlb.h>
38
/*
 * Queue a PTE page for deferred freeing during an mmu_gather teardown.
 * The dtor releases the PTE-page metadata (pairing the earlier
 * pgtable_pte_page_ctor()) before the page is handed to
 * tlb_remove_table() for RCU-safe freeing.
 */
static inline void
__pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, unsigned long addr)
{
	pgtable_pte_page_dtor(pte);

#ifndef CONFIG_ARM_LPAE
	/*
	 * With the classic ARM MMU, a pte page has two corresponding pmd
	 * entries, each covering 1MB.
	 */
	/*
	 * Widen the gather range so it straddles the 1MB boundary: one
	 * page either side of (addr & PMD_MASK) + SZ_1M, ensuring the
	 * eventual flush covers the sections mapped by both pmd entries.
	 */
	addr = (addr & PMD_MASK) + SZ_1M;
	__tlb_adjust_range(tlb, addr - PAGE_SIZE, 2 * PAGE_SIZE);
#endif

	tlb_remove_table(tlb, pte);
}
55
/*
 * Queue a PMD page for deferred freeing during an mmu_gather teardown.
 * Only LPAE has a separate PMD level backed by its own page; with the
 * classic 2-level ARM MMU this is a no-op (nothing is allocated for
 * the pmd), hence the empty body when CONFIG_ARM_LPAE is not set.
 */
static inline void
__pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
{
#ifdef CONFIG_ARM_LPAE
	struct page *page = virt_to_page(pmdp);

	/* Release pmd-page metadata before the RCU-deferred free. */
	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
#endif
}
66
67#endif /* CONFIG_MMU */
68#endif
69