/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 *	Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_finish_mmu(); start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()).  In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *			whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range().
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If this option is set, the mmu_gather will not track individual pages for
 *  delayed freeing. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
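
/*
 * Illustrative only: a minimal sketch of the usual mmu_gather lifecycle in
 * generic mm code, following the unhook / invalidate / free ordering above.
 * It assumes the start/end-taking tlb_gather_mmu() / tlb_finish_mmu()
 * prototypes of this kernel generation (declared outside this header); the
 * page-table walk that produces ptep/page is omitted:
 *
 *	struct mmu_gather tlb;
 *	unsigned long addr;
 *
 *	tlb_gather_mmu(&tlb, vma->vm_mm, start, end);
 *	tlb_start_vma(&tlb, vma);
 *	for (addr = start; addr != end; addr += PAGE_SIZE) {
 *		...					// walk to ptep, clear it, grab page
 *		tlb_remove_tlb_entry(&tlb, ptep, addr);	// 1) unhook + track range
 *		tlb_remove_page(&tlb, page);		// queue for freeing, may flush
 *	}
 *	tlb_end_vma(&tlb, vma);				// 2) TLB invalidate the range
 *	tlb_finish_mmu(&tlb, start, end);		// 3) free the queued pages
 */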

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[0];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
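
/*
 * Illustrative only: with MMU_GATHER_TABLE_FREE an architecture's page-table
 * freeing hooks are expected to hand the directory to tlb_remove_table()
 * instead of freeing it directly, e.g. (a sketch for architectures whose pte
 * tables are plain, directly-mapped pages):
 *
 *	#define __pte_free_tlb(tlb, ptep, address)		\
 *		tlb_remove_table((tlb), virt_to_page(ptep))
 *
 * __tlb_remove_table() then performs the actual free once it is safe to do so.
 */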

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct page		*pages[0];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
				   int page_size);
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush) || defined(tlb_start_vma) || defined(tlb_end_vma)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush(), tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not have an efficient way to range-flush the TLB,
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#define tlb_end_vma tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush

#if defined(tlb_start_vma) || defined(tlb_end_vma)
#error Default tlb_flush() relies on default tlb_start_vma() and tlb_end_vma()
#endif

/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
}

#else

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }

#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return __tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
						     unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
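
/*
 * Illustrative only: a caller that alternates between base pages and
 * THP-sized pages is expected to announce the size before queueing,
 * e.g. (sketch):
 *
 *	tlb_change_page_size(tlb, PAGE_SIZE);
 *	__tlb_remove_page(tlb, page);
 *	...
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	__tlb_remove_page_size(tlb, huge_page, HPAGE_PMD_SIZE);
 *
 * With MMU_GATHER_PAGE_SIZE enabled, the size change above forces an
 * intermediate tlb_flush_mmu() so that tlb_flush() only ever sees one
 * page size per batch.
 */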

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
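
/*
 * Illustrative only: a sketch of how an architecture-provided tlb_flush()
 * might use tlb_get_unmap_shift()/size() to pick an invalidation stride,
 * loosely modelled on what range-based implementations do. invalidate_one()
 * stands in for the architecture's per-entry invalidate and is purely
 * hypothetical:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long stride = tlb_get_unmap_size(tlb);
 *		unsigned long addr;
 *
 *		if (tlb->fullmm || tlb->need_flush_all) {
 *			flush_tlb_mm(tlb->mm);
 *			return;
 *		}
 *
 *		for (addr = tlb->start; addr < tlb->end; addr += stride)
 *			invalidate_one(tlb->mm, addr);	// hypothetical
 *	}
 */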

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush.  When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
#ifndef tlb_start_vma
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
}
#endif

#ifndef tlb_end_vma
static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}
#endif

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that PTEs were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif
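
/*
 * Illustrative only: a simplified sketch of how the page-table teardown path
 * uses the *_free_tlb() macros above; free_pte_range() here is a reduced
 * stand-in for the corresponding mm/memory.c helper:
 *
 *	static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
 *				   unsigned long addr)
 *	{
 *		pgtable_t token = pmd_pgtable(*pmd);
 *
 *		pmd_clear(pmd);			// unhook the table first
 *		pte_free_tlb(tlb, token, addr);	// queue it; freed after the TLB flush
 *		mm_dec_nr_ptes(tlb->mm);
 *	}
 */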

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */