/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
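 * A minimal usage sketch of the API described below (the unmap path; locking
 * and the actual pte clearing are elided):
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	tlb_start_vma(&tlb, vma);
 *	...
 *	tlb_remove_tlb_entry(&tlb, ptep, addr);	// extend the flush range
 *	tlb_remove_page(&tlb, page);		// queue the page for freeing
 *	...
 *	tlb_end_vma(&tlb, vma);
 *	tlb_finish_mmu(&tlb);			// final invalidate + free
 *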
 * The mmu_gather API consists of:
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / __tlb_remove_page()
 *  - tlb_remove_page_size() / __tlb_remove_page_size()
 *
 *    __tlb_remove_page_size() is the basic primitive that queues a page for
 *    freeing. __tlb_remove_page() assumes PAGE_SIZE. Both will return a
 *    boolean indicating if the queue is (now) full and a call to
 *    tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
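 *    A caller driving the low-level primitive directly must flush when the
 *    queue fills; tlb_remove_page_size() below does exactly this:
 *
 *	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
 *		tlb_flush_mmu(tlb);
 *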
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(); because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provide the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
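 * A sketch of what an architecture-provided tlb_flush() might look like;
 * __arch_flush_tlb_range() is a hypothetical helper, not a real interface:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		if (tlb->fullmm || tlb->need_flush_all)
 *			flush_tlb_mm(tlb->mm);
 *		else if (tlb->end)
 *			__arch_flush_tlb_range(tlb->mm, tlb->start, tlb->end,
 *					       tlb_get_unmap_shift(tlb),
 *					       tlb->freed_tables);
 *	}
 *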
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table()
 *  which does the actual freeing of these pages.
 *
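 *  A minimal sketch of such a hook, assuming the directories are ordinary
 *  pages (a real port frees whatever its page-directory allocator hands
 *  out, plus any destructor bookkeeping):
 *
 *	static inline void __tlb_remove_table(void *table)
 *	{
 *		free_page_and_swap_cache((struct page *)table);
 *	}
 *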
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be
 *  called before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
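
/*
 * That is, one batch fills the remainder of a single page with table
 * pointers. For example, assuming 4 KiB pages on a 64-bit kernel without
 * MMU_GATHER_RCU_TABLE_FREE (so an 8-byte header after padding), this is
 * (4096 - 8) / 8 = 511 tables per batch.
 */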

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb,
				   struct encoded_page *page,
				   int page_size);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb) (((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int		page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end   = max(tlb->end, address + range_size);
}
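
/*
 * For example, with 4 KiB pages, __tlb_adjust_range(tlb, 0x1000, PAGE_SIZE)
 * followed by __tlb_adjust_range(tlb, 0x5000, PAGE_SIZE) leaves the
 * accumulated range at [0x1000, 0x6000): a single flush of that range covers
 * both pages and the (already unmapped) hole between them.
 */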

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC    : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */
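
/*
 * An architecture overrides the fallback above from its asm/tlb.h; a sketch
 * of the usual pattern (forward-declare, define the macro so the #ifndef
 * above sees it, include, then provide the body):
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb);
 *	#define tlb_flush tlb_flush
 *	#include <asm-generic/tlb.h>
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		... arch-specific invalidate of tlb->start .. tlb->end ...
 *	}
 */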

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);
	tlb->vma_pfn  = !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, encode_page(page, 0), page_size))
		tlb_flush_mmu(tlb);
}

static __always_inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page, unsigned int flags)
{
	return __tlb_remove_page_size(tlb, encode_page(page, flags), PAGE_SIZE);
}

/* tlb_remove_page
 *	Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
 *	required.
 */
static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
{
	tlb_remove_table(tlb, pt);
}

/* Like tlb_remove_ptdesc, but for page-like page directories. */
static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_page(tlb, ptdesc_page(pt));
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}
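
/*
 * For example (a sketch of what a huge-page zap path does), the batch is
 * switched to the huge page size before such pages are queued:
 *
 *	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 *	...
 *	tlb_remove_page_size(tlb, page, HPAGE_PMD_SIZE);
 */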

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
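
/*
 * An architecture with per-entry invalidation can use this as the stride of
 * its flush loop; a sketch, where __arch_invalidate_page() is a hypothetical
 * helper:
 *
 *	unsigned long addr, stride = tlb_get_unmap_size(tlb);
 *
 *	for (addr = tlb->start; addr < tlb->end; addr += stride)
 *		__arch_invalidate_page(tlb->mm, addr);
 */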

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs after
	 * all. Force flush TLBs for such ranges to avoid munmap() vs
	 * unmap_mapping_range() races.
	 */
	if (tlb->vma_pfn || !IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS)) {
		/*
		 * Do a TLB flush and reset the range at VMA boundaries; this avoids
		 * the ranges growing with the unused space between consecutive VMAs.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				     unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
#define __tlb_remove_tlb_entry(tlb, ptep, address) do { } while (0)
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)
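
/*
 * Typical use in a pte zap loop, a sketch of what mm/memory.c does:
 *
 *	ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm);
 *	tlb_remove_tlb_entry(tlb, pte, addr);
 */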

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page table caches (i.e. caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, p4dp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, p4dp, address);		\
	} while (0)
#endif
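
/*
 * The __p*_free_tlb() hooks used above are provided by the architecture. A
 * minimal sketch for the pte level, omitting the constructor/destructor
 * bookkeeping a real port performs:
 *
 *	#define __pte_free_tlb(tlb, pte, address)	\
 *		tlb_remove_table((tlb), (pte))
 */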

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */