#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr   = 0;
	batch->max  = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}
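
/*
 * Illustrative sketch (not code from this file): after a few successful
 * calls to tlb_next_batch(), the gather's storage forms a singly linked
 * chain growing off the small on-stack "local" batch, with "active"
 * pointing at the batch currently being filled:
 *
 *	tlb->local -> batch 1 -> batch 2 -> NULL
 *	                              ^
 *	                         tlb->active
 */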

#ifdef CONFIG_SMP
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
{
	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = batch->encoded_pages[i];

		if (encoded_page_flags(enc)) {
			struct page *page = encoded_page_ptr(enc);
			page_remove_rmap(page, vma, false);
		}
	}
}

/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: the memory area from which the pages are being removed
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
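
/*
 * Illustrative sketch (not code from this file): a zap path that has
 * flagged pages for delayed rmap removal is expected to flush the TLB
 * first and only then drop the rmaps, roughly:
 *
 *	if (force_flush) {
 *		tlb_flush_mmu_tlbonly(tlb);
 *		tlb_flush_rmaps(tlb, vma);
 *	}
 *
 * "force_flush" here is a hypothetical local flag; the real caller is
 * the pte zap loop in mm/memory.c.
 */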
#endif

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		struct encoded_page **pages = batch->encoded_pages;

		do {
			/*
			 * Limit each free batch to 512 pages so that we
			 * call cond_resched() reasonably often even when
			 * PAGE_SIZE > 4K and a batch holds many entries.
			 */
			unsigned int nr = min(512U, batch->nr);

			free_pages_and_swap_cache(pages, nr);
			pages += nr;
			batch->nr -= nr;

			cond_resched();
		} while (batch->nr);
	}
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct encoded_page *page, int page_size)
{
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so,
	 * force a flush.
	 */
	batch->encoded_pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, encoded_page_ptr(page));

	return false;
}
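
/*
 * Illustrative sketch (hypothetical caller, not code from this file):
 * a "true" return means no further pages can be gathered, so the
 * caller must flush before queueing more:
 *
 *	if (__tlb_remove_page_size(tlb, encode_page(page, 0), PAGE_SIZE))
 *		tlb_flush_mmu(tlb);
 *
 * The usual entry points are the tlb_remove_page()/tlb_remove_page_size()
 * wrappers in <asm-generic/tlb.h>, which pass an unflagged encoded page.
 */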

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, then free the page. Since the
 * disabling of IRQs delays the completion of the TLB flush, we can never
 * observe an already freed page.
 *
 * Architectures that do not have this (PPC) need to delay the freeing by
 * some other means; this is that means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage, and
 * this allocation is deep inside the MM code and can thus easily fail on
 * memory pressure. To guarantee progress we fall back to single table
 * freeing; see the implementation of tlb_remove_table_one().
 */
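
/*
 * Illustrative sketch (not code from this file) of the walker side of
 * that contract: a lockless walker keeps IRQs disabled for the whole
 * walk, which holds off both the TLB-flush IPI and the RCU-sched grace
 * period that the freeing side waits for:
 *
 *	local_irq_save(flags);
 *	...walk the page tables without taking locks...
 *	local_irq_restore(flags);
 *
 * See lockless_pages_from_mm() in mm/gup.c for a real example.
 */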

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

static void tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
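
/*
 * Illustrative sketch (hypothetical arch hook, not code from this
 * file): an architecture opting in to batched table freeing typically
 * routes its page-table freeing helpers here and supplies a matching
 * __tlb_remove_table() that does the actual free, e.g.:
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb,
 *					  pgtable_t pte, unsigned long addr)
 *	{
 *		tlb_remove_table(tlb, pte);
 *	}
 */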

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr   = 0;
	tlb->local.max  = ARRAY_SIZE(tlb->__pages);
	tlb->active     = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under
	 * a non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries. So flush the TLB forcefully
	 * if we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force-flush everything in the given range. Otherwise we
	 * may be left with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by
		 * avoiding multiple CPUs spamming TLBI messages at the
		 * same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}
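
/*
 * Illustrative sketch (not code from this file) of the canonical
 * mmu_gather lifecycle, roughly as in the munmap path in mm/mmap.c:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	unmap_vmas(&tlb, ...);		// gather pages, track ranges
 *	free_pgtables(&tlb, ...);	// queue page-table pages
 *	tlb_finish_mmu(&tlb);		// flush the TLB, free everything
 */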