/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/preempt.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>
#include <asm/smp_plat.h>
#include <asm-generic/mm_hooks.h>

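/*
 * mm->context.vmalloc_seq counts changes to the vmalloc/ioremap region
 * of the kernel page tables. __check_vmalloc_seq() copies the updated
 * kernel mappings into this mm's page tables when the mm's counter has
 * fallen behind init_mm's.
 */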
void __check_vmalloc_seq(struct mm_struct *mm);

#ifdef CONFIG_MMU
static inline void check_vmalloc_seq(struct mm_struct *mm)
{
	if (!IS_ENABLED(CONFIG_ARM_LPAE) &&
	    unlikely(atomic_read(&mm->context.vmalloc_seq) !=
		     atomic_read(&init_mm.context.vmalloc_seq)))
		__check_vmalloc_seq(mm);
}
#endif

#ifdef CONFIG_CPU_HAS_ASID

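/*
 * On CPUs with hardware ASIDs, check_and_switch_context() allocates a
 * fresh ASID for this mm if its current one belongs to an older ASID
 * generation (a generation rollover flushes the TLB), then switches to
 * the mm under that ASID.
 */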
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);

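/*
 * A zero context.id means "no ASID assigned yet"; the first
 * check_and_switch_context() on this mm will allocate a real one.
 */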
#define init_new_context init_new_context
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	atomic64_set(&mm->context.id, 0);
	return 0;
}

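/*
 * Cortex-A15 erratum 798181: broadcast TLB maintenance may not reach
 * other cores reliably, so affected systems fall back to IPIs.
 * a15_erratum_get_cpumask() computes the set of CPUs that need one.
 */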
#ifdef CONFIG_ARM_ERRATA_798181
void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
			     cpumask_t *mask);
#else  /* !CONFIG_ARM_ERRATA_798181 */
static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
					   cpumask_t *mask)
{
}
#endif /* CONFIG_ARM_ERRATA_798181 */

#else	/* !CONFIG_CPU_HAS_ASID */

#ifdef CONFIG_MMU

static inline void check_and_switch_context(struct mm_struct *mm,
					    struct task_struct *tsk)
{
	check_vmalloc_seq(mm);

	if (irqs_disabled())
		/*
		 * cpu_switch_mm() needs to flush the VIVT caches. To avoid
		 * high interrupt latencies, defer the call and continue
		 * running with the old mm. Since we only support UP systems
		 * on non-ASID CPUs, the old mm will remain valid until the
		 * finish_arch_post_lock_switch() call.
		 */
		mm->context.switch_pending = 1;
	else
		cpu_switch_mm(mm->pgd, mm);
}

#ifndef MODULE
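/*
 * Called by the scheduler once the runqueue lock has been released;
 * this is where a switch deferred above (because IRQs were disabled)
 * actually takes effect.
 */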
#define finish_arch_post_lock_switch \
	finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
	struct mm_struct *mm = current->mm;

	if (mm && mm->context.switch_pending) {
		/*
		 * Preemption must be disabled during cpu_switch_mm() as we
		 * have some stateful cache flush implementations. Check
		 * switch_pending again in case we were preempted and the
		 * switch to this mm was already done.
		 */
		preempt_disable();
		if (mm->context.switch_pending) {
			mm->context.switch_pending = 0;
			cpu_switch_mm(mm->pgd, mm);
		}
		preempt_enable_no_resched();
	}
}
#endif /* !MODULE */

#endif	/* CONFIG_MMU */

#endif	/* CONFIG_CPU_HAS_ASID */

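/*
 * activate_mm() installs a brand-new mm on the current task, e.g. on
 * exec(); there is no incoming task to pass, hence the NULL.
 */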
#define activate_mm(prev,next)		switch_mm(prev, next, NULL)

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

	/*
	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
	 * so check for possible thread migration and invalidate the I-cache
	 * if we're new to this CPU.
	 */
	if (cache_ops_need_broadcast() &&
	    !cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();

	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
		check_and_switch_context(next, tsk);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}
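
/*
 * Rough call sequence during a context switch (illustrative sketch of
 * how the generic scheduler reaches the hooks above; the exact generic
 * entry points vary between kernel versions):
 *
 *	context_switch()
 *	  switch_mm(prev->active_mm, next->mm, next);
 *	  ...				// registers and stack switched
 *	finish_task_switch()
 *	  finish_arch_post_lock_switch();  // completes a deferred switch
 */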

#ifdef CONFIG_VMAP_STACK
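/*
 * With vmap'ed stacks, a task's kernel stack lives in the vmalloc
 * area, so the kernel mappings of any mm we may run on must be kept up
 * to date even when we only enter lazy TLB mode and keep the old mm.
 */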
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	if (mm != &init_mm)
		check_vmalloc_seq(mm);
}
#define enter_lazy_tlb enter_lazy_tlb
#endif

#include <asm-generic/mmu_context.h>

#endif