// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines for handling the MMU on those
 * PowerPC implementations where the MMU is not using the hash
 * table, such as the 8xx, 4xx and BookE processors.
 *
 * Copyright 2008 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                IBM Corp.
 *
 *  Derived from previous arch/powerpc/mm/mmu_context.c
 *  and arch/powerpc/include/asm/mmu_context.h
 *
 * TODO:
 *
 *   - The global context lock will not scale very well
 *   - The maps should be dynamically allocated to allow for processors
 *     that support more PID bits at runtime
 *   - Implement flush_tlb_mm() by making the context stale and picking
 *     a new one
 *   - More aggressively clear stale map bits and maybe find some way to
 *     also clear mm->cpu_vm_mask bits when processes are migrated
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/slab.h>

#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/smp.h>
#include <asm/kup.h>

#include <mm/mmu_decl.h>

/*
 * Room for two PTE table pointers: one for the kernel and one for the
 * current user process, each pointing to its root page table (pgdir).
 * Updated on context switch when CONFIG_BDI_SWITCH is enabled so that
 * an attached Abatron debugger can locate the current page tables.
 */
void *abatron_pteptrs[2];

/*
 * The MPC8xx has only 16 contexts. We rotate through them on each task switch.
 * A better way would be to keep track of tasks that own contexts, and implement
 * an LRU usage. That way very active tasks don't always have to pay the TLB
 * reload overhead. The kernel pages are mapped shared, so the kernel can run on
 * behalf of any task that makes a kernel entry. Shared does not mean they are
 * not protected, just that the ASID comparison is not performed. -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these as a way of
 * "switching" contexts. If the TID of the TLB is zero, the PID/TID comparison
 * is disabled, so we can use a TID of zero to represent all kernel pages as
 * shared among all contexts. -- Dan
 *
 * The IBM 47x core supports 16-bit PIDs, thus 65535 contexts. We should
 * normally never have to steal, though the facility is present if needed.
 * -- BenH
 */
#define FIRST_CONTEXT 1
#if defined(CONFIG_PPC_8xx)
#define LAST_CONTEXT 16
#elif defined(CONFIG_PPC_47x)
#define LAST_CONTEXT 65535
#else
#define LAST_CONTEXT 255
#endif

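/*
 * context_map tracks which context ids are allocated, context_mm[id]
 * records the mm that currently owns id, and stale_map[cpu] flags ids
 * that must be flushed from that CPU's TLB before being used there again.
 */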
static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock);

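/* Size in bytes of a bitmap large enough for context ids 0 .. LAST_CONTEXT */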
#define CTX_MAP_SIZE	\
	(sizeof(unsigned long) * (LAST_CONTEXT / BITS_PER_LONG + 1))


/* Steal a context from a task that has one at the moment.
 *
 * This is used when we are running out of available PID numbers
 * on the processors.
 *
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 *
 * For context stealing, we use a slightly different approach for
 * SMP and UP. Basically, the UP one is simpler and doesn't use
 * the stale map as we can just flush the local CPU's TLB
 *  -- benh
 */
static unsigned int steal_context_smp(unsigned int id)
{
	struct mm_struct *mm;
	unsigned int cpu, max, i;

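	/* Bound the search so we scan the context space at most once */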
	max = LAST_CONTEXT - FIRST_CONTEXT;

	/* Attempt to free next_context first and then loop until we manage */
	while (max--) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* We have a candidate victim; check whether it's active, as
		 * on SMP we cannot steal active contexts
		 */
		if (mm->context.active) {
			id++;
			if (id > LAST_CONTEXT)
				id = FIRST_CONTEXT;
			continue;
		}

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;

		/* Mark it stale on all CPUs that used this mm. For threaded
		 * implementations, we set it on all threads on each core
		 * represented in the mask. A future implementation will use
		 * a core map instead but this will do for now.
		 */
		for_each_cpu(cpu, mm_cpumask(mm)) {
			for (i = cpu_first_thread_sibling(cpu);
			     i <= cpu_last_thread_sibling(cpu); i++) {
				if (stale_map[i])
					__set_bit(id, stale_map[i]);
			}
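			/* Resume the outer scan at this core's last thread;
			 * for_each_cpu() then advances to the next core.
			 */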
			cpu = i - 1;
		}
		return id;
	}

	/* This will happen if you have more CPUs than available contexts;
	 * all we can do here is wait a bit and try again
	 */
	raw_spin_unlock(&context_lock);
	cpu_relax();
	raw_spin_lock(&context_lock);

	/* This will cause the caller to try again */
	return MMU_NO_CONTEXT;
}

static unsigned int steal_all_contexts(void)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();
	unsigned int id;

	for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
		/* Pick up the victim mm */
		mm = context_mm[id];

		/* Mark this mm as having no context anymore */
		mm->context.id = MMU_NO_CONTEXT;
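		/* Keep FIRST_CONTEXT allocated in the map: the caller
		 * hands it straight to the incoming mm on return.
		 */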
		if (id != FIRST_CONTEXT) {
			context_mm[id] = NULL;
			__clear_bit(id, context_map);
		}
		if (IS_ENABLED(CONFIG_SMP))
			__clear_bit(id, stale_map[cpu]);
	}

	/* Flush the TLB for all contexts (not to be used on SMP) */
	_tlbil_all();

	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT;

	return FIRST_CONTEXT;
}

/* Note that this will also be called on SMP if all other CPUs are
 * offlined, which means that it may be called for cpu != 0. For
 * this to work, we somewhat assume that CPUs that are onlined
 * come up with a fully clean TLB (or are cleaned when offlined)
 */
static unsigned int steal_context_up(unsigned int id)
{
	struct mm_struct *mm;
	int cpu = smp_processor_id();

	/* Pick up the victim mm */
	mm = context_mm[id];

	/* Flush the TLB for that context */
	local_flush_tlb_mm(mm);

	/* Mark this mm as having no context anymore */
	mm->context.id = MMU_NO_CONTEXT;

	/* XXX This clear should ultimately be part of local_flush_tlb_mm */
	if (IS_ENABLED(CONFIG_SMP))
		__clear_bit(id, stale_map[cpu]);

	return id;
}

static void set_context(unsigned long id, pgd_t *pgd)
{
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		s16 offset = (s16)(__pa(swapper_pg_dir));

		/*
		 * Register M_TWB will contain the base address of the level 1
		 * table minus the lower part of the kernel PGDIR base address,
		 * so that all accesses to the level 1 table are done relative
		 * to the lower part of the kernel PGDIR base address.
		 */
		mtspr(SPRN_M_TWB, __pa(pgd) - offset);

		/* Update context: ids run 1..16 on the 8xx while the hardware
		 * CASID field holds 0..15, hence the -1.
		 */
		mtspr(SPRN_M_CASID, id - 1);

		/* sync */
		mb();
	} else if (kuap_is_disabled()) {
		if (IS_ENABLED(CONFIG_40x))
			mb();	/* sync */

		mtspr(SPRN_PID, id);
		isync();
	}
}

void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned int id;
	unsigned int i, cpu = smp_processor_id();
	unsigned long *map;

	/* No lockless fast path .. yet */
	raw_spin_lock(&context_lock);

	if (IS_ENABLED(CONFIG_SMP)) {
		/* Mark us active and the previous one not anymore */
		next->context.active++;
		if (prev) {
			WARN_ON(prev->context.active < 1);
			prev->context.active--;
		}
	}

 again:

	/* If we already have a valid assigned context, skip all that */
	id = next->context.id;
	if (likely(id != MMU_NO_CONTEXT))
		goto ctxt_ok;

	/* We really don't have a context, let's try to acquire one */
	id = next_context;
	if (id > LAST_CONTEXT)
		id = FIRST_CONTEXT;
	map = context_map;

	/* No more free contexts, let's try to steal one */
	if (nr_free_contexts == 0) {
		if (num_online_cpus() > 1) {
			id = steal_context_smp(id);
			if (id == MMU_NO_CONTEXT)
				goto again;
			goto stolen;
		}
		if (IS_ENABLED(CONFIG_PPC_8xx))
			id = steal_all_contexts();
		else
			id = steal_context_up(id);
		goto stolen;
	}
	nr_free_contexts--;

	/* We know there's at least one free context, try to find it. The
	 * search may run past LAST_CONTEXT, in which case we wrap back to
	 * FIRST_CONTEXT and keep scanning.
	 */
	while (__test_and_set_bit(id, map)) {
		id = find_next_zero_bit(map, LAST_CONTEXT+1, id);
		if (id > LAST_CONTEXT)
			id = FIRST_CONTEXT;
	}
 stolen:
	next_context = id + 1;
	context_mm[id] = next;
	next->context.id = id;

 ctxt_ok:

	/* If that context got marked stale on this CPU, then flush the
	 * local TLB for it and unmark it before we use it
	 */
	if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
		local_flush_tlb_mm(next);

		/* XXX This clear should ultimately be part of local_flush_tlb_mm */
		for (i = cpu_first_thread_sibling(cpu);
		     i <= cpu_last_thread_sibling(cpu); i++) {
			if (stale_map[i])
				__clear_bit(id, stale_map[i]);
		}
	}

	/* Flick the MMU and release lock */
	if (IS_ENABLED(CONFIG_BDI_SWITCH))
		abatron_pteptrs[1] = next->pgd;
	set_context(id, next->pgd);
#if defined(CONFIG_BOOKE_OR_40x) && defined(CONFIG_PPC_KUAP)
	tsk->thread.pid = id;
#endif
	raw_spin_unlock(&context_lock);
}

/*
 * Set up the context for a new address space.
 */
int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
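	/* A hardware context id is assigned lazily by switch_mmu_context() */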
	mm->context.id = MMU_NO_CONTEXT;
	mm->context.active = 0;
	pte_frag_set(&mm->context, NULL);
	return 0;
}

/*
 * We're finished using the context for an address space.
 */
void destroy_context(struct mm_struct *mm)
{
	unsigned long flags;
	unsigned int id;

	if (mm->context.id == MMU_NO_CONTEXT)
		return;

	WARN_ON(mm->context.active != 0);

	raw_spin_lock_irqsave(&context_lock, flags);
	id = mm->context.id;
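	/* Re-check under the lock: a context steal may have raced with us */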
	if (id != MMU_NO_CONTEXT) {
		__clear_bit(id, context_map);
		mm->context.id = MMU_NO_CONTEXT;
		context_mm[id] = NULL;
		nr_free_contexts++;
	}
	raw_spin_unlock_irqrestore(&context_lock, flags);
}

static int mmu_ctx_cpu_prepare(unsigned int cpu)
{
	/* We don't touch the boot CPU's map; it's allocated at boot and
	 * kept around forever
	 */
	if (cpu == boot_cpuid)
		return 0;

	stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL);
	return 0;
}

static int mmu_ctx_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	if (cpu == boot_cpuid)
		return 0;

	kfree(stale_map[cpu]);
	stale_map[cpu] = NULL;

	/* We also clear the cpu_vm_mask bits of CPUs going away */
	clear_tasks_mm_cpumask(cpu);
#endif
	return 0;
}

/*
 * Initialize the context management stuff.
 */
void __init mmu_context_init(void)
{
	/* Mark init_mm as being active on all possible CPUs since
	 * we'll get called with prev == init_mm the first time
	 * we schedule on a given CPU
	 */
	init_mm.context.active = NR_CPUS;

	/*
	 * Allocate the maps used by context management
	 */
	context_map = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
	if (!context_map)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      CTX_MAP_SIZE);
	context_mm = memblock_alloc(sizeof(void *) * (LAST_CONTEXT + 1),
				    SMP_CACHE_BYTES);
	if (!context_mm)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      sizeof(void *) * (LAST_CONTEXT + 1));
	if (IS_ENABLED(CONFIG_SMP)) {
		stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
		if (!stale_map[boot_cpuid])
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      CTX_MAP_SIZE);

		cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
					  "powerpc/mmu/ctx:prepare",
					  mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
	}

	printk(KERN_INFO
	       "MMU: Allocated %zu bytes of context maps for %d contexts\n",
	       2 * CTX_MAP_SIZE + (sizeof(void *) * (LAST_CONTEXT + 1)),
	       LAST_CONTEXT - FIRST_CONTEXT + 1);

	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_context = FIRST_CONTEXT;
	nr_free_contexts = LAST_CONTEXT - FIRST_CONTEXT + 1;
}