xref: /kernel/linux/linux-6.6/arch/m68k/mm/mcfmmu.c (revision 62306a36)
// SPDX-License-Identifier: GPL-2.0
/*
 * Based upon linux/arch/m68k/mm/sun3mmu.c
 * Based upon linux/arch/ppc/mm/mmu_context.c
 *
 * Implementations of mm routines specific to the ColdFire MMU.
 *
 * Copyright (c) 2008 Freescale Semiconductor, Inc.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/memblock.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mcf_pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

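/*
 * KMAPAREA(x) is true when the address lies in the kernel mapping
 * window between VMALLOC_START and KMAP_END, i.e. an address that is
 * mapped by init_mm rather than by a user process.
 */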
#define KMAPAREA(x)	((x >= VMALLOC_START) && (x < KMAP_END))

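/*
 * MMU context bookkeeping, modelled on the classic ppc allocator:
 * a bitmap of contexts in use, the next context number to hand out,
 * a free count, and a context -> mm back-map used when stealing.
 */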
mm_context_t next_mmu_context;
unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
atomic_t nr_free_contexts;
struct mm_struct *context_mm[LAST_CONTEXT+1];
unsigned long num_pages;

/*
 * ColdFire paging_init derived from sun3.
 */
void __init paging_init(void)
{
	pgd_t *pg_dir;
	pte_t *pg_table;
	unsigned long address, size;
	unsigned long next_pgtable, bootmem_end;
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
	int i;

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pg_dir = swapper_pg_dir;
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

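	/*
	 * Allocate one contiguous block of page tables large enough to
	 * hold a PTE for every RAM page (num_pages), rounded up to a
	 * whole number of pages.
	 */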
	size = num_pages * sizeof(pte_t);
	size = (size + PAGE_SIZE) & ~(PAGE_SIZE-1);
	next_pgtable = (unsigned long) memblock_alloc(size, PAGE_SIZE);
	if (!next_pgtable)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, PAGE_SIZE);

	bootmem_end = (next_pgtable + size + PAGE_SIZE) & PAGE_MASK;
	pg_dir += PAGE_OFFSET >> PGDIR_SHIFT;

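	/*
	 * Cover PAGE_OFFSET..high_memory with a linear mapping: hook
	 * one PTE table at a time into the pgd and point each PTE at
	 * the physical page backing that kernel virtual address.
	 */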
	address = PAGE_OFFSET;
	while (address < (unsigned long)high_memory) {
		pg_table = (pte_t *) next_pgtable;
		next_pgtable += PTRS_PER_PTE * sizeof(pte_t);
		pgd_val(*pg_dir) = (unsigned long) pg_table;
		pg_dir++;

		/* fill this table with kernel virtual address mappings */
		for (i = 0; i < PTRS_PER_PTE; ++i, ++pg_table) {
			pte_t pte = pfn_pte(virt_to_pfn((void *)address),
					    PAGE_INIT);
			if (address >= (unsigned long) high_memory)
				pte_val(pte) = 0;

			set_pte(pg_table, pte);
			address += PAGE_SIZE;
		}
	}

	current->mm = NULL;
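	/* All ColdFire RAM is placed in a single ZONE_DMA zone */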
	max_zone_pfn[ZONE_DMA] = PFN_DOWN(_ramend);
	free_area_init(max_zone_pfn);
}

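/*
 * TLB miss handler: resolve the faulting address through the page
 * tables and, if a valid mapping exists, load it into the ColdFire
 * ITLB/DTLB via the MMUTR/MMUDR/MMUOR registers.  Returns 0 when a
 * TLB entry was loaded, -1 to let the generic fault path handle it.
 */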
int cf_tlb_miss(struct pt_regs *regs, int write, int dtlb, int extension_word)
{
	unsigned long flags, mmuar, mmutr;
	struct mm_struct *mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int ret = -1;
	int asid;

	local_irq_save(flags);

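	/*
	 * Fault address: a data-TLB miss latches it in the MMUAR
	 * register; for an instruction-TLB miss it is reconstructed
	 * from the faulting PC and the instruction extension word.
	 */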
	mmuar = (dtlb) ? mmu_read(MMUAR) :
		regs->pc + (extension_word * sizeof(long));

	mm = (!user_mode(regs) && KMAPAREA(mmuar)) ? &init_mm : current->mm;
	if (!mm)
		goto out;

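	/*
	 * Walk the page tables; any missing level means there is no
	 * mapping to load, so fall back to the generic fault handler.
	 */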
	pgd = pgd_offset(mm, mmuar);
	if (pgd_none(*pgd))
		goto out;

	p4d = p4d_offset(pgd, mmuar);
	if (p4d_none(*p4d))
		goto out;

	pud = pud_offset(p4d, mmuar);
	if (pud_none(*pud))
		goto out;

	pmd = pmd_offset(pud, mmuar);
	if (pmd_none(*pmd))
		goto out;

	pte = (KMAPAREA(mmuar)) ? pte_offset_kernel(pmd, mmuar)
				: pte_offset_map(pmd, mmuar);
	if (!pte || pte_none(*pte) || !pte_present(*pte))
		goto out;

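	/*
	 * Referenced/dirty state is maintained in software: mark the
	 * page young on every miss, dirty on a write, and keep clean
	 * user pages write-protected so that the next write faults
	 * back in here to set the dirty bit.
	 */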
	if (write) {
		if (!pte_write(*pte))
			goto out;
		set_pte(pte, pte_mkdirty(*pte));
	}

	set_pte(pte, pte_mkyoung(*pte));
	asid = mm->context & 0xff;
	if (!pte_dirty(*pte) && !KMAPAREA(mmuar))
		set_pte(pte, pte_wrprotect(*pte));

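	/*
	 * Build the TLB entry: MMUTR takes the virtual page, ASID and
	 * valid bit, MMUDR the physical page and access attributes;
	 * the MMUOR write then commits the entry to the DTLB or ITLB.
	 */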
	mmutr = (mmuar & PAGE_MASK) | (asid << MMUTR_IDN) | MMUTR_V;
	if ((mmuar < TASK_UNMAPPED_BASE) || (mmuar >= TASK_SIZE))
		mmutr |= (pte->pte & CF_PAGE_MMUTR_MASK) >> CF_PAGE_MMUTR_SHIFT;
	mmu_write(MMUTR, mmutr);

	mmu_write(MMUDR, (pte_val(*pte) & PAGE_MASK) |
		((pte->pte) & CF_PAGE_MMUDR_MASK) | MMUDR_SZ_8KB | MMUDR_X);

	if (dtlb)
		mmu_write(MMUOR, MMUOR_ACC | MMUOR_UAA);
	else
		mmu_write(MMUOR, MMUOR_ITLB | MMUOR_ACC | MMUOR_UAA);
	ret = 0;
out:
	if (pte && !KMAPAREA(mmuar))
		pte_unmap(pte);
	local_irq_restore(flags);
	return ret;
}

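/*
 * Register the single block of ColdFire RAM with memblock, reserve
 * the pages already occupied by the kernel image, and establish the
 * pfn limits and high_memory for the rest of the mm setup.
 */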
void __init cf_bootmem_alloc(void)
{
	unsigned long memstart;

	/* _rambase and _ramend will be naturally page aligned */
	m68k_memory[0].addr = _rambase;
	m68k_memory[0].size = _ramend - _rambase;

	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
			  MEMBLOCK_NONE);

	/* compute total pages in system */
	num_pages = PFN_DOWN(_ramend - _rambase);

	/* page numbers */
	memstart = PAGE_ALIGN(_ramstart);
	min_low_pfn = PFN_DOWN(_rambase);
	max_pfn = max_low_pfn = PFN_DOWN(_ramend);
	high_memory = (void *)_ramend;

	/* Reserve kernel text/data/bss */
	memblock_reserve(_rambase, memstart - _rambase);

	m68k_virt_to_node_shift = fls(_ramend - 1) - 6;
	module_fixup(NULL, __start_fixup, __stop_fixup);

	/* setup node data */
	m68k_setup_node(0);
}

/*
 * Initialize the context management stuff.
 * The following was taken from arch/ppc/mmu_context.c
 */
void __init cf_mmu_context_init(void)
{
	/*
	 * Some processors have too few contexts to reserve one for
	 * init_mm, and require using context 0 for a normal task.
	 * Other processors reserve the use of context zero for the kernel.
	 * This code assumes FIRST_CONTEXT < 32.
	 */
	context_map[0] = (1 << FIRST_CONTEXT) - 1;
	next_mmu_context = FIRST_CONTEXT;
	atomic_set(&nr_free_contexts, LAST_CONTEXT - FIRST_CONTEXT + 1);
}

/*
 * Steal a context from a task that has one at the moment.
 * This isn't an LRU system, it just frees up each context in
 * turn (sort-of pseudo-random replacement :).  This would be the
 * place to implement an LRU scheme if anyone was motivated to do it.
 *  -- paulus
 */
void steal_context(void)
{
	struct mm_struct *mm;
	/*
	 * free up context `next_mmu_context'
	 * if we shouldn't free context 0, don't...
	 */
	if (next_mmu_context < FIRST_CONTEXT)
		next_mmu_context = FIRST_CONTEXT;
	mm = context_mm[next_mmu_context];
	flush_tlb_mm(mm);
	destroy_context(mm);
}

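/*
 * Map each VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combination to the
 * ColdFire page protection bits.  DECLARE_VM_GET_PAGE_PROT below
 * generates vm_get_page_prot() from this table.
 */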
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_WRITE]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE),
	[VM_WRITE | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE),
	[VM_EXEC]					= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_EXEC | VM_WRITE | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_WRITABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE),
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED),
	[VM_SHARED | VM_EXEC]				= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(CF_PAGE_VALID |
								   CF_PAGE_ACCESSED |
								   CF_PAGE_READABLE |
								   CF_PAGE_SHARED |
								   CF_PAGE_EXEC)
};
DECLARE_VM_GET_PAGE_PROT