// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

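/*
 * Temporary pgd used by kasan_init_shadow() to keep the early shadow
 * mapped while the kasan entries in swapper_pg_dir are torn down and
 * rebuilt.
 */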
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */

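/*
 * Allocate a zeroed page for a shadow page table. memblock_alloc_try_nid()
 * returns zeroed memory, so this is used for the intermediate table levels.
 * Allocation failure is fatal: there is no way to recover this early in
 * boot, hence the panic().
 */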
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					      __pa(MAX_DMA_ADDRESS),
					      MEMBLOCK_ALLOC_NOLEAKTRACE, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

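/*
 * As above, but without the implicit zeroing: the _raw variant skips the
 * memset, since callers initialize the page themselves with
 * KASAN_SHADOW_INIT (0 for generic KASAN, 0xFF for sw-tags).
 */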
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
						__pa(MAX_DMA_ADDRESS),
						MEMBLOCK_ALLOC_NOLEAKTRACE,
						node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

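/*
 * The kasan_p*_offset() helpers below plug in the next-level table when
 * the entry is empty. In the early case that table is one of the
 * statically allocated kasan_early_shadow_* tables (via __pa_symbol, per
 * the comment above); otherwise it is a freshly allocated zeroed page.
 * The p*_offset_kimg() variants are used early because the regular
 * offset helpers rely on the linear map, which is not available yet.
 */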
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, P4D_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

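/*
 * The kasan_p*_populate() walkers below map shadow for [addr, end).
 * Note the loop condition: each walk also stops if it runs into an
 * entry that is already populated (the p*_none() check fails), so
 * shadow mapped by an earlier call is left in place rather than
 * clobbered.
 */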
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

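/*
 * Top-level entry point for the walk. With early == true this wires up
 * the statically allocated early shadow tables; with early == false it
 * builds the real shadow out of memblock-allocated pages.
 */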
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

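/*
 * Shadow address arithmetic, for reference: every byte of shadow covers
 * (1 << KASAN_SHADOW_SCALE_SHIFT) bytes of address space (8 for generic
 * KASAN, 16 for sw-tags), so:
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET
 *
 * The first BUILD_BUG_ON() below checks that KASAN_SHADOW_END equals
 * shadow(~0UL) + 1 under this formula.
 */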
/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir, so that the shadow
 * stays mapped and instrumented code keeps working after the switch to
 * the new page tables.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used here
	 * because it's a no-op on 2- and 3-level page-table
	 * setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

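/*
 * Replace the early shadow with the real one. In outline: switch TTBR1
 * to tmp_pg_dir so the early shadow stays usable, clear the kasan pgds
 * in swapper_pg_dir, map fresh shadow for the kernel image and for all
 * memblock memory, point the remaining gaps back at the early shadow,
 * remap the early shadow page read-only, and switch back to
 * swapper_pg_dir.
 */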
static void __init kasan_init_shadow(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start;
	u64 vmalloc_shadow_end;
	phys_addr_t pa_start, pa_end;
	u64 i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(KERNEL_START) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(KERNEL_END));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);

	vmalloc_shadow_end = (u64)kasan_mem_to_shadow((void *)VMALLOC_END);

	/*
	 * We are about to perform the proper setup of shadow memory.
	 * First we must unmap the early shadow (the clear_pgds() call
	 * below). However, instrumented code can't execute without
	 * shadow memory, so tmp_pg_dir is used to keep the early shadow
	 * mapped until the full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir), idmap_pg_dir);

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(KERNEL_START))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);

	BUILD_BUG_ON(VMALLOC_START != MODULES_END);
	kasan_populate_early_shadow((void *)vmalloc_shadow_end,
				    (void *)KASAN_SHADOW_END);

	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)__phys_to_virt(pa_start);
		void *end = (void *)__phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);
}

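/*
 * init_task starts out with a nonzero kasan_depth so that reports are
 * suppressed while the shadow is still the early one; dropping it to 0
 * here enables error reporting.
 */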
static void __init kasan_init_depth(void)
{
	init_task.kasan_depth = 0;
}

#ifdef CONFIG_KASAN_VMALLOC
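/*
 * With KASAN_VMALLOC, vmalloc shadow is normally populated on demand,
 * but vm areas registered before the vmalloc subsystem is up need
 * their shadow mapped explicitly here.
 */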
void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
{
	unsigned long shadow_start, shadow_end;

	if (!is_vmalloc_or_module_addr(start))
		return;

	shadow_start = (unsigned long)kasan_mem_to_shadow(start);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow(start + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);
	kasan_map_populate(shadow_start, shadow_end, NUMA_NO_NODE);
}
#endif

void __init kasan_init(void)
{
	kasan_init_shadow();
	kasan_init_depth();
#if defined(CONFIG_KASAN_GENERIC)
	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
	pr_info("KernelAddressSanitizer initialized (generic)\n");
#endif
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */