// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains KASAN shadow initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>

#include "kasan.h"

/*
 * This page serves two purposes:
 *   - It is used as early shadow memory: the entire shadow region is
 *     populated with this page before normal shadow memory can be set up.
 *   - Later it is reused as the zero shadow, covering large ranges of
 *     memory that may be accessed but are not tracked by KASAN
 *     (vmalloc/vmemmap ...).
 */
unsigned char kasan_early_shadow_page[PAGE_SIZE] __page_aligned_bss;
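
/*
 * Shadow mapping recap: with generic KASAN each shadow byte tracks
 * KASAN_SHADOW_SCALE_SIZE (1 << KASAN_SHADOW_SCALE_SHIFT, i.e. 8) bytes
 * of memory:
 *
 *	shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
 *
 * so one read-only shadow page describes PAGE_SIZE * 8 bytes, and by
 * sharing the early page tables below at every level that single page can
 * back arbitrarily large regions. The kasan_{p4d,pud,pmd,pte}_table()
 * helpers that follow detect whether an entry points into one of these
 * shared early tables, so the teardown code never frees them.
 */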

#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return pgd_page(pgd) == virt_to_page(lm_alias(kasan_early_shadow_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_early_shadow_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
	return p4d_page(p4d) == virt_to_page(lm_alias(kasan_early_shadow_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
	return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
	return pud_page(pud) == virt_to_page(lm_alias(kasan_early_shadow_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
	return false;
}
#endif
pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
	return pmd_page(pmd) == virt_to_page(lm_alias(kasan_early_shadow_pte));
}

static inline bool kasan_early_shadow_page_entry(pte_t pte)
{
	return pte_page(pte) == virt_to_page(lm_alias(kasan_early_shadow_page));
}

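/*
 * Allocate a naturally aligned, zeroed block from memblock for early page
 * tables. Allocation failure this early is unrecoverable, hence the panic.
 */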
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
					   MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%s: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));

	return ptr;
}

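/*
 * Map every page in [addr, end) at the PTE level to the shared early shadow
 * page. The mapping is write-protected: the zero shadow must never be
 * written to, since it is shared between many ranges.
 */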
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, addr);
	pte_t zero_pte;

	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL);
	zero_pte = pte_wrprotect(zero_pte);

	while (addr + PAGE_SIZE <= end) {
		set_pte_at(&init_mm, addr, pte, zero_pte);
		addr += PAGE_SIZE;
		pte = pte_offset_kernel(pmd, addr);
	}
}

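/*
 * Populate the PMD level for [addr, end). PMD-aligned, PMD-sized chunks are
 * pointed at the shared early shadow PTE table; anything smaller gets a
 * freshly allocated PTE table and falls through to zero_pte_populate().
 */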
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		next = pmd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pmd_none(*pmd)) {
			pte_t *p;

			if (slab_is_available())
				p = pte_alloc_one_kernel(&init_mm);
			else
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			if (!p)
				return -ENOMEM;

			pmd_populate_kernel(&init_mm, pmd, p);
		}
		zero_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end);

	return 0;
}

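/*
 * Same pattern one level up: fully covered PUD ranges share the early
 * shadow PMD (which in turn points at the early shadow PTE table), while
 * partially covered ranges get a real PMD table and recurse into
 * zero_pmd_populate().
 */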
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	pud_t *pud = pud_offset(p4d, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
			pmd_t *pmd;

			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pud_none(*pud)) {
			pmd_t *p;

			if (slab_is_available()) {
				p = pmd_alloc(&init_mm, pud, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
#ifdef CONFIG_LOONGARCH
				pmd_init((unsigned long)p,
					 (unsigned long)invalid_pte_table);
#endif
				pud_populate(&init_mm, pud, p);
			}
		}
		zero_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end);

	return 0;
}

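/*
 * Top of the recursion for 5-level configurations: fully covered P4D-sized
 * chunks share the whole early shadow hierarchy (pud -> pmd -> pte), while
 * partially covered ranges allocate a PUD table and recurse into
 * zero_pud_populate().
 */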
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
				unsigned long end)
{
	p4d_t *p4d = p4d_offset(pgd, addr);
	unsigned long next;

	do {
		next = p4d_addr_end(addr, end);
		if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
			pud_t *pud;
			pmd_t *pmd;

			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (p4d_none(*p4d)) {
			pud_t *p;

			if (slab_is_available()) {
				p = pud_alloc(&init_mm, p4d, addr);
				if (!p)
					return -ENOMEM;
			} else {
				p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
#ifdef CONFIG_LOONGARCH
				pud_init((unsigned long)p,
					 (unsigned long)invalid_pmd_table);
#endif
				p4d_populate(&init_mm, p4d, p);
			}
		}
		zero_pud_populate(p4d, addr, next);
	} while (p4d++, addr = next, addr != end);

	return 0;
}

/**
 * kasan_populate_early_shadow - populate a shadow memory region with
 *                               kasan_early_shadow_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 *
 * Return: 0 on success, -ENOMEM if a page table allocation fails.
 */
int __ref kasan_populate_early_shadow(const void *shadow_start,
					const void *shadow_end)
{
	unsigned long addr = (unsigned long)shadow_start;
	unsigned long end = (unsigned long)shadow_end;
	pgd_t *pgd = pgd_offset_k(addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);

		if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
			p4d_t *p4d;
			pud_t *pud;
			pmd_t *pmd;

			/*
			 * kasan_early_shadow_pud should already be populated
			 * with pmds at this point. The p4d/pud/pmd_populate*()
			 * calls below are only needed for 2- and 3-level page
			 * tables, where the upper levels are folded and the
			 * corresponding populate calls are no-ops.
			 */
			pgd_populate(&init_mm, pgd,
					lm_alias(kasan_early_shadow_p4d));
			p4d = p4d_offset(pgd, addr);
			p4d_populate(&init_mm, p4d,
					lm_alias(kasan_early_shadow_pud));
			pud = pud_offset(p4d, addr);
			pud_populate(&init_mm, pud,
					lm_alias(kasan_early_shadow_pmd));
			pmd = pmd_offset(pud, addr);
			pmd_populate_kernel(&init_mm, pmd,
					lm_alias(kasan_early_shadow_pte));
			continue;
		}

		if (pgd_none(*pgd)) {
			p4d_t *p;

			if (slab_is_available()) {
				p = p4d_alloc(&init_mm, pgd, addr);
				if (!p)
					return -ENOMEM;
			} else {
				pgd_populate(&init_mm, pgd,
					early_alloc(PAGE_SIZE, NUMA_NO_NODE));
			}
		}
		zero_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);

	return 0;
}

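/*
 * The kasan_free_{pte,pmd,pud,p4d} helpers below free a page-table page
 * once every entry in it has been cleared, then clear the entry in the
 * parent table. They are only ever handed tables that were allocated by
 * this file, never the shared early shadow tables.
 */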
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (!pte_none(*pte))
			return;
	}

	pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
	pmd_clear(pmd);
}

static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (!pmd_none(*pmd))
			return;
	}

	pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
	pud_clear(pud);
}

static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (!pud_none(*pud))
			return;
	}

	pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
	p4d_clear(p4d);
}

static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
	p4d_t *p4d;
	int i;

	for (i = 0; i < PTRS_PER_P4D; i++) {
		p4d = p4d_start + i;
		if (!p4d_none(*p4d))
			return;
	}

	p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
	pgd_clear(pgd);
}

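/*
 * The kasan_remove_*_table() walkers below undo kasan_populate_early_shadow()
 * for a range: entries that point at the shared early shadow tables are
 * simply cleared (when the range covers them entirely), while partially
 * covered levels are descended into and freed bottom-up via kasan_free_*().
 */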
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		if (WARN_ON(!kasan_early_shadow_page_entry(*pte)))
			continue;
		pte_clear(&init_mm, addr, pte);
	}
}

static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pmd++) {
		pte_t *pte;

		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (kasan_pte_table(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				pmd_clear(pmd);
				continue;
			}
		}
		pte = pte_offset_kernel(pmd, addr);
		kasan_remove_pte_table(pte, addr, next);
		kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
	}
}

static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, pud++) {
		pmd_t *pmd, *pmd_base;

		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (kasan_pmd_table(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				pud_clear(pud);
				continue;
			}
		}
		pmd = pmd_offset(pud, addr);
		pmd_base = pmd_offset(pud, 0);
		kasan_remove_pmd_table(pmd, addr, next);
		kasan_free_pmd(pmd_base, pud);
	}
}

static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
				unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next, p4d++) {
		pud_t *pud;

		next = p4d_addr_end(addr, end);

		if (!p4d_present(*p4d))
			continue;

		if (kasan_pud_table(*p4d)) {
			if (IS_ALIGNED(addr, P4D_SIZE) &&
			    IS_ALIGNED(next, P4D_SIZE)) {
				p4d_clear(p4d);
				continue;
			}
		}
		pud = pud_offset(p4d, addr);
		kasan_remove_pud_table(pud, addr, next);
		kasan_free_pud(pud_offset(p4d, 0), p4d);
	}
}

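/**
 * kasan_remove_zero_shadow - unmap the zero shadow backing a memory range
 * @start: start of the original (non-shadow) memory range
 * @size: size of the original memory range
 *
 * Both @start and @size must map to whole shadow pages, i.e. be aligned to
 * KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE.
 */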
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
	unsigned long addr, end, next;
	pgd_t *pgd;

	addr = (unsigned long)kasan_mem_to_shadow(start);
	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return;

	for (; addr < end; addr = next) {
		p4d_t *p4d;

		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		if (kasan_p4d_table(*pgd)) {
			if (IS_ALIGNED(addr, PGDIR_SIZE) &&
			    IS_ALIGNED(next, PGDIR_SIZE)) {
				pgd_clear(pgd);
				continue;
			}
		}

		p4d = p4d_offset(pgd, addr);
		kasan_remove_p4d_table(p4d, addr, next);
		kasan_free_p4d(p4d_offset(pgd, 0), pgd);
	}
}

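/**
 * kasan_add_zero_shadow - back a memory range with the shared zero shadow
 * @start: start of the original (non-shadow) memory range
 * @size: size of the original memory range
 *
 * Used when memory that KASAN does not otherwise track (for example memory
 * added at runtime) appears and its shadow range must at least be readable.
 * On failure, any partially populated shadow is torn down again.
 *
 * Return: 0 on success, -EINVAL on misaligned input, -ENOMEM on allocation
 * failure.
 */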
int kasan_add_zero_shadow(void *start, unsigned long size)
{
	int ret;
	void *shadow_start, *shadow_end;

	shadow_start = kasan_mem_to_shadow(start);
	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

	if (WARN_ON((unsigned long)start %
			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
		return -EINVAL;

	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
	if (ret)
		kasan_remove_zero_shadow(start, size);
	return ret;
}