1/*
2 * Xtensa KASAN shadow map initialization
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License.  See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2017 Cadence Design Systems Inc.
9 */
10
11#include <linux/memblock.h>
12#include <linux/init_task.h>
13#include <linux/kasan.h>
14#include <linux/kernel.h>
15#include <asm/initialize_mmu.h>
16#include <asm/tlbflush.h>
17
18void __init kasan_early_init(void)
19{
20	unsigned long vaddr = KASAN_SHADOW_START;
21	pmd_t *pmd = pmd_off_k(vaddr);
22	int i;
23
24	for (i = 0; i < PTRS_PER_PTE; ++i)
25		set_pte(kasan_early_shadow_pte + i,
26			mk_pte(virt_to_page(kasan_early_shadow_page),
27				PAGE_KERNEL));
28
29	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
30		BUG_ON(!pmd_none(*pmd));
31		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
32	}
33}
34
/*
 * Build a real, writable shadow mapping for the range [start, end).
 *
 * The pte array allocated from memblock below is used directly as the
 * page-table storage: each PTRS_PER_PTE-sized slice of it becomes the
 * pte table installed into one pmd entry.  Every pte in turn is pointed
 * at a freshly allocated physical page, and the whole shadow range is
 * zeroed once the mappings are live.
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pmd_t *pmd = pmd_off_k(vaddr);
	/* Backing store for the pte tables themselves (see comment above). */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Allocate one physical page per shadow pte and record it. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Install each PTRS_PER_PTE-sized slice of the pte array into a pmd. */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/* Make the new mappings visible before writing through them. */
	local_flush_tlb_all();
	memset(start, 0, end - start);
}
72
73void __init kasan_init(void)
74{
75	int i;
76
77	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
78		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
79	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);
80
81	/*
82	 * Replace shadow map pages that cover addresses from VMALLOC area
83	 * start to the end of KSEG with clean writable pages.
84	 */
85	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
86		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));
87
88	/*
89	 * Write protect kasan_early_shadow_page and zero-initialize it again.
90	 */
91	for (i = 0; i < PTRS_PER_PTE; ++i)
92		set_pte(kasan_early_shadow_pte + i,
93			mk_pte(virt_to_page(kasan_early_shadow_page),
94				PAGE_KERNEL_RO));
95
96	local_flush_tlb_all();
97	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
98
99	/* At this point kasan is fully initialized. Enable error messages. */
100	current->kasan_depth = 0;
101	pr_info("KernelAddressSanitizer initialized\n");
102}
103