// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 Loongson Technology Corporation Limited
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>

#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

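/*
 * Temporary PGD used while the kasan shadow entries in swapper_pg_dir are
 * torn down and rebuilt in kasan_init(); see the PGDH switch there.
 */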
static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

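/*
 * "Empty" table-entry checks. During the early stage an entry is empty when
 * it is literally zero; afterwards an entry that still points at the shared
 * early shadow table (or the early shadow page, for PTEs) is treated as
 * empty and may be replaced by a private table or page.
 */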
#define __pgd_none(early, pgd) (early ? (pgd_val(pgd) == 0) : \
	(__pa(pgd_val(pgd)) == (unsigned long)__pa(kasan_early_shadow_pmd)))

#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
	(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))

#define __pte_none(early, pte) (early ? pte_none(pte) : \
	((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))

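/*
 * While this is set, kasan_mem_to_shadow() maps every address to the zeroed
 * early shadow page, so all accesses check out as valid until the real
 * shadow tables are in place.
 */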
bool kasan_early_stage = true;

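/*
 * Translate a kernel virtual address to its shadow address: pick the shadow
 * offset for the region the address lives in (XKPRANGE cached/uncached or
 * XKVRANGE), then scale the in-region offset by KASAN_SHADOW_SCALE_SHIFT.
 * Fixmap addresses and anything else we do not track fall back to the early
 * shadow page or NULL.
 */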
void *kasan_mem_to_shadow(const void *addr)
{
	if (kasan_early_stage) {
		return (void *)(kasan_early_shadow_page);
	} else {
		unsigned long maddr = (unsigned long)addr;
		unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
		unsigned long offset = 0;

		if (maddr >= FIXADDR_START)
			return (void *)(kasan_early_shadow_page);

		maddr &= XRANGE_SHADOW_MASK;
		switch (xrange) {
		case XKPRANGE_CC_SEG:
			offset = XKPRANGE_CC_SHADOW_OFFSET;
			break;
		case XKPRANGE_UC_SEG:
			offset = XKPRANGE_UC_SHADOW_OFFSET;
			break;
		case XKVRANGE_VC_SEG:
			offset = XKVRANGE_VC_SHADOW_OFFSET;
			break;
		default:
			WARN_ON(1);
			return NULL;
		}

		return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
	}
}

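/*
 * Inverse of kasan_mem_to_shadow(): work out which region a shadow address
 * belongs to by comparing against the per-region shadow offsets, then undo
 * the scaling. Addresses outside the shadow window are rejected.
 */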
const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	unsigned long addr = (unsigned long)shadow_addr;

	if (unlikely(addr > KASAN_SHADOW_END) ||
		unlikely(addr < KASAN_SHADOW_START)) {
		WARN_ON(1);
		return NULL;
	}

	if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
		return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
	else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
	else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
		return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
	else {
		WARN_ON(1);
		return NULL;
	}
}

/*
 * Allocate a zeroed page used as a shadow-memory page table (or as shadow
 * memory itself), preferably on the given NUMA node.
 */
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					__pa(MAX_DMA_ADDRESS),
					MEMBLOCK_ALLOC_ACCESSIBLE, node);
	return __pa(p);
}

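/*
 * Return the PTE for @addr, first populating the PMD entry if it is still
 * empty: with the shared early shadow PTE table during the early stage, or
 * with a private copy of it afterwards.
 */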
static pte_t *kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (__pmd_none(early, READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pte_phys), kasan_early_shadow_pte,
				sizeof(kasan_early_shadow_pte));

		pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
	}

	return pte_offset_kernel(pmdp, addr);
}

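/* Install a raw PGD entry with a single WRITE_ONCE(). */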
static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
{
	WRITE_ONCE(*pgdp, pgdval);
}

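/*
 * PGD-level counterpart of kasan_pte_offset(): populate the PGD entry with
 * the early shadow PMD table (or a private copy of it) if it is still empty,
 * then return the PMD covering @addr.
 */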
static pmd_t *kasan_pmd_offset(pgd_t *pgdp, unsigned long addr, int node,
				      bool early)
{
	if (__pgd_none(early, READ_ONCE(*pgdp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		if (!early)
			memcpy(__va(pmd_phys), kasan_early_shadow_pmd,
				sizeof(kasan_early_shadow_pmd));
		kasan_set_pgd(pgdp, __pgd((unsigned long)__va(pmd_phys)));
	}

	return (pmd_t *)((pmd_t *)pgd_val(*pgdp) + pmd_index(addr));
}

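/*
 * Map shadow pages for [addr, end) under one PMD, using the shared zero page
 * during the early stage and freshly allocated pages afterwards. The walk
 * stops early if it reaches an entry that is already populated.
 */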
static void kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
					__pa_symbol(kasan_early_shadow_page)
					      : kasan_alloc_zeroed_page(node);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
}

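/*
 * Populate the PMD entries covering [addr, end) under one PGD, descending
 * into kasan_pte_populate() for each; stops early at an already populated
 * PMD entry.
 */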
static void kasan_pmd_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pgdp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
}

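/*
 * Walk the kernel PGD and populate shadow mappings for [addr, end), either
 * with the shared early shadow tables (early == true) or with private
 * tables and pages.
 */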
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);

	do {
		next = pgd_addr_end(addr, end);
		kasan_pmd_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

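/*
 * Nothing is mapped here: while kasan_early_stage is set, every shadow
 * lookup resolves to the zeroed early shadow page. Just sanity-check that
 * the shadow window is PGD-aligned.
 */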
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

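/* Reset the PGD entries covering [start, end) to empty. */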
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
}

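/*
 * Build the real shadow mappings: switch to a temporary PGD, clear and
 * repopulate the shadow region of swapper_pg_dir, map shadow for the linear,
 * vmalloc/vmemmap and modules ranges, then switch back and enable reports.
 */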
void __init kasan_init(void)
{
	u64 i;
	phys_addr_t pa_start, pa_end;

	/*
	 * The PGD entries were populated with invalid_pmd_table or
	 * invalid_pud_table in pagetable_init(), depending on how many page
	 * table levels are in use. The kasan shadow PGD entries therefore
	 * hold non-zero values, pgd_none() would return false, and the
	 * populate calls below would never install new tables, so clear
	 * them first.
	 */
	memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
	csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Maps everything to a single page of zeroes */
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
					kasan_mem_to_shadow((void *)VMEMMAP_END));

	kasan_early_stage = false;

	/* Populate the linear mapping */
	for_each_mem_range(i, &pa_start, &pa_end) {
		void *start = (void *)phys_to_virt(pa_start);
		void *end   = (void *)phys_to_virt(pa_end);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
			(unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
	}

	/* Populate modules mapping */
	kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
		(unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
	/*
	 * KASAN may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)),
				PAGE_KERNEL_RO));
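
	/*
	 * Reset the early shadow page to all-zero (fully accessible) and
	 * switch back to swapper_pg_dir, whose kasan shadow entries are now
	 * fully populated.
	 */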
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
	local_flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized.\n");
}