/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/prctl.h>
#include "cfi.h"
#include "ld_log.h"
#include "namespace.h"

/* This module provides support for LLVM Cross-DSO CFI by implementing the __cfi_slowpath() and __cfi_slowpath_diag()
 * functions. These two functions are called before a dso accesses another dso's resources; their job is to locate
 * the __cfi_check() of the target dso and call it. To make that possible, the loader records the relationship
 * between a dso and its __cfi_check addr in the CFI shadow while the dso is being loaded. The CFI shadow is an array
 * of shadow values. Each shadow value covers one LIBRARY_ALIGNMENT memory range, so each dso is mapped to one or
 * more shadow values in the CFI shadow, depending on the address range of the dso.
 * There are 3 types of shadow value:
 * - invalid (0)          : the target addr does not belong to any loaded dso.
 * - uncheck (1)          : this LIBRARY_ALIGNMENT memory range belongs to a dso, but no CFI check is needed.
 * - valid   (2 - 0xFFFF) : this LIBRARY_ALIGNMENT memory range belongs to a dso, and a CFI check is needed.
 * A valid shadow value records the distance from the end of a LIBRARY_ALIGNMENT memory range to the __cfi_check addr
 * of the dso, in units of 4096 bytes (__cfi_check is aligned to 4096).
 * A valid shadow value is calculated as below:
 * sv = (AlignUp(__cfi_check, LIBRARY_ALIGNMENT) - __cfi_check + N * LIBRARY_ALIGNMENT) / 4096 + 2;
 *
 * N   : the index, starting at 0, of the LIBRARY_ALIGNMENT memory range within the dso.
 * + 2 : to avoid conflicts with the invalid and uncheck shadow values.
 *
 * Below is an example of calculating the shadow values of a dso.
 *
 *                                                         liba.so
 *                                                            /\
 *           /''''''''''''''''''''''''''''''''''''''''''''''''  '''''''''''''''''''''''''''''''''''''''''''''''''''\
 *           0x40000  __cfi_check addr = 0x42000             0x80000                 0xA0000                 0xC0000
 *              +---------^-------------------------------------^-----------------------^-----------------------+
 * Memory       |         |                                     |                       |                       |
 *              +-----------------------------------------------------------------------------------------------+
 *              \............. LIBRARY_ALIGNMENT ............../\.............. LIBRARY_ALIGNMENT .............../
 *                \                                           /                                               /
 *                  \                                        /                                             /
 *                    \                                     /                                           /
 *                      \                                  /                                         /
 *                        \                               /                                       /
 *              +-----------------------------------------------------------------------------------------------+
 * CFI shadow   | invalid |              sv1              |                    sv2                    | invalid |
 *              +-----------------------------------------------------------------------------------------------+
 * sv1 = (0x80000 - 0x42000 + 0 * LIBRARY_ALIGNMENT) / 4096 + 2 = 64
 * sv2 = (0x80000 - 0x42000 + 1 * LIBRARY_ALIGNMENT) / 4096 + 2 = 128
 *
 * Calculating the __cfi_check address is the reverse process:
 * - First, align the target addr up to LIBRARY_ALIGNMENT to locate the corresponding shadow value.
 * - Then, calculate the __cfi_check addr from that shadow value.
 *
 * For the algorithm to work, the start addr of each dso should be aligned to LIBRARY_ALIGNMENT. */
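
/* Continuing the example above, the reverse lookup recovers the same __cfi_check address from either range
 * (illustrative arithmetic only; see get_cfi_check_addr() below for the actual computation):
 * - target addr in [0x40000, 0x80000): range end = 0x80000, sv1 = 64,  __cfi_check = 0x80000 - (64 - 2)  * 4096 = 0x42000
 * - target addr in [0x80000, 0xC0000): range end = 0xC0000, sv2 = 128, __cfi_check = 0xC0000 - (128 - 2) * 4096 = 0x42000 */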

#define MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
#define ALIGN_UP(a, b) (((a) + (b) - 1) & -(b))
#define ALIGN_DOWN(a, b) ((a) & -(b))
#if DL_FDPIC
#define LADDR(p, v) laddr((p), (v))
#else
#define LADDR(p, v) (void *)((p)->base + (v))
#endif

/* Function ptr for __cfi_check() */
typedef int (*cfi_check_t)(uint64_t, void *, void *);

static const uintptr_t shadow_granularity = LIBRARY_ALIGNMENT_BITS;
static const uintptr_t cfi_check_granularity = 12;
// __cfi_check should be 4k aligned.
static const uintptr_t cfi_check_alignment = 1UL << cfi_check_granularity;
static const uintptr_t shadow_alignment = 1UL << shadow_granularity;
static const uint16_t shadow_value_step = 1 << (shadow_granularity - cfi_check_granularity);
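/* For illustration, assuming LIBRARY_ALIGNMENT_BITS is 18 (LIBRARY_ALIGNMENT = 256KB), as in the example above:
 * shadow_alignment = 0x40000, cfi_check_alignment = 0x1000 and shadow_value_step = 1 << (18 - 12) = 64,
 * i.e. the valid shadow values of consecutive LIBRARY_ALIGNMENT ranges of the same dso differ by 64
 * (64 * 4096 == LIBRARY_ALIGNMENT). */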

static uintptr_t shadow_size = 0;
/* Start addr of the CFI shadow */
static char *cfi_shadow_start = NULL;
/* List head of all the DSOs loaded by the process */
static struct dso *dso_list_head = NULL;

static struct dso *pldso = NULL;
static struct dso *r_app = NULL;
static struct dso *r_vdso = NULL;

/* Shadow values */
/* The related shadow value(s) will be set to `sv_invalid` when:
 * - initializing the CFI shadow.
 * - removing a dso. */
static const uint16_t sv_invalid = 0;
/* The related shadow value(s) will be set to `sv_uncheck` if:
 * - the DSO does not enable Cross-DSO CFI.
 * - the DSO enables Cross-DSO CFI but is larger than 256M ((UINT16_MAX - 2) * cfi_check_alignment); the shadow
 *   values of the part beyond that limit are set to `sv_uncheck`. */
static const uint16_t sv_uncheck = 1;
/* If a DSO enables Cross-DSO CFI, its shadow values should be valid. Because of the definition of `sv_invalid`
 * and `sv_uncheck`, a valid shadow value is at least 2. */
static const uint16_t sv_valid_min = 2;

#if defined(__LP64__)
static const uintptr_t max_target_addr = 0xffffffffffff;
#else
static const uintptr_t max_target_addr = 0xffffffff;
#endif

/* Create a CFI shadow */
static int create_cfi_shadow(void);

/* Map dsos to the CFI shadow */
static int add_dso_to_cfi_shadow(struct dso *dso);
static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type);

/* Find the __cfi_check() of the target dso and call it */
void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr);
void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data);

static inline uintptr_t addr_to_offset(uintptr_t addr, int bits)
{
    /* Convert addr to a CFI shadow offset.
     * Shift left by 1 bit because each shadow value is a uint16_t. */
    return (addr >> bits) << 1;
}
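/* For example, with shadow_granularity == 18 (assumed here, matching the example above):
 * addr_to_offset(0x80000, 18) == (0x80000 >> 18) << 1 == 2 << 1 == 4,
 * i.e. the shadow value covering the range starting at 0x80000 lives at byte offset 4 of the CFI shadow. */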

static struct symdef find_cfi_check_sym(struct dso *p)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    struct verinfo verinfo = { .s = "__cfi_check", .v = "", .use_vna_hash = false };
    struct sym_info_pair s_info_p = gnu_hash(verinfo.s);
    return find_sym_impl(p, &verinfo, s_info_p, 0, p->namespace);
}

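/* Return 1 if addr falls inside one of the PT_LOAD segments of dso. */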
static int addr_in_dso(struct dso *dso, size_t addr)
{
    Phdr *ph = dso->phdr;
    size_t phcnt = dso->phnum;
    size_t entsz = dso->phentsize;
    size_t base = (size_t)dso->base;
    for (; phcnt--; ph = (void *)((char *)ph + entsz)) {
        if (ph->p_type != PT_LOAD) continue;
        if (addr - base - ph->p_vaddr < ph->p_memsz)
            return 1;
    }
    return 0;
}

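/* Return 1 if addr falls inside one of the dsos mapped by the kernel rather than by the dynamic loader
 * (the loader itself, the main executable and the vdso). */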
static int addr_in_kernel_mapped_dso(size_t addr)
{
    if (addr_in_dso(pldso, addr)) {
        return 1;
    }

    if (addr_in_dso(r_app, addr)) {
        return 1;
    }

    if (addr_in_dso(r_vdso, addr)) {
        return 1;
    }
    return 0;
}

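/* Recover the __cfi_check address from a valid shadow value:
 * __cfi_check = end of the LIBRARY_ALIGNMENT range containing func_ptr - (value - sv_valid_min) * cfi_check_alignment */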
static uintptr_t get_cfi_check_addr(uint16_t value, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    uintptr_t addr = (uintptr_t)func_ptr;
    uintptr_t aligned_addr = ALIGN_DOWN(addr, shadow_alignment) + shadow_alignment;
    uintptr_t cfi_check_func_addr = aligned_addr - ((uintptr_t)(value - sv_valid_min) << cfi_check_granularity);
#ifdef __arm__
    LD_LOGD("[CFI] [%{public}s] __arm__ defined!\n", __FUNCTION__);
    cfi_check_func_addr++;
#endif
    LD_LOGD("[CFI] [%{public}s] cfi_check_func_addr[%{public}p] in dso[%{public}s]\n",
        __FUNCTION__, cfi_check_func_addr, ((struct dso *)addr2dso((size_t)cfi_check_func_addr))->name);

    return cfi_check_func_addr;
}

static inline void cfi_slowpath_common(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    uint16_t value = sv_invalid;

    if (func_ptr == NULL) {
        return;
    }

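    /* On aarch64 the top byte of a code pointer may carry a tag (e.g. TBI/HWASan), so mask the pointer down to
     * its untagged address bits before indexing the CFI shadow. */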
#if defined(__aarch64__)
    LD_LOGD("[CFI] [%{public}s] __aarch64__ defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr & ((1ULL << 56) - 1);
#else
    LD_LOGD("[CFI] [%{public}s] __aarch64__ not defined!\n", __FUNCTION__);
    uintptr_t addr = (uintptr_t)func_ptr;
#endif

    /* Get the shadow value */
    uintptr_t offset = addr_to_offset(addr, shadow_granularity);

    if (cfi_shadow_start == NULL) {
        LD_LOGE("[CFI] [%{public}s] the cfi_shadow_start is null!\n", __FUNCTION__);
        __builtin_trap();
    }

    if (offset > shadow_size) {
        LD_LOGE("[CFI] set value to sv_invalid because offset(%{public}x) > shadow_size(%{public}x), "
            "addr:%{public}p lr:%{public}p.\n",
            offset, shadow_size, func_ptr, __builtin_return_address(0));
        value = sv_invalid;
    } else {
        value = *((uint16_t *)(cfi_shadow_start + offset));
    }
    LD_LOGD("[CFI] [%{public}s] called from %{public}s to %{public}s func_ptr:0x%{public}p "
        "shadow value:%{public}d diag_data:0x%{public}p call_site_type_id[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr, value, diag_data, call_site_type_id);

    struct dso *dso = NULL;
    switch (value) {
        case sv_invalid:
            /* Kernel-mapped sos do not guarantee the alignment required by CFI, so the lookup may hit the wrong
             * shadow value. For example: if another so is mapped into the same LIBRARY_ALIGNMENT range as a
             * kernel-mapped so, they share the same shadow value; if that other so is unloaded later, the shadow
             * value is reset to invalid, and a call to an address in the kernel-mapped so would then read an
             * invalid shadow value. We fall back to uncheck in this case. */
            if (addr_in_kernel_mapped_dso((size_t)func_ptr)) {
                LD_LOGI("[CFI] [%{public}s] uncheck for kernel mapped so.\n", __FUNCTION__);
                return;
            }

            LD_LOGE("[CFI] Invalid shadow value of address:%{public}p, lr:%{public}p.\n",
                func_ptr, __builtin_return_address(0));

            dso = (struct dso *)addr2dso((size_t)__builtin_return_address(0));
            if (dso == NULL) {
                LD_LOGE("[CFI] [%{public}s] can not find matched dso of %{public}p !\n",
                    __FUNCTION__, __builtin_return_address(0));
                __builtin_trap();
            }
            LD_LOGD("[CFI] [%{public}s] dso name[%{public}s]!\n", __FUNCTION__, dso->name);

            struct symdef cfi_check_sym = find_cfi_check_sym(dso);
            if (!cfi_check_sym.sym) {
                LD_LOGE("[CFI] [%{public}s] can not find the __cfi_check in the dso!\n", __FUNCTION__);
                __builtin_trap();
            }
            LD_LOGD("[CFI] [%{public}s] cfi_check addr[%{public}p]!\n", __FUNCTION__,
                LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value));
            ((cfi_check_t)LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value))(call_site_type_id, func_ptr, diag_data);
            break;
        case sv_uncheck:
            break;
        default:
            ((cfi_check_t)get_cfi_check_addr(value, func_ptr))(call_site_type_id, func_ptr, diag_data);
            break;
    }

    return;
}

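/* Record the dso list head and the kernel-mapped dsos (ldso, app, vdso), then map the whole list into the CFI shadow. */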
int init_cfi_shadow(struct dso *dso_list, struct dso *ldso, struct dso *app, struct dso *vdso)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    if (dso_list == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* Save the head node of the dso list */
    dso_list_head = dso_list;
    pldso = ldso;
    r_app = app;
    r_vdso = vdso;

    return map_dso_to_cfi_shadow(dso_list);
}

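/* Map dso and every dso after it in the list into the CFI shadow. The shadow itself is created lazily, the first
 * time a dso that exports __cfi_check() is seen. */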
int map_dso_to_cfi_shadow(struct dso *dso)
{
    bool has_cfi_check = false;

    if (dso == NULL) {
        LD_LOGW("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return CFI_SUCCESS;
    }

    /* If the CFI shadow does not exist, create it and map all the dsos and their dependents to it. */
    if (cfi_shadow_start == NULL) {
        /* Find the __cfi_check symbol in the dso list */
        for (struct dso *p = dso; p; p = p->next) {
            if (find_cfi_check_sym(p).sym) {
                LD_LOGD("[CFI] [%{public}s] find __cfi_check function in dso %{public}s!\n", __FUNCTION__, p->name);
                has_cfi_check = true;
                break;
            }
        }

        if (has_cfi_check) {
            if (create_cfi_shadow() == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] create cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }

            if (add_dso_to_cfi_shadow(dso_list_head) == CFI_FAILED) {
                return CFI_FAILED;
            }

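            /* Name the anonymous shadow mapping so it shows up as "[anon:cfi_shadow:musl]" in /proc/<pid>/maps. */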
            prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
        }
    /* If the CFI shadow exists, map the current dso and its dependents to it. */
    } else {
        if (add_dso_to_cfi_shadow(dso) == CFI_FAILED) {
            return CFI_FAILED;
        }
        prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");
    }

    return CFI_SUCCESS;
}

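/* Reset the shadow values covering dso to sv_invalid when it is unloaded. */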
void unmap_dso_from_cfi_shadow(struct dso *dso)
{
    if (dso == NULL) {
        LD_LOGD("[CFI] [%{public}s] has null param!\n", __FUNCTION__);
        return;
    }

    LD_LOGD("[CFI] [%{public}s] unmap dso %{public}s from shadow!\n", __FUNCTION__, dso->name);

    if (cfi_shadow_start == NULL)
        return;

    if (dso->map == 0 || dso->map_len == 0)
        return;

    if (dso->is_mapped_to_shadow == false)
        return;

    if (((size_t)dso->map & (LIBRARY_ALIGNMENT - 1)) != 0) {
        if (!(dso == pldso || dso == r_app || dso == r_vdso)) {
            LD_LOGE("[CFI] [warning] %{public}s isn't aligned to %{public}x "
                "begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
                dso->name, LIBRARY_ALIGNMENT, dso->map, dso->map + dso->map_len, 0, sv_invalid);
        }
    }

    /* Set the dso's shadow values to invalid. */
    fill_shadow_value_to_shadow(dso->map, dso->map + dso->map_len, 0, sv_invalid);
    dso->is_mapped_to_shadow = false;
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, cfi_shadow_start, shadow_size, "cfi_shadow:musl");

    return;
}


static int create_cfi_shadow(void)
{
    LD_LOGD("[CFI] [%{public}s] start!\n", __FUNCTION__);

    /* The shadow holds (max_target_addr >> shadow_granularity) entries, one uint16_t per LIBRARY_ALIGNMENT range,
     * hence the shift left by 1. The size passed to mmap() should be page aligned, so align shadow_size up. */
    shadow_size = ALIGN_UP(((max_target_addr >> shadow_granularity) << 1), PAGE_SIZE);
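    /* With the values assumed above (48-bit max_target_addr, 18-bit shadow granularity) this is about 2GB of
     * PROT_NONE, MAP_NORESERVE address space on LP64 (and only tens of KB on 32-bit); no physical memory is
     * committed until individual pages are remapped by fill_shadow_value_to_shadow(). */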

    uintptr_t *mmap_addr = mmap(NULL, shadow_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);

    if (mmap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    cfi_shadow_start = (char *)mmap_addr;
    LD_LOGD("[CFI] [%{public}s] the cfi_shadow_start addr is %{public}p!\n", __FUNCTION__, cfi_shadow_start);

    return CFI_SUCCESS;
}

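/* Walk the dso list starting at dso and fill in its shadow values: sv_uncheck for dsos without __cfi_check(),
 * valid values derived from the __cfi_check address otherwise. */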
static int add_dso_to_cfi_shadow(struct dso *dso)
{
    LD_LOGD("[CFI] [%{public}s] start with %{public}s !\n", __FUNCTION__, dso->name);
    for (struct dso *p = dso; p; p = p->next) {
        LD_LOGD("[CFI] [%{public}s] adding %{public}s to cfi shadow!\n", __FUNCTION__, p->name);
        if (p->map == 0 || p->map_len == 0) {
            LD_LOGW("[CFI] [%{public}s] the dso has no data! map[%{public}p] map_len[0x%{public}x]\n",
                __FUNCTION__, p->map, p->map_len);
            continue;
        }

        if (p->is_mapped_to_shadow == true) {
            LD_LOGW("[CFI] [%{public}s] %{public}s is already in shadow!\n", __FUNCTION__, p->name);
            continue;
        }

        struct symdef cfi_check_sym = find_cfi_check_sym(p);
        /* If the dso doesn't have __cfi_check(), set its shadow values to uncheck. */
        if (!cfi_check_sym.sym) {
            LD_LOGD("[CFI] [%{public}s] %{public}s has no __cfi_check()!\n", __FUNCTION__, p->name);

            if (((size_t)p->map & (LIBRARY_ALIGNMENT - 1)) != 0) {
                if (!(p == pldso || p == r_app || p == r_vdso)) {
                    LD_LOGE("[CFI] [warning] %{public}s isn't aligned to %{public}x "
                        "begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
                        p->name, LIBRARY_ALIGNMENT, p->map, p->map + p->map_len, 0, sv_uncheck);
                }
            }

            if (fill_shadow_value_to_shadow(p->map, p->map + p->map_len, 0, sv_uncheck) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add dso to cfi shadow failed!\n", __FUNCTION__);
                return CFI_FAILED;
            }
        /* If the dso has __cfi_check(), set its shadow values to valid. */
        } else {
            LD_LOGD("[CFI] [%{public}s] %{public}s has __cfi_check()!\n", __FUNCTION__, p->name);
            uintptr_t end = p->map + p->map_len;
            uintptr_t cfi_check = (uintptr_t)LADDR(cfi_check_sym.dso, cfi_check_sym.sym->st_value);

            if (cfi_check == 0) {
                LD_LOGE("[CFI] [%{public}s] %{public}s has null cfi_check func!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
#ifdef __arm__
            // On the ARM platform the __cfi_check address has its lowest bit set (thumb function).
            if ((cfi_check & 1UL) != 1UL) {
                LD_LOGE("[CFI] [%{public}s] __cfi_check address isn't a thumb function in %{public}s!\n",
                    __FUNCTION__, p->name);
                return CFI_FAILED;
            }
            cfi_check &= ~1UL;
#endif
            if ((cfi_check & (cfi_check_alignment - 1)) != 0) {
                LD_LOGE("[CFI] [%{public}s] unaligned __cfi_check address in %{public}s!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }

            if (((size_t)p->map & (LIBRARY_ALIGNMENT - 1)) != 0) {
                if (!(p == pldso || p == r_app || p == r_vdso)) {
                    LD_LOGE("[CFI] [warning] %{public}s isn't aligned to %{public}x "
                        "begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
                        p->name, LIBRARY_ALIGNMENT, p->map, p->map + p->map_len, cfi_check, sv_valid_min);
                }
            }

            if (fill_shadow_value_to_shadow(p->map, end, cfi_check, sv_valid_min) == CFI_FAILED) {
                LD_LOGE("[CFI] [%{public}s] add %{public}s to cfi shadow failed!\n", __FUNCTION__, p->name);
                return CFI_FAILED;
            }
        }
        p->is_mapped_to_shadow = true;
        LD_LOGD("[CFI] [%{public}s] add %{public}s to cfi shadow succeed.\n", __FUNCTION__, p->name);
    }
    LD_LOGD("[CFI] [%{public}s] %{public}s done.\n", __FUNCTION__, dso->name);

    return CFI_SUCCESS;
}

static int fill_shadow_value_to_shadow(uintptr_t begin, uintptr_t end, uintptr_t cfi_check, uint16_t type)
{
    LD_LOGD("[CFI] [%{public}s] begin[%{public}x] end[%{public}x] cfi_check[%{public}x] type[%{public}x]!\n",
        __FUNCTION__, begin, end, cfi_check, type);

    /* To ensure the atomicity of the CFI shadow operation, we create a temp_shadow, write the shadow values to
     * the temp_shadow, and then write it back to the CFI shadow by mremap(). */
    begin = ALIGN_DOWN(MAX(begin, cfi_check), shadow_alignment);
    char *shadow_begin = cfi_shadow_start + addr_to_offset(begin, LIBRARY_ALIGNMENT_BITS);
    char *shadow_end = (char *)(((uint16_t *)(cfi_shadow_start + addr_to_offset(end - 1, LIBRARY_ALIGNMENT_BITS))) + 1);
    char *aligned_shadow_begin = (char *)ALIGN_DOWN((uintptr_t)shadow_begin, PAGE_SIZE);
    char *aligned_shadow_end = (char *)ALIGN_UP((uintptr_t)shadow_end, PAGE_SIZE);

    uint16_t tmp_shadow_size = aligned_shadow_end - aligned_shadow_begin;
    uint16_t offset_begin = shadow_begin - aligned_shadow_begin;
    uint16_t offset_end = shadow_end - aligned_shadow_begin;

    char *tmp_shadow_start = (char *)mmap(NULL, tmp_shadow_size,
        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    if (tmp_shadow_start == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mmap failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] tmp_shadow_start is %{public}p\t tmp_shadow_size is 0x%{public}x!\n",
        __FUNCTION__, tmp_shadow_start, tmp_shadow_size);
    if (mprotect(aligned_shadow_begin, tmp_shadow_size, PROT_READ) == -1) {
        LD_LOGE("[CFI] [%{public}s] mprotect failed!\n", __FUNCTION__);
        return CFI_FAILED;
    }
    if (type == sv_valid_min) {
        // We need to copy the whole area because we will read the old values below.
        memcpy(tmp_shadow_start, aligned_shadow_begin, tmp_shadow_size);
    } else {
        memcpy(tmp_shadow_start, aligned_shadow_begin, offset_begin);
        memcpy(tmp_shadow_start + offset_end, shadow_end, aligned_shadow_end - shadow_end);
    }

    /* If the dso has __cfi_check(), calculate valid shadow values. */
    if (type == sv_valid_min) {
        uint16_t shadow_value_begin = ((begin + shadow_alignment - cfi_check)
            >> cfi_check_granularity) + sv_valid_min;
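        /* For the dso in the header example (begin = 0x40000, __cfi_check = 0x42000, LIBRARY_ALIGNMENT = 0x40000):
         * ((0x40000 + 0x40000 - 0x42000) >> 12) + 2 == 64, which is sv1 above. */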
        LD_LOGD("[CFI] [%{public}s] shadow_value_begin is 0x%{public}x!\n", __FUNCTION__, shadow_value_begin);
        uint32_t shadow_value = shadow_value_begin;
        /* Set shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            // We fall back to uncheck if the length of the so is larger than 256M ((UINT16_MAX - 2) * cfi_check_alignment).
            if (shadow_value > UINT16_MAX) {
                *shadow_addr = sv_uncheck;
                continue;
            }

            *shadow_addr = (*shadow_addr == sv_invalid) ? (uint16_t)shadow_value : sv_uncheck;
            shadow_value += shadow_value_step;
        }
    /* In these cases, the shadow value is always sv_uncheck or sv_invalid. */
    } else if (type == sv_uncheck || type == sv_invalid) {
        /* Set shadow values */
        for (uint16_t *shadow_addr = (uint16_t *)(tmp_shadow_start + offset_begin);
            shadow_addr != (uint16_t *)(tmp_shadow_start + offset_end); shadow_addr++) {
            *shadow_addr = type;
        }
    } else {
        LD_LOGE("[CFI] [%{public}s] has error param!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    mprotect(tmp_shadow_start, tmp_shadow_size, PROT_READ);
    /* Remap the temp_shadow into the CFI shadow. */
    uint16_t *mremap_addr = mremap(tmp_shadow_start, tmp_shadow_size, tmp_shadow_size,
        MREMAP_MAYMOVE | MREMAP_FIXED, aligned_shadow_begin);

    if (mremap_addr == MAP_FAILED) {
        LD_LOGE("[CFI] [%{public}s] mremap failed!\n", __FUNCTION__);
        munmap(tmp_shadow_start, tmp_shadow_size);
        return CFI_FAILED;
    }

    LD_LOGD("[CFI] [%{public}s] fill completed!\n", __FUNCTION__);
    return CFI_SUCCESS;
}

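/* Entry points emitted by the compiler for cross-DSO CFI checks: look up the target dso's __cfi_check() and
 * call it (__cfi_slowpath_diag() additionally forwards diagnostic data). */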
void __cfi_slowpath(uint64_t call_site_type_id, void *func_ptr)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, NULL);
    return;
}

void __cfi_slowpath_diag(uint64_t call_site_type_id, void *func_ptr, void *diag_data)
{
    LD_LOGD("[CFI] [%{public}s] called from dso[%{public}s] to dso[%{public}s] func_ptr[%{public}p]\n",
        __FUNCTION__,
        ((struct dso *)addr2dso((size_t)__builtin_return_address(0)))->name,
        ((struct dso *)addr2dso((size_t)func_ptr))->name,
        func_ptr);

    cfi_slowpath_common(call_site_type_id, func_ptr, diag_data);
    return;
}