1// SPDX-License-Identifier: GPL-2.0-or-later 2/* 3 * Copyright (C) 2015-2017 Josh Poimboeuf <jpoimboe@redhat.com> 4 */ 5 6#include <string.h> 7#include <stdlib.h> 8#include <inttypes.h> 9#include <sys/mman.h> 10 11#include "builtin.h" 12#include "cfi.h" 13#include "arch.h" 14#include "check.h" 15#include "special.h" 16#include "warn.h" 17#include "arch_elf.h" 18 19#include <linux/objtool.h> 20#include <linux/hashtable.h> 21#include <linux/kernel.h> 22#include <linux/static_call_types.h> 23 24struct alternative { 25 struct list_head list; 26 struct instruction *insn; 27 bool skip_orig; 28}; 29 30static unsigned long nr_cfi, nr_cfi_reused, nr_cfi_cache; 31 32static struct cfi_init_state initial_func_cfi; 33static struct cfi_state init_cfi; 34static struct cfi_state func_cfi; 35 36struct instruction *find_insn(struct objtool_file *file, 37 struct section *sec, unsigned long offset) 38{ 39 struct instruction *insn; 40 41 hash_for_each_possible(file->insn_hash, insn, hash, sec_offset_hash(sec, offset)) { 42 if (insn->sec == sec && insn->offset == offset) 43 return insn; 44 } 45 46 return NULL; 47} 48 49static struct instruction *next_insn_same_sec(struct objtool_file *file, 50 struct instruction *insn) 51{ 52 struct instruction *next = list_next_entry(insn, list); 53 54 if (!next || &next->list == &file->insn_list || next->sec != insn->sec) 55 return NULL; 56 57 return next; 58} 59 60static struct instruction *next_insn_same_func(struct objtool_file *file, 61 struct instruction *insn) 62{ 63 struct instruction *next = list_next_entry(insn, list); 64 struct symbol *func = insn->func; 65 66 if (!func) 67 return NULL; 68 69 if (&next->list != &file->insn_list && next->func == func) 70 return next; 71 72 /* Check if we're already in the subfunction: */ 73 if (func == func->cfunc) 74 return NULL; 75 76 /* Move to the subfunction: */ 77 return find_insn(file, func->cfunc->sec, func->cfunc->offset); 78} 79 80static struct instruction *prev_insn_same_sym(struct objtool_file *file, 81 struct instruction *insn) 82{ 83 struct instruction *prev = list_prev_entry(insn, list); 84 85 if (&prev->list != &file->insn_list && prev->func == insn->func) 86 return prev; 87 88 return NULL; 89} 90 91#define func_for_each_insn(file, func, insn) \ 92 for (insn = find_insn(file, func->sec, func->offset); \ 93 insn; \ 94 insn = next_insn_same_func(file, insn)) 95 96#define sym_for_each_insn(file, sym, insn) \ 97 for (insn = find_insn(file, sym->sec, sym->offset); \ 98 insn && &insn->list != &file->insn_list && \ 99 insn->sec == sym->sec && \ 100 insn->offset < sym->offset + sym->len; \ 101 insn = list_next_entry(insn, list)) 102 103#define sym_for_each_insn_continue_reverse(file, sym, insn) \ 104 for (insn = list_prev_entry(insn, list); \ 105 &insn->list != &file->insn_list && \ 106 insn->sec == sym->sec && insn->offset >= sym->offset; \ 107 insn = list_prev_entry(insn, list)) 108 109#define sec_for_each_insn_from(file, insn) \ 110 for (; insn; insn = next_insn_same_sec(file, insn)) 111 112#define sec_for_each_insn_continue(file, insn) \ 113 for (insn = next_insn_same_sec(file, insn); insn; \ 114 insn = next_insn_same_sec(file, insn)) 115 116static bool is_jump_table_jump(struct instruction *insn) 117{ 118 struct alt_group *alt_group = insn->alt_group; 119 120 if (insn->jump_table) 121 return true; 122 123 /* Retpoline alternative for a jump table? */ 124 return alt_group && alt_group->orig_group && 125 alt_group->orig_group->first_insn->jump_table; 126} 127 128static bool is_sibling_call(struct instruction *insn) 129{ 130 /* 131 * Assume only ELF functions can make sibling calls. This ensures 132 * sibling call detection consistency between vmlinux.o and individual 133 * objects. 134 */ 135 if (!insn->func) 136 return false; 137 138 /* An indirect jump is either a sibling call or a jump to a table. */ 139 if (insn->type == INSN_JUMP_DYNAMIC) 140 return !is_jump_table_jump(insn); 141 142 /* add_jump_destinations() sets insn->call_dest for sibling calls. */ 143 return (is_static_jump(insn) && insn->call_dest); 144} 145 146/* 147 * This checks to see if the given function is a "noreturn" function. 148 * 149 * For global functions which are outside the scope of this object file, we 150 * have to keep a manual list of them. 151 * 152 * For local functions, we have to detect them manually by simply looking for 153 * the lack of a return instruction. 154 */ 155static bool __dead_end_function(struct objtool_file *file, struct symbol *func, 156 int recursion) 157{ 158 int i; 159 struct instruction *insn; 160 bool empty = true; 161 162 /* 163 * Unfortunately these have to be hard coded because the noreturn 164 * attribute isn't provided in ELF data. 165 */ 166 static const char * const global_noreturns[] = { 167 "__stack_chk_fail", 168 "panic", 169 "do_exit", 170 "do_task_dead", 171 "make_task_dead", 172 "__module_put_and_exit", 173 "complete_and_exit", 174 "__reiserfs_panic", 175 "lbug_with_loc", 176 "fortify_panic", 177 "usercopy_abort", 178 "machine_real_restart", 179 "rewind_stack_and_make_dead", 180 "kunit_try_catch_throw", 181 "xen_start_kernel", 182 "cpu_bringup_and_idle", 183 "stop_this_cpu", 184 }; 185 186 if (!func) 187 return false; 188 189 if (func->bind == STB_WEAK) 190 return false; 191 192 if (func->bind == STB_GLOBAL) 193 for (i = 0; i < ARRAY_SIZE(global_noreturns); i++) 194 if (!strcmp(func->name, global_noreturns[i])) 195 return true; 196 197 if (!func->len) 198 return false; 199 200 insn = find_insn(file, func->sec, func->offset); 201 if (!insn || !insn->func) 202 return false; 203 204 func_for_each_insn(file, func, insn) { 205 empty = false; 206 207 if (insn->type == INSN_RETURN) 208 return false; 209 } 210 211 if (empty) 212 return false; 213 214 /* 215 * A function can have a sibling call instead of a return. In that 216 * case, the function's dead-end status depends on whether the target 217 * of the sibling call returns. 218 */ 219 func_for_each_insn(file, func, insn) { 220 if (is_sibling_call(insn)) { 221 struct instruction *dest = insn->jump_dest; 222 223 if (!dest) 224 /* sibling call to another file */ 225 return false; 226 227 /* local sibling call */ 228 if (recursion == 5) { 229 /* 230 * Infinite recursion: two functions have 231 * sibling calls to each other. This is a very 232 * rare case. It means they aren't dead ends. 233 */ 234 return false; 235 } 236 237 return __dead_end_function(file, dest->func, recursion+1); 238 } 239 } 240 241 return true; 242} 243 244static bool dead_end_function(struct objtool_file *file, struct symbol *func) 245{ 246 return __dead_end_function(file, func, 0); 247} 248 249static void init_cfi_state(struct cfi_state *cfi) 250{ 251 int i; 252 253 for (i = 0; i < CFI_NUM_REGS; i++) { 254 cfi->regs[i].base = CFI_UNDEFINED; 255 cfi->vals[i].base = CFI_UNDEFINED; 256 } 257 cfi->cfa.base = CFI_UNDEFINED; 258 cfi->drap_reg = CFI_UNDEFINED; 259 cfi->drap_offset = -1; 260} 261 262static void init_insn_state(struct insn_state *state, struct section *sec) 263{ 264 memset(state, 0, sizeof(*state)); 265 init_cfi_state(&state->cfi); 266 267 /* 268 * We need the full vmlinux for noinstr validation, otherwise we can 269 * not correctly determine insn->call_dest->sec (external symbols do 270 * not have a section). 271 */ 272 if (vmlinux && sec) 273 state->noinstr = sec->noinstr; 274} 275 276static struct cfi_state *cfi_alloc(void) 277{ 278 struct cfi_state *cfi = calloc(sizeof(struct cfi_state), 1); 279 if (!cfi) { 280 WARN("calloc failed"); 281 exit(1); 282 } 283 nr_cfi++; 284 return cfi; 285} 286 287static int cfi_bits; 288static struct hlist_head *cfi_hash; 289 290static inline bool cficmp(struct cfi_state *cfi1, struct cfi_state *cfi2) 291{ 292 return memcmp((void *)cfi1 + sizeof(cfi1->hash), 293 (void *)cfi2 + sizeof(cfi2->hash), 294 sizeof(struct cfi_state) - sizeof(struct hlist_node)); 295} 296 297static inline u32 cfi_key(struct cfi_state *cfi) 298{ 299 return jhash((void *)cfi + sizeof(cfi->hash), 300 sizeof(*cfi) - sizeof(cfi->hash), 0); 301} 302 303static struct cfi_state *cfi_hash_find_or_add(struct cfi_state *cfi) 304{ 305 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 306 struct cfi_state *obj; 307 308 hlist_for_each_entry(obj, head, hash) { 309 if (!cficmp(cfi, obj)) { 310 nr_cfi_cache++; 311 return obj; 312 } 313 } 314 315 obj = cfi_alloc(); 316 *obj = *cfi; 317 hlist_add_head(&obj->hash, head); 318 319 return obj; 320} 321 322static void cfi_hash_add(struct cfi_state *cfi) 323{ 324 struct hlist_head *head = &cfi_hash[hash_min(cfi_key(cfi), cfi_bits)]; 325 326 hlist_add_head(&cfi->hash, head); 327} 328 329static void *cfi_hash_alloc(void) 330{ 331 cfi_bits = vmlinux ? ELF_HASH_BITS - 3 : 13; 332 cfi_hash = mmap(NULL, sizeof(struct hlist_head) << cfi_bits, 333 PROT_READ|PROT_WRITE, 334 MAP_PRIVATE|MAP_ANON, -1, 0); 335 if (cfi_hash == (void *)-1L) { 336 WARN("mmap fail cfi_hash"); 337 cfi_hash = NULL; 338 } else if (stats) { 339 printf("cfi_bits: %d\n", cfi_bits); 340 } 341 342 return cfi_hash; 343} 344 345static unsigned long nr_insns; 346static unsigned long nr_insns_visited; 347 348/* 349 * Call the arch-specific instruction decoder for all the instructions and add 350 * them to the global instruction list. 351 */ 352static int decode_instructions(struct objtool_file *file) 353{ 354 struct section *sec; 355 struct symbol *func; 356 unsigned long offset; 357 struct instruction *insn; 358 int ret; 359 360 for_each_sec(file, sec) { 361 362 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 363 continue; 364 365 if (strcmp(sec->name, ".altinstr_replacement") && 366 strcmp(sec->name, ".altinstr_aux") && 367 strncmp(sec->name, ".discard.", 9)) 368 sec->text = true; 369 370 if (!strcmp(sec->name, ".noinstr.text") || 371 !strcmp(sec->name, ".entry.text") || 372 !strncmp(sec->name, ".text..__x86.", 13)) 373 sec->noinstr = true; 374 375 for (offset = 0; offset < sec->len; offset += insn->len) { 376 insn = malloc(sizeof(*insn)); 377 if (!insn) { 378 WARN("malloc failed"); 379 return -1; 380 } 381 memset(insn, 0, sizeof(*insn)); 382 INIT_LIST_HEAD(&insn->alts); 383 INIT_LIST_HEAD(&insn->stack_ops); 384 385 insn->sec = sec; 386 insn->offset = offset; 387 388 ret = arch_decode_instruction(file->elf, sec, offset, 389 sec->len - offset, 390 &insn->len, &insn->type, 391 &insn->immediate, 392 &insn->stack_ops); 393 if (ret) 394 goto err; 395 396 hash_add(file->insn_hash, &insn->hash, sec_offset_hash(sec, insn->offset)); 397 list_add_tail(&insn->list, &file->insn_list); 398 nr_insns++; 399 } 400 401 list_for_each_entry(func, &sec->symbol_list, list) { 402 if (func->type != STT_FUNC || func->alias != func) 403 continue; 404 405 if (!find_insn(file, sec, func->offset)) { 406 WARN("%s(): can't find starting instruction", 407 func->name); 408 return -1; 409 } 410 411 sym_for_each_insn(file, func, insn) 412 insn->func = func; 413 } 414 } 415 416 if (stats) 417 printf("nr_insns: %lu\n", nr_insns); 418 419 return 0; 420 421err: 422 free(insn); 423 return ret; 424} 425 426static struct instruction *find_last_insn(struct objtool_file *file, 427 struct section *sec) 428{ 429 struct instruction *insn = NULL; 430 unsigned int offset; 431 unsigned int end = (sec->len > 10) ? sec->len - 10 : 0; 432 433 for (offset = sec->len - 1; offset >= end && !insn; offset--) 434 insn = find_insn(file, sec, offset); 435 436 return insn; 437} 438 439/* 440 * Mark "ud2" instructions and manually annotated dead ends. 441 */ 442static int add_dead_ends(struct objtool_file *file) 443{ 444 struct section *sec; 445 struct reloc *reloc; 446 struct instruction *insn; 447 448 /* 449 * By default, "ud2" is a dead end unless otherwise annotated, because 450 * GCC 7 inserts it for certain divide-by-zero cases. 451 */ 452 for_each_insn(file, insn) 453 if (insn->type == INSN_BUG) 454 insn->dead_end = true; 455 456 /* 457 * Check for manually annotated dead ends. 458 */ 459 sec = find_section_by_name(file->elf, ".rela.discard.unreachable"); 460 if (!sec) 461 goto reachable; 462 463 list_for_each_entry(reloc, &sec->reloc_list, list) { 464 if (reloc->sym->type != STT_SECTION) { 465 WARN("unexpected relocation symbol type in %s", sec->name); 466 return -1; 467 } 468 insn = find_insn(file, reloc->sym->sec, reloc->addend); 469 if (insn) 470 insn = list_prev_entry(insn, list); 471 else if (reloc->addend == reloc->sym->sec->len) { 472 insn = find_last_insn(file, reloc->sym->sec); 473 if (!insn) { 474 WARN("can't find unreachable insn at %s+0x%" PRIx64, 475 reloc->sym->sec->name, reloc->addend); 476 return -1; 477 } 478 } else { 479 WARN("can't find unreachable insn at %s+0x%" PRIx64, 480 reloc->sym->sec->name, reloc->addend); 481 return -1; 482 } 483 484 insn->dead_end = true; 485 } 486 487reachable: 488 /* 489 * These manually annotated reachable checks are needed for GCC 4.4, 490 * where the Linux unreachable() macro isn't supported. In that case 491 * GCC doesn't know the "ud2" is fatal, so it generates code as if it's 492 * not a dead end. 493 */ 494 sec = find_section_by_name(file->elf, ".rela.discard.reachable"); 495 if (!sec) 496 return 0; 497 498 list_for_each_entry(reloc, &sec->reloc_list, list) { 499 if (reloc->sym->type != STT_SECTION) { 500 WARN("unexpected relocation symbol type in %s", sec->name); 501 return -1; 502 } 503 insn = find_insn(file, reloc->sym->sec, reloc->addend); 504 if (insn) 505 insn = list_prev_entry(insn, list); 506 else if (reloc->addend == reloc->sym->sec->len) { 507 insn = find_last_insn(file, reloc->sym->sec); 508 if (!insn) { 509 WARN("can't find reachable insn at %s+0x%" PRIx64, 510 reloc->sym->sec->name, reloc->addend); 511 return -1; 512 } 513 } else { 514 WARN("can't find reachable insn at %s+0x%" PRIx64, 515 reloc->sym->sec->name, reloc->addend); 516 return -1; 517 } 518 519 insn->dead_end = false; 520 } 521 522 return 0; 523} 524 525static int create_static_call_sections(struct objtool_file *file) 526{ 527 struct section *sec; 528 struct static_call_site *site; 529 struct instruction *insn; 530 struct symbol *key_sym; 531 char *key_name, *tmp; 532 int idx; 533 534 sec = find_section_by_name(file->elf, ".static_call_sites"); 535 if (sec) { 536 INIT_LIST_HEAD(&file->static_call_list); 537 WARN("file already has .static_call_sites section, skipping"); 538 return 0; 539 } 540 541 if (list_empty(&file->static_call_list)) 542 return 0; 543 544 idx = 0; 545 list_for_each_entry(insn, &file->static_call_list, call_node) 546 idx++; 547 548 sec = elf_create_section(file->elf, ".static_call_sites", SHF_WRITE, 549 sizeof(struct static_call_site), idx); 550 if (!sec) 551 return -1; 552 553 idx = 0; 554 list_for_each_entry(insn, &file->static_call_list, call_node) { 555 556 site = (struct static_call_site *)sec->data->d_buf + idx; 557 memset(site, 0, sizeof(struct static_call_site)); 558 559 /* populate reloc for 'addr' */ 560 if (elf_add_reloc_to_insn(file->elf, sec, 561 idx * sizeof(struct static_call_site), 562 R_X86_64_PC32, 563 insn->sec, insn->offset)) 564 return -1; 565 566 /* find key symbol */ 567 key_name = strdup(insn->call_dest->name); 568 if (!key_name) { 569 perror("strdup"); 570 return -1; 571 } 572 if (strncmp(key_name, STATIC_CALL_TRAMP_PREFIX_STR, 573 STATIC_CALL_TRAMP_PREFIX_LEN)) { 574 WARN("static_call: trampoline name malformed: %s", key_name); 575 free(key_name); 576 return -1; 577 } 578 tmp = key_name + STATIC_CALL_TRAMP_PREFIX_LEN - STATIC_CALL_KEY_PREFIX_LEN; 579 memcpy(tmp, STATIC_CALL_KEY_PREFIX_STR, STATIC_CALL_KEY_PREFIX_LEN); 580 581 key_sym = find_symbol_by_name(file->elf, tmp); 582 if (!key_sym) { 583 if (!module) { 584 WARN("static_call: can't find static_call_key symbol: %s", tmp); 585 free(key_name); 586 return -1; 587 } 588 589 /* 590 * For modules(), the key might not be exported, which 591 * means the module can make static calls but isn't 592 * allowed to change them. 593 * 594 * In that case we temporarily set the key to be the 595 * trampoline address. This is fixed up in 596 * static_call_add_module(). 597 */ 598 key_sym = insn->call_dest; 599 } 600 free(key_name); 601 602 /* populate reloc for 'key' */ 603 if (elf_add_reloc(file->elf, sec, 604 idx * sizeof(struct static_call_site) + 4, 605 R_X86_64_PC32, key_sym, 606 is_sibling_call(insn) * STATIC_CALL_SITE_TAIL)) 607 return -1; 608 609 idx++; 610 } 611 612 return 0; 613} 614 615static int create_retpoline_sites_sections(struct objtool_file *file) 616{ 617 struct instruction *insn; 618 struct section *sec; 619 int idx; 620 621 sec = find_section_by_name(file->elf, ".retpoline_sites"); 622 if (sec) { 623 WARN("file already has .retpoline_sites, skipping"); 624 return 0; 625 } 626 627 idx = 0; 628 list_for_each_entry(insn, &file->retpoline_call_list, call_node) 629 idx++; 630 631 if (!idx) 632 return 0; 633 634 sec = elf_create_section(file->elf, ".retpoline_sites", 0, 635 sizeof(int), idx); 636 if (!sec) { 637 WARN("elf_create_section: .retpoline_sites"); 638 return -1; 639 } 640 641 idx = 0; 642 list_for_each_entry(insn, &file->retpoline_call_list, call_node) { 643 644 int *site = (int *)sec->data->d_buf + idx; 645 *site = 0; 646 647 if (elf_add_reloc_to_insn(file->elf, sec, 648 idx * sizeof(int), 649 R_X86_64_PC32, 650 insn->sec, insn->offset)) { 651 WARN("elf_add_reloc_to_insn: .retpoline_sites"); 652 return -1; 653 } 654 655 idx++; 656 } 657 658 return 0; 659} 660 661static int create_return_sites_sections(struct objtool_file *file) 662{ 663 struct instruction *insn; 664 struct section *sec; 665 int idx; 666 667 sec = find_section_by_name(file->elf, ".return_sites"); 668 if (sec) { 669 WARN("file already has .return_sites, skipping"); 670 return 0; 671 } 672 673 idx = 0; 674 list_for_each_entry(insn, &file->return_thunk_list, call_node) 675 idx++; 676 677 if (!idx) 678 return 0; 679 680 sec = elf_create_section(file->elf, ".return_sites", 0, 681 sizeof(int), idx); 682 if (!sec) { 683 WARN("elf_create_section: .return_sites"); 684 return -1; 685 } 686 687 idx = 0; 688 list_for_each_entry(insn, &file->return_thunk_list, call_node) { 689 690 int *site = (int *)sec->data->d_buf + idx; 691 *site = 0; 692 693 if (elf_add_reloc_to_insn(file->elf, sec, 694 idx * sizeof(int), 695 R_X86_64_PC32, 696 insn->sec, insn->offset)) { 697 WARN("elf_add_reloc_to_insn: .return_sites"); 698 return -1; 699 } 700 701 idx++; 702 } 703 704 return 0; 705} 706 707/* 708 * Warnings shouldn't be reported for ignored functions. 709 */ 710static void add_ignores(struct objtool_file *file) 711{ 712 struct instruction *insn; 713 struct section *sec; 714 struct symbol *func; 715 struct reloc *reloc; 716 717 sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); 718 if (!sec) 719 return; 720 721 list_for_each_entry(reloc, &sec->reloc_list, list) { 722 switch (reloc->sym->type) { 723 case STT_FUNC: 724 func = reloc->sym; 725 break; 726 727 case STT_SECTION: 728 func = find_func_by_offset(reloc->sym->sec, reloc->addend); 729 if (!func) 730 continue; 731 break; 732 733 default: 734 WARN("unexpected relocation symbol type in %s: %d", sec->name, reloc->sym->type); 735 continue; 736 } 737 738 func_for_each_insn(file, func, insn) 739 insn->ignore = true; 740 } 741} 742 743/* 744 * This is a whitelist of functions that is allowed to be called with AC set. 745 * The list is meant to be minimal and only contains compiler instrumentation 746 * ABI and a few functions used to implement *_{to,from}_user() functions. 747 * 748 * These functions must not directly change AC, but may PUSHF/POPF. 749 */ 750static const char *uaccess_safe_builtin[] = { 751 /* KASAN */ 752 "kasan_report", 753 "check_memory_region", 754 /* KASAN out-of-line */ 755 "__asan_loadN_noabort", 756 "__asan_load1_noabort", 757 "__asan_load2_noabort", 758 "__asan_load4_noabort", 759 "__asan_load8_noabort", 760 "__asan_load16_noabort", 761 "__asan_storeN_noabort", 762 "__asan_store1_noabort", 763 "__asan_store2_noabort", 764 "__asan_store4_noabort", 765 "__asan_store8_noabort", 766 "__asan_store16_noabort", 767 "__kasan_check_read", 768 "__kasan_check_write", 769 /* KASAN in-line */ 770 "__asan_report_load_n_noabort", 771 "__asan_report_load1_noabort", 772 "__asan_report_load2_noabort", 773 "__asan_report_load4_noabort", 774 "__asan_report_load8_noabort", 775 "__asan_report_load16_noabort", 776 "__asan_report_store_n_noabort", 777 "__asan_report_store1_noabort", 778 "__asan_report_store2_noabort", 779 "__asan_report_store4_noabort", 780 "__asan_report_store8_noabort", 781 "__asan_report_store16_noabort", 782 /* KCSAN */ 783 "__kcsan_check_access", 784 "kcsan_found_watchpoint", 785 "kcsan_setup_watchpoint", 786 "kcsan_check_scoped_accesses", 787 "kcsan_disable_current", 788 "kcsan_enable_current_nowarn", 789 /* KCSAN/TSAN */ 790 "__tsan_func_entry", 791 "__tsan_func_exit", 792 "__tsan_read_range", 793 "__tsan_write_range", 794 "__tsan_read1", 795 "__tsan_read2", 796 "__tsan_read4", 797 "__tsan_read8", 798 "__tsan_read16", 799 "__tsan_write1", 800 "__tsan_write2", 801 "__tsan_write4", 802 "__tsan_write8", 803 "__tsan_write16", 804 "__tsan_read_write1", 805 "__tsan_read_write2", 806 "__tsan_read_write4", 807 "__tsan_read_write8", 808 "__tsan_read_write16", 809 "__tsan_volatile_read1", 810 "__tsan_volatile_read2", 811 "__tsan_volatile_read4", 812 "__tsan_volatile_read8", 813 "__tsan_volatile_read16", 814 "__tsan_volatile_write1", 815 "__tsan_volatile_write2", 816 "__tsan_volatile_write4", 817 "__tsan_volatile_write8", 818 "__tsan_volatile_write16", 819 "__tsan_atomic8_load", 820 "__tsan_atomic16_load", 821 "__tsan_atomic32_load", 822 "__tsan_atomic64_load", 823 "__tsan_atomic8_store", 824 "__tsan_atomic16_store", 825 "__tsan_atomic32_store", 826 "__tsan_atomic64_store", 827 "__tsan_atomic8_exchange", 828 "__tsan_atomic16_exchange", 829 "__tsan_atomic32_exchange", 830 "__tsan_atomic64_exchange", 831 "__tsan_atomic8_fetch_add", 832 "__tsan_atomic16_fetch_add", 833 "__tsan_atomic32_fetch_add", 834 "__tsan_atomic64_fetch_add", 835 "__tsan_atomic8_fetch_sub", 836 "__tsan_atomic16_fetch_sub", 837 "__tsan_atomic32_fetch_sub", 838 "__tsan_atomic64_fetch_sub", 839 "__tsan_atomic8_fetch_and", 840 "__tsan_atomic16_fetch_and", 841 "__tsan_atomic32_fetch_and", 842 "__tsan_atomic64_fetch_and", 843 "__tsan_atomic8_fetch_or", 844 "__tsan_atomic16_fetch_or", 845 "__tsan_atomic32_fetch_or", 846 "__tsan_atomic64_fetch_or", 847 "__tsan_atomic8_fetch_xor", 848 "__tsan_atomic16_fetch_xor", 849 "__tsan_atomic32_fetch_xor", 850 "__tsan_atomic64_fetch_xor", 851 "__tsan_atomic8_fetch_nand", 852 "__tsan_atomic16_fetch_nand", 853 "__tsan_atomic32_fetch_nand", 854 "__tsan_atomic64_fetch_nand", 855 "__tsan_atomic8_compare_exchange_strong", 856 "__tsan_atomic16_compare_exchange_strong", 857 "__tsan_atomic32_compare_exchange_strong", 858 "__tsan_atomic64_compare_exchange_strong", 859 "__tsan_atomic8_compare_exchange_weak", 860 "__tsan_atomic16_compare_exchange_weak", 861 "__tsan_atomic32_compare_exchange_weak", 862 "__tsan_atomic64_compare_exchange_weak", 863 "__tsan_atomic8_compare_exchange_val", 864 "__tsan_atomic16_compare_exchange_val", 865 "__tsan_atomic32_compare_exchange_val", 866 "__tsan_atomic64_compare_exchange_val", 867 "__tsan_atomic_thread_fence", 868 "__tsan_atomic_signal_fence", 869 "__tsan_unaligned_read16", 870 "__tsan_unaligned_write16", 871 /* KCOV */ 872 "write_comp_data", 873 "check_kcov_mode", 874 "__sanitizer_cov_trace_pc", 875 "__sanitizer_cov_trace_const_cmp1", 876 "__sanitizer_cov_trace_const_cmp2", 877 "__sanitizer_cov_trace_const_cmp4", 878 "__sanitizer_cov_trace_const_cmp8", 879 "__sanitizer_cov_trace_cmp1", 880 "__sanitizer_cov_trace_cmp2", 881 "__sanitizer_cov_trace_cmp4", 882 "__sanitizer_cov_trace_cmp8", 883 "__sanitizer_cov_trace_switch", 884 /* UBSAN */ 885 "ubsan_type_mismatch_common", 886 "__ubsan_handle_type_mismatch", 887 "__ubsan_handle_type_mismatch_v1", 888 "__ubsan_handle_shift_out_of_bounds", 889 /* misc */ 890 "csum_partial_copy_generic", 891 "copy_mc_fragile", 892 "copy_mc_fragile_handle_tail", 893 "copy_mc_enhanced_fast_string", 894 "ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */ 895 NULL 896}; 897 898static void add_uaccess_safe(struct objtool_file *file) 899{ 900 struct symbol *func; 901 const char **name; 902 903 if (!uaccess) 904 return; 905 906 for (name = uaccess_safe_builtin; *name; name++) { 907 func = find_symbol_by_name(file->elf, *name); 908 if (!func) 909 continue; 910 911 func->uaccess_safe = true; 912 } 913} 914 915/* 916 * FIXME: For now, just ignore any alternatives which add retpolines. This is 917 * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline. 918 * But it at least allows objtool to understand the control flow *around* the 919 * retpoline. 920 */ 921static int add_ignore_alternatives(struct objtool_file *file) 922{ 923 struct section *sec; 924 struct reloc *reloc; 925 struct instruction *insn; 926 927 sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts"); 928 if (!sec) 929 return 0; 930 931 list_for_each_entry(reloc, &sec->reloc_list, list) { 932 if (reloc->sym->type != STT_SECTION) { 933 WARN("unexpected relocation symbol type in %s", sec->name); 934 return -1; 935 } 936 937 insn = find_insn(file, reloc->sym->sec, reloc->addend); 938 if (!insn) { 939 WARN("bad .discard.ignore_alts entry"); 940 return -1; 941 } 942 943 insn->ignore_alts = true; 944 } 945 946 return 0; 947} 948 949/* 950 * Symbols that replace INSN_CALL_DYNAMIC, every (tail) call to such a symbol 951 * will be added to the .retpoline_sites section. 952 */ 953__weak bool arch_is_retpoline(struct symbol *sym) 954{ 955 return false; 956} 957 958/* 959 * Symbols that replace INSN_RETURN, every (tail) call to such a symbol 960 * will be added to the .return_sites section. 961 */ 962__weak bool arch_is_rethunk(struct symbol *sym) 963{ 964 return false; 965} 966 967/* 968 * Symbols that are embedded inside other instructions, because sometimes crazy 969 * code exists. These are mostly ignored for validation purposes. 970 */ 971__weak bool arch_is_embedded_insn(struct symbol *sym) 972{ 973 return false; 974} 975 976#define NEGATIVE_RELOC ((void *)-1L) 977 978static struct reloc *insn_reloc(struct objtool_file *file, struct instruction *insn) 979{ 980 if (insn->reloc == NEGATIVE_RELOC) 981 return NULL; 982 983 if (!insn->reloc) { 984 insn->reloc = find_reloc_by_dest_range(file->elf, insn->sec, 985 insn->offset, insn->len); 986 if (!insn->reloc) { 987 insn->reloc = NEGATIVE_RELOC; 988 return NULL; 989 } 990 } 991 992 return insn->reloc; 993} 994 995static void remove_insn_ops(struct instruction *insn) 996{ 997 struct stack_op *op, *tmp; 998 999 list_for_each_entry_safe(op, tmp, &insn->stack_ops, list) { 1000 list_del(&op->list); 1001 free(op); 1002 } 1003} 1004 1005static void annotate_call_site(struct objtool_file *file, 1006 struct instruction *insn, bool sibling) 1007{ 1008 struct reloc *reloc = insn_reloc(file, insn); 1009 struct symbol *sym = insn->call_dest; 1010 1011 if (!sym) 1012 sym = reloc->sym; 1013 1014 /* 1015 * Alternative replacement code is just template code which is 1016 * sometimes copied to the original instruction. For now, don't 1017 * annotate it. (In the future we might consider annotating the 1018 * original instruction if/when it ever makes sense to do so.) 1019 */ 1020 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1021 return; 1022 1023 if (sym->static_call_tramp) { 1024 list_add_tail(&insn->call_node, &file->static_call_list); 1025 return; 1026 } 1027 1028 if (sym->retpoline_thunk) { 1029 list_add_tail(&insn->call_node, &file->retpoline_call_list); 1030 return; 1031 } 1032 1033 /* 1034 * Many compilers cannot disable KCOV with a function attribute 1035 * so they need a little help, NOP out any KCOV calls from noinstr 1036 * text. 1037 */ 1038 if (insn->sec->noinstr && sym->kcov) { 1039 if (reloc) { 1040 reloc->type = R_NONE; 1041 elf_write_reloc(file->elf, reloc); 1042 } 1043 1044 elf_write_insn(file->elf, insn->sec, 1045 insn->offset, insn->len, 1046 sibling ? arch_ret_insn(insn->len) 1047 : arch_nop_insn(insn->len)); 1048 1049 insn->type = sibling ? INSN_RETURN : INSN_NOP; 1050 1051 if (sibling) { 1052 /* 1053 * We've replaced the tail-call JMP insn by two new 1054 * insn: RET; INT3, except we only have a single struct 1055 * insn here. Mark it retpoline_safe to avoid the SLS 1056 * warning, instead of adding another insn. 1057 */ 1058 insn->retpoline_safe = true; 1059 } 1060 1061 return; 1062 } 1063} 1064 1065static void add_call_dest(struct objtool_file *file, struct instruction *insn, 1066 struct symbol *dest, bool sibling) 1067{ 1068 insn->call_dest = dest; 1069 if (!dest) 1070 return; 1071 1072 /* 1073 * Whatever stack impact regular CALLs have, should be undone 1074 * by the RETURN of the called function. 1075 * 1076 * Annotated intra-function calls retain the stack_ops but 1077 * are converted to JUMP, see read_intra_function_calls(). 1078 */ 1079 remove_insn_ops(insn); 1080 1081 annotate_call_site(file, insn, sibling); 1082} 1083 1084static void add_retpoline_call(struct objtool_file *file, struct instruction *insn) 1085{ 1086 /* 1087 * Retpoline calls/jumps are really dynamic calls/jumps in disguise, 1088 * so convert them accordingly. 1089 */ 1090 switch (insn->type) { 1091 case INSN_CALL: 1092 insn->type = INSN_CALL_DYNAMIC; 1093 break; 1094 case INSN_JUMP_UNCONDITIONAL: 1095 insn->type = INSN_JUMP_DYNAMIC; 1096 break; 1097 case INSN_JUMP_CONDITIONAL: 1098 insn->type = INSN_JUMP_DYNAMIC_CONDITIONAL; 1099 break; 1100 default: 1101 return; 1102 } 1103 1104 insn->retpoline_safe = true; 1105 1106 /* 1107 * Whatever stack impact regular CALLs have, should be undone 1108 * by the RETURN of the called function. 1109 * 1110 * Annotated intra-function calls retain the stack_ops but 1111 * are converted to JUMP, see read_intra_function_calls(). 1112 */ 1113 remove_insn_ops(insn); 1114 1115 annotate_call_site(file, insn, false); 1116} 1117 1118static void add_return_call(struct objtool_file *file, struct instruction *insn, bool add) 1119{ 1120 /* 1121 * Return thunk tail calls are really just returns in disguise, 1122 * so convert them accordingly. 1123 */ 1124 insn->type = INSN_RETURN; 1125 insn->retpoline_safe = true; 1126 1127 /* Skip the non-text sections, specially .discard ones */ 1128 if (add && insn->sec->text) 1129 list_add_tail(&insn->call_node, &file->return_thunk_list); 1130} 1131 1132/* 1133 * Find the destination instructions for all jumps. 1134 */ 1135static int add_jump_destinations(struct objtool_file *file) 1136{ 1137 struct instruction *insn; 1138 struct reloc *reloc; 1139 struct section *dest_sec; 1140 unsigned long dest_off; 1141 1142 for_each_insn(file, insn) { 1143 if (!is_static_jump(insn)) 1144 continue; 1145 1146 reloc = insn_reloc(file, insn); 1147 if (!reloc) { 1148 dest_sec = insn->sec; 1149 dest_off = arch_jump_destination(insn); 1150 } else if (reloc->sym->type == STT_SECTION) { 1151 dest_sec = reloc->sym->sec; 1152 dest_off = arch_dest_reloc_offset(reloc->addend); 1153 } else if (reloc->sym->retpoline_thunk) { 1154 add_retpoline_call(file, insn); 1155 continue; 1156 } else if (reloc->sym->return_thunk) { 1157 add_return_call(file, insn, true); 1158 continue; 1159 } else if (insn->func) { 1160 /* internal or external sibling call (with reloc) */ 1161 add_call_dest(file, insn, reloc->sym, true); 1162 continue; 1163 } else if (reloc->sym->sec->idx) { 1164 dest_sec = reloc->sym->sec; 1165 dest_off = reloc->sym->sym.st_value + 1166 arch_dest_reloc_offset(reloc->addend); 1167 } else { 1168 /* non-func asm code jumping to another file */ 1169 continue; 1170 } 1171 1172 insn->jump_dest = find_insn(file, dest_sec, dest_off); 1173 if (!insn->jump_dest) { 1174 struct symbol *sym = find_symbol_by_offset(dest_sec, dest_off); 1175 1176 /* 1177 * This is a special case where an alt instruction 1178 * jumps past the end of the section. These are 1179 * handled later in handle_group_alt(). 1180 */ 1181 if (!strcmp(insn->sec->name, ".altinstr_replacement")) 1182 continue; 1183 1184 /* 1185 * This is a special case for retbleed_untrain_ret(). 1186 * It jumps to __x86_return_thunk(), but objtool 1187 * can't find the thunk's starting RET 1188 * instruction, because the RET is also in the 1189 * middle of another instruction. Objtool only 1190 * knows about the outer instruction. 1191 */ 1192 if (sym && sym->embedded_insn) { 1193 add_return_call(file, insn, false); 1194 continue; 1195 } 1196 1197 WARN_FUNC("can't find jump dest instruction at %s+0x%lx", 1198 insn->sec, insn->offset, dest_sec->name, 1199 dest_off); 1200 return -1; 1201 } 1202 1203 /* 1204 * Cross-function jump. 1205 */ 1206 if (insn->func && insn->jump_dest->func && 1207 insn->func != insn->jump_dest->func) { 1208 1209 /* 1210 * For GCC 8+, create parent/child links for any cold 1211 * subfunctions. This is _mostly_ redundant with a 1212 * similar initialization in read_symbols(). 1213 * 1214 * If a function has aliases, we want the *first* such 1215 * function in the symbol table to be the subfunction's 1216 * parent. In that case we overwrite the 1217 * initialization done in read_symbols(). 1218 * 1219 * However this code can't completely replace the 1220 * read_symbols() code because this doesn't detect the 1221 * case where the parent function's only reference to a 1222 * subfunction is through a jump table. 1223 */ 1224 if (!strstr(insn->func->name, ".cold") && 1225 strstr(insn->jump_dest->func->name, ".cold")) { 1226 insn->func->cfunc = insn->jump_dest->func; 1227 insn->jump_dest->func->pfunc = insn->func; 1228 1229 } else if (insn->jump_dest->func->pfunc != insn->func->pfunc && 1230 insn->jump_dest->offset == insn->jump_dest->func->offset) { 1231 /* internal sibling call (without reloc) */ 1232 add_call_dest(file, insn, insn->jump_dest->func, true); 1233 } 1234 } 1235 } 1236 1237 return 0; 1238} 1239 1240static struct symbol *find_call_destination(struct section *sec, unsigned long offset) 1241{ 1242 struct symbol *call_dest; 1243 1244 call_dest = find_func_by_offset(sec, offset); 1245 if (!call_dest) 1246 call_dest = find_symbol_by_offset(sec, offset); 1247 1248 return call_dest; 1249} 1250 1251/* 1252 * Find the destination instructions for all calls. 1253 */ 1254static int add_call_destinations(struct objtool_file *file) 1255{ 1256 struct instruction *insn; 1257 unsigned long dest_off; 1258 struct symbol *dest; 1259 struct reloc *reloc; 1260 1261 for_each_insn(file, insn) { 1262 if (insn->type != INSN_CALL) 1263 continue; 1264 1265 reloc = insn_reloc(file, insn); 1266 if (!reloc) { 1267 dest_off = arch_jump_destination(insn); 1268 dest = find_call_destination(insn->sec, dest_off); 1269 1270 add_call_dest(file, insn, dest, false); 1271 1272 if (insn->ignore) 1273 continue; 1274 1275 if (!insn->call_dest) { 1276 WARN_FUNC("unannotated intra-function call", insn->sec, insn->offset); 1277 return -1; 1278 } 1279 1280 if (insn->func && insn->call_dest->type != STT_FUNC) { 1281 WARN_FUNC("unsupported call to non-function", 1282 insn->sec, insn->offset); 1283 return -1; 1284 } 1285 1286 } else if (reloc->sym->type == STT_SECTION) { 1287 dest_off = arch_dest_reloc_offset(reloc->addend); 1288 dest = find_call_destination(reloc->sym->sec, dest_off); 1289 if (!dest) { 1290 WARN_FUNC("can't find call dest symbol at %s+0x%lx", 1291 insn->sec, insn->offset, 1292 reloc->sym->sec->name, 1293 dest_off); 1294 return -1; 1295 } 1296 1297 add_call_dest(file, insn, dest, false); 1298 1299 } else if (reloc->sym->retpoline_thunk) { 1300 add_retpoline_call(file, insn); 1301 1302 } else 1303 add_call_dest(file, insn, reloc->sym, false); 1304 } 1305 1306 return 0; 1307} 1308 1309/* 1310 * The .alternatives section requires some extra special care over and above 1311 * other special sections because alternatives are patched in place. 1312 */ 1313static int handle_group_alt(struct objtool_file *file, 1314 struct special_alt *special_alt, 1315 struct instruction *orig_insn, 1316 struct instruction **new_insn) 1317{ 1318 struct instruction *last_orig_insn, *last_new_insn = NULL, *insn, *nop = NULL; 1319 struct alt_group *orig_alt_group, *new_alt_group; 1320 unsigned long dest_off; 1321 1322 1323 orig_alt_group = malloc(sizeof(*orig_alt_group)); 1324 if (!orig_alt_group) { 1325 WARN("malloc failed"); 1326 return -1; 1327 } 1328 orig_alt_group->cfi = calloc(special_alt->orig_len, 1329 sizeof(struct cfi_state *)); 1330 if (!orig_alt_group->cfi) { 1331 WARN("calloc failed"); 1332 return -1; 1333 } 1334 1335 last_orig_insn = NULL; 1336 insn = orig_insn; 1337 sec_for_each_insn_from(file, insn) { 1338 if (insn->offset >= special_alt->orig_off + special_alt->orig_len) 1339 break; 1340 1341 insn->alt_group = orig_alt_group; 1342 last_orig_insn = insn; 1343 } 1344 orig_alt_group->orig_group = NULL; 1345 orig_alt_group->first_insn = orig_insn; 1346 orig_alt_group->last_insn = last_orig_insn; 1347 1348 1349 new_alt_group = malloc(sizeof(*new_alt_group)); 1350 if (!new_alt_group) { 1351 WARN("malloc failed"); 1352 return -1; 1353 } 1354 1355 if (special_alt->new_len < special_alt->orig_len) { 1356 /* 1357 * Insert a fake nop at the end to make the replacement 1358 * alt_group the same size as the original. This is needed to 1359 * allow propagate_alt_cfi() to do its magic. When the last 1360 * instruction affects the stack, the instruction after it (the 1361 * nop) will propagate the new state to the shared CFI array. 1362 */ 1363 nop = malloc(sizeof(*nop)); 1364 if (!nop) { 1365 WARN("malloc failed"); 1366 return -1; 1367 } 1368 memset(nop, 0, sizeof(*nop)); 1369 INIT_LIST_HEAD(&nop->alts); 1370 INIT_LIST_HEAD(&nop->stack_ops); 1371 1372 nop->sec = special_alt->new_sec; 1373 nop->offset = special_alt->new_off + special_alt->new_len; 1374 nop->len = special_alt->orig_len - special_alt->new_len; 1375 nop->type = INSN_NOP; 1376 nop->func = orig_insn->func; 1377 nop->alt_group = new_alt_group; 1378 nop->ignore = orig_insn->ignore_alts; 1379 } 1380 1381 if (!special_alt->new_len) { 1382 *new_insn = nop; 1383 goto end; 1384 } 1385 1386 insn = *new_insn; 1387 sec_for_each_insn_from(file, insn) { 1388 struct reloc *alt_reloc; 1389 1390 if (insn->offset >= special_alt->new_off + special_alt->new_len) 1391 break; 1392 1393 last_new_insn = insn; 1394 1395 insn->ignore = orig_insn->ignore_alts; 1396 insn->func = orig_insn->func; 1397 insn->alt_group = new_alt_group; 1398 1399 /* 1400 * Since alternative replacement code is copy/pasted by the 1401 * kernel after applying relocations, generally such code can't 1402 * have relative-address relocation references to outside the 1403 * .altinstr_replacement section, unless the arch's 1404 * alternatives code can adjust the relative offsets 1405 * accordingly. 1406 */ 1407 alt_reloc = insn_reloc(file, insn); 1408 if (alt_reloc && 1409 !arch_support_alt_relocation(special_alt, insn, alt_reloc)) { 1410 1411 WARN_FUNC("unsupported relocation in alternatives section", 1412 insn->sec, insn->offset); 1413 return -1; 1414 } 1415 1416 if (!is_static_jump(insn)) 1417 continue; 1418 1419 if (!insn->immediate) 1420 continue; 1421 1422 dest_off = arch_jump_destination(insn); 1423 if (dest_off == special_alt->new_off + special_alt->new_len) 1424 insn->jump_dest = next_insn_same_sec(file, last_orig_insn); 1425 1426 if (!insn->jump_dest) { 1427 WARN_FUNC("can't find alternative jump destination", 1428 insn->sec, insn->offset); 1429 return -1; 1430 } 1431 } 1432 1433 if (!last_new_insn) { 1434 WARN_FUNC("can't find last new alternative instruction", 1435 special_alt->new_sec, special_alt->new_off); 1436 return -1; 1437 } 1438 1439 if (nop) 1440 list_add(&nop->list, &last_new_insn->list); 1441end: 1442 new_alt_group->orig_group = orig_alt_group; 1443 new_alt_group->first_insn = *new_insn; 1444 new_alt_group->last_insn = nop ? : last_new_insn; 1445 new_alt_group->cfi = orig_alt_group->cfi; 1446 return 0; 1447} 1448 1449/* 1450 * A jump table entry can either convert a nop to a jump or a jump to a nop. 1451 * If the original instruction is a jump, make the alt entry an effective nop 1452 * by just skipping the original instruction. 1453 */ 1454static int handle_jump_alt(struct objtool_file *file, 1455 struct special_alt *special_alt, 1456 struct instruction *orig_insn, 1457 struct instruction **new_insn) 1458{ 1459 if (orig_insn->type == INSN_NOP) 1460 return 0; 1461 1462 if (orig_insn->type != INSN_JUMP_UNCONDITIONAL) { 1463 WARN_FUNC("unsupported instruction at jump label", 1464 orig_insn->sec, orig_insn->offset); 1465 return -1; 1466 } 1467 1468 *new_insn = list_next_entry(orig_insn, list); 1469 return 0; 1470} 1471 1472/* 1473 * Read all the special sections which have alternate instructions which can be 1474 * patched in or redirected to at runtime. Each instruction having alternate 1475 * instruction(s) has them added to its insn->alts list, which will be 1476 * traversed in validate_branch(). 1477 */ 1478static int add_special_section_alts(struct objtool_file *file) 1479{ 1480 struct list_head special_alts; 1481 struct instruction *orig_insn, *new_insn; 1482 struct special_alt *special_alt, *tmp; 1483 struct alternative *alt; 1484 int ret; 1485 1486 ret = special_get_alts(file->elf, &special_alts); 1487 if (ret) 1488 return ret; 1489 1490 list_for_each_entry_safe(special_alt, tmp, &special_alts, list) { 1491 1492 orig_insn = find_insn(file, special_alt->orig_sec, 1493 special_alt->orig_off); 1494 if (!orig_insn) { 1495 WARN_FUNC("special: can't find orig instruction", 1496 special_alt->orig_sec, special_alt->orig_off); 1497 ret = -1; 1498 goto out; 1499 } 1500 1501 new_insn = NULL; 1502 if (!special_alt->group || special_alt->new_len) { 1503 new_insn = find_insn(file, special_alt->new_sec, 1504 special_alt->new_off); 1505 if (!new_insn) { 1506 WARN_FUNC("special: can't find new instruction", 1507 special_alt->new_sec, 1508 special_alt->new_off); 1509 ret = -1; 1510 goto out; 1511 } 1512 } 1513 1514 if (special_alt->group) { 1515 if (!special_alt->orig_len) { 1516 WARN_FUNC("empty alternative entry", 1517 orig_insn->sec, orig_insn->offset); 1518 continue; 1519 } 1520 1521 ret = handle_group_alt(file, special_alt, orig_insn, 1522 &new_insn); 1523 if (ret) 1524 goto out; 1525 } else if (special_alt->jump_or_nop) { 1526 ret = handle_jump_alt(file, special_alt, orig_insn, 1527 &new_insn); 1528 if (ret) 1529 goto out; 1530 } 1531 1532 alt = malloc(sizeof(*alt)); 1533 if (!alt) { 1534 WARN("malloc failed"); 1535 ret = -1; 1536 goto out; 1537 } 1538 1539 alt->insn = new_insn; 1540 alt->skip_orig = special_alt->skip_orig; 1541 orig_insn->ignore_alts |= special_alt->skip_alt; 1542 list_add_tail(&alt->list, &orig_insn->alts); 1543 1544 list_del(&special_alt->list); 1545 free(special_alt); 1546 } 1547 1548out: 1549 return ret; 1550} 1551 1552static int add_jump_table(struct objtool_file *file, struct instruction *insn, 1553 struct reloc *table) 1554{ 1555 struct reloc *reloc = table; 1556 struct instruction *dest_insn; 1557 struct alternative *alt; 1558 struct symbol *pfunc = insn->func->pfunc; 1559 unsigned int prev_offset = 0; 1560 1561 /* 1562 * Each @reloc is a switch table relocation which points to the target 1563 * instruction. 1564 */ 1565 list_for_each_entry_from(reloc, &table->sec->reloc_list, list) { 1566 1567 /* Check for the end of the table: */ 1568 if (reloc != table && reloc->jump_table_start) 1569 break; 1570 1571 /* Make sure the table entries are consecutive: */ 1572 if (prev_offset && reloc->offset != prev_offset + 8) 1573 break; 1574 1575 /* Detect function pointers from contiguous objects: */ 1576 if (reloc->sym->sec == pfunc->sec && 1577 reloc->addend == pfunc->offset) 1578 break; 1579 1580 dest_insn = find_insn(file, reloc->sym->sec, reloc->addend); 1581 if (!dest_insn) 1582 break; 1583 1584 /* Make sure the destination is in the same function: */ 1585 if (!dest_insn->func || dest_insn->func->pfunc != pfunc) 1586 break; 1587 1588 alt = malloc(sizeof(*alt)); 1589 if (!alt) { 1590 WARN("malloc failed"); 1591 return -1; 1592 } 1593 1594 alt->insn = dest_insn; 1595 list_add_tail(&alt->list, &insn->alts); 1596 prev_offset = reloc->offset; 1597 } 1598 1599 if (!prev_offset) { 1600 WARN_FUNC("can't find switch jump table", 1601 insn->sec, insn->offset); 1602 return -1; 1603 } 1604 1605 return 0; 1606} 1607 1608/* 1609 * find_jump_table() - Given a dynamic jump, find the switch jump table 1610 * associated with it. 1611 */ 1612static struct reloc *find_jump_table(struct objtool_file *file, 1613 struct symbol *func, 1614 struct instruction *insn) 1615{ 1616 struct reloc *table_reloc; 1617 struct instruction *dest_insn, *orig_insn = insn; 1618 1619 /* 1620 * Backward search using the @first_jump_src links, these help avoid 1621 * much of the 'in between' code. Which avoids us getting confused by 1622 * it. 1623 */ 1624 for (; 1625 insn && insn->func && insn->func->pfunc == func; 1626 insn = insn->first_jump_src ?: prev_insn_same_sym(file, insn)) { 1627 1628 if (insn != orig_insn && insn->type == INSN_JUMP_DYNAMIC) 1629 break; 1630 1631 /* allow small jumps within the range */ 1632 if (insn->type == INSN_JUMP_UNCONDITIONAL && 1633 insn->jump_dest && 1634 (insn->jump_dest->offset <= insn->offset || 1635 insn->jump_dest->offset > orig_insn->offset)) 1636 break; 1637 1638 table_reloc = arch_find_switch_table(file, insn); 1639 if (!table_reloc) 1640 continue; 1641 dest_insn = find_insn(file, table_reloc->sym->sec, table_reloc->addend); 1642 if (!dest_insn || !dest_insn->func || dest_insn->func->pfunc != func) 1643 continue; 1644 1645 return table_reloc; 1646 } 1647 1648 return NULL; 1649} 1650 1651/* 1652 * First pass: Mark the head of each jump table so that in the next pass, 1653 * we know when a given jump table ends and the next one starts. 1654 */ 1655static void mark_func_jump_tables(struct objtool_file *file, 1656 struct symbol *func) 1657{ 1658 struct instruction *insn, *last = NULL; 1659 struct reloc *reloc; 1660 1661 func_for_each_insn(file, func, insn) { 1662 if (!last) 1663 last = insn; 1664 1665 /* 1666 * Store back-pointers for unconditional forward jumps such 1667 * that find_jump_table() can back-track using those and 1668 * avoid some potentially confusing code. 1669 */ 1670 if (insn->type == INSN_JUMP_UNCONDITIONAL && insn->jump_dest && 1671 insn->offset > last->offset && 1672 insn->jump_dest->offset > insn->offset && 1673 !insn->jump_dest->first_jump_src) { 1674 1675 insn->jump_dest->first_jump_src = insn; 1676 last = insn->jump_dest; 1677 } 1678 1679 if (insn->type != INSN_JUMP_DYNAMIC) 1680 continue; 1681 1682 reloc = find_jump_table(file, func, insn); 1683 if (reloc) { 1684 reloc->jump_table_start = true; 1685 insn->jump_table = reloc; 1686 } 1687 } 1688} 1689 1690static int add_func_jump_tables(struct objtool_file *file, 1691 struct symbol *func) 1692{ 1693 struct instruction *insn; 1694 int ret; 1695 1696 func_for_each_insn(file, func, insn) { 1697 if (!insn->jump_table) 1698 continue; 1699 1700 ret = add_jump_table(file, insn, insn->jump_table); 1701 if (ret) 1702 return ret; 1703 } 1704 1705 return 0; 1706} 1707 1708/* 1709 * For some switch statements, gcc generates a jump table in the .rodata 1710 * section which contains a list of addresses within the function to jump to. 1711 * This finds these jump tables and adds them to the insn->alts lists. 1712 */ 1713static int add_jump_table_alts(struct objtool_file *file) 1714{ 1715 struct section *sec; 1716 struct symbol *func; 1717 int ret; 1718 1719 if (!file->rodata) 1720 return 0; 1721 1722 for_each_sec(file, sec) { 1723 list_for_each_entry(func, &sec->symbol_list, list) { 1724 if (func->type != STT_FUNC) 1725 continue; 1726 1727 mark_func_jump_tables(file, func); 1728 ret = add_func_jump_tables(file, func); 1729 if (ret) 1730 return ret; 1731 } 1732 } 1733 1734 return 0; 1735} 1736 1737static void set_func_state(struct cfi_state *state) 1738{ 1739 state->cfa = initial_func_cfi.cfa; 1740 memcpy(&state->regs, &initial_func_cfi.regs, 1741 CFI_NUM_REGS * sizeof(struct cfi_reg)); 1742 state->stack_size = initial_func_cfi.cfa.offset; 1743} 1744 1745static int read_unwind_hints(struct objtool_file *file) 1746{ 1747 struct cfi_state cfi = init_cfi; 1748 struct section *sec, *relocsec; 1749 struct unwind_hint *hint; 1750 struct instruction *insn; 1751 struct reloc *reloc; 1752 int i; 1753 1754 sec = find_section_by_name(file->elf, ".discard.unwind_hints"); 1755 if (!sec) 1756 return 0; 1757 1758 relocsec = sec->reloc; 1759 if (!relocsec) { 1760 WARN("missing .rela.discard.unwind_hints section"); 1761 return -1; 1762 } 1763 1764 if (sec->len % sizeof(struct unwind_hint)) { 1765 WARN("struct unwind_hint size mismatch"); 1766 return -1; 1767 } 1768 1769 file->hints = true; 1770 1771 for (i = 0; i < sec->len / sizeof(struct unwind_hint); i++) { 1772 hint = (struct unwind_hint *)sec->data->d_buf + i; 1773 1774 reloc = find_reloc_by_dest(file->elf, sec, i * sizeof(*hint)); 1775 if (!reloc) { 1776 WARN("can't find reloc for unwind_hints[%d]", i); 1777 return -1; 1778 } 1779 1780 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1781 if (!insn) { 1782 WARN("can't find insn for unwind_hints[%d]", i); 1783 return -1; 1784 } 1785 1786 insn->hint = true; 1787 1788 if (hint->type == UNWIND_HINT_TYPE_SAVE) { 1789 insn->hint = false; 1790 insn->save = true; 1791 continue; 1792 } 1793 1794 if (hint->type == UNWIND_HINT_TYPE_RESTORE) { 1795 insn->restore = true; 1796 continue; 1797 } 1798 1799 if (hint->type == UNWIND_HINT_TYPE_REGS_PARTIAL) { 1800 struct symbol *sym = find_symbol_by_offset(insn->sec, insn->offset); 1801 1802 if (sym && sym->bind == STB_GLOBAL) { 1803 insn->entry = 1; 1804 } 1805 } 1806 1807 if (hint->type == UNWIND_HINT_TYPE_ENTRY) { 1808 hint->type = UNWIND_HINT_TYPE_CALL; 1809 insn->entry = 1; 1810 } 1811 1812 if (hint->type == UNWIND_HINT_TYPE_FUNC) { 1813 insn->cfi = &func_cfi; 1814 continue; 1815 } 1816 1817 if (insn->cfi) 1818 cfi = *(insn->cfi); 1819 1820 if (arch_decode_hint_reg(hint->sp_reg, &cfi.cfa.base)) { 1821 WARN_FUNC("unsupported unwind_hint sp base reg %d", 1822 insn->sec, insn->offset, hint->sp_reg); 1823 return -1; 1824 } 1825 1826 cfi.cfa.offset = hint->sp_offset; 1827 cfi.type = hint->type; 1828 cfi.end = hint->end; 1829 1830 insn->cfi = cfi_hash_find_or_add(&cfi); 1831 } 1832 1833 return 0; 1834} 1835 1836static int read_retpoline_hints(struct objtool_file *file) 1837{ 1838 struct section *sec; 1839 struct instruction *insn; 1840 struct reloc *reloc; 1841 1842 sec = find_section_by_name(file->elf, ".rela.discard.retpoline_safe"); 1843 if (!sec) 1844 return 0; 1845 1846 list_for_each_entry(reloc, &sec->reloc_list, list) { 1847 if (reloc->sym->type != STT_SECTION) { 1848 WARN("unexpected relocation symbol type in %s", sec->name); 1849 return -1; 1850 } 1851 1852 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1853 if (!insn) { 1854 WARN("bad .discard.retpoline_safe entry"); 1855 return -1; 1856 } 1857 1858 if (insn->type != INSN_JUMP_DYNAMIC && 1859 insn->type != INSN_CALL_DYNAMIC && 1860 insn->type != INSN_RETURN && 1861 insn->type != INSN_NOP) { 1862 WARN_FUNC("retpoline_safe hint not an indirect jump/call/ret/nop", 1863 insn->sec, insn->offset); 1864 return -1; 1865 } 1866 1867 insn->retpoline_safe = true; 1868 } 1869 1870 return 0; 1871} 1872 1873static int read_instr_hints(struct objtool_file *file) 1874{ 1875 struct section *sec; 1876 struct instruction *insn; 1877 struct reloc *reloc; 1878 1879 sec = find_section_by_name(file->elf, ".rela.discard.instr_end"); 1880 if (!sec) 1881 return 0; 1882 1883 list_for_each_entry(reloc, &sec->reloc_list, list) { 1884 if (reloc->sym->type != STT_SECTION) { 1885 WARN("unexpected relocation symbol type in %s", sec->name); 1886 return -1; 1887 } 1888 1889 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1890 if (!insn) { 1891 WARN("bad .discard.instr_end entry"); 1892 return -1; 1893 } 1894 1895 insn->instr--; 1896 } 1897 1898 sec = find_section_by_name(file->elf, ".rela.discard.instr_begin"); 1899 if (!sec) 1900 return 0; 1901 1902 list_for_each_entry(reloc, &sec->reloc_list, list) { 1903 if (reloc->sym->type != STT_SECTION) { 1904 WARN("unexpected relocation symbol type in %s", sec->name); 1905 return -1; 1906 } 1907 1908 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1909 if (!insn) { 1910 WARN("bad .discard.instr_begin entry"); 1911 return -1; 1912 } 1913 1914 insn->instr++; 1915 } 1916 1917 return 0; 1918} 1919 1920static int read_intra_function_calls(struct objtool_file *file) 1921{ 1922 struct instruction *insn; 1923 struct section *sec; 1924 struct reloc *reloc; 1925 1926 sec = find_section_by_name(file->elf, ".rela.discard.intra_function_calls"); 1927 if (!sec) 1928 return 0; 1929 1930 list_for_each_entry(reloc, &sec->reloc_list, list) { 1931 unsigned long dest_off; 1932 1933 if (reloc->sym->type != STT_SECTION) { 1934 WARN("unexpected relocation symbol type in %s", 1935 sec->name); 1936 return -1; 1937 } 1938 1939 insn = find_insn(file, reloc->sym->sec, reloc->addend); 1940 if (!insn) { 1941 WARN("bad .discard.intra_function_call entry"); 1942 return -1; 1943 } 1944 1945 if (insn->type != INSN_CALL) { 1946 WARN_FUNC("intra_function_call not a direct call", 1947 insn->sec, insn->offset); 1948 return -1; 1949 } 1950 1951 /* 1952 * Treat intra-function CALLs as JMPs, but with a stack_op. 1953 * See add_call_destinations(), which strips stack_ops from 1954 * normal CALLs. 1955 */ 1956 insn->type = INSN_JUMP_UNCONDITIONAL; 1957 1958 dest_off = insn->offset + insn->len + insn->immediate; 1959 insn->jump_dest = find_insn(file, insn->sec, dest_off); 1960 if (!insn->jump_dest) { 1961 WARN_FUNC("can't find call dest at %s+0x%lx", 1962 insn->sec, insn->offset, 1963 insn->sec->name, dest_off); 1964 return -1; 1965 } 1966 } 1967 1968 return 0; 1969} 1970 1971static int classify_symbols(struct objtool_file *file) 1972{ 1973 struct section *sec; 1974 struct symbol *func; 1975 1976 for_each_sec(file, sec) { 1977 list_for_each_entry(func, &sec->symbol_list, list) { 1978 if (func->bind != STB_GLOBAL) 1979 continue; 1980 1981 if (!strncmp(func->name, STATIC_CALL_TRAMP_PREFIX_STR, 1982 strlen(STATIC_CALL_TRAMP_PREFIX_STR))) 1983 func->static_call_tramp = true; 1984 1985 if (arch_is_retpoline(func)) 1986 func->retpoline_thunk = true; 1987 1988 if (arch_is_rethunk(func)) 1989 func->return_thunk = true; 1990 1991 if (arch_is_embedded_insn(func)) 1992 func->embedded_insn = true; 1993 1994 if (!strcmp(func->name, "__fentry__")) 1995 func->fentry = true; 1996 1997 if (!strncmp(func->name, "__sanitizer_cov_", 16)) 1998 func->kcov = true; 1999 } 2000 } 2001 2002 return 0; 2003} 2004 2005static void mark_rodata(struct objtool_file *file) 2006{ 2007 struct section *sec; 2008 bool found = false; 2009 2010 /* 2011 * Search for the following rodata sections, each of which can 2012 * potentially contain jump tables: 2013 * 2014 * - .rodata: can contain GCC switch tables 2015 * - .rodata.<func>: same, if -fdata-sections is being used 2016 * - .rodata..c_jump_table: contains C annotated jump tables 2017 * 2018 * .rodata.str1.* sections are ignored; they don't contain jump tables. 2019 */ 2020 for_each_sec(file, sec) { 2021 if (!strncmp(sec->name, ".rodata", 7) && 2022 !strstr(sec->name, ".str1.")) { 2023 sec->rodata = true; 2024 found = true; 2025 } 2026 } 2027 2028 file->rodata = found; 2029} 2030 2031static int decode_sections(struct objtool_file *file) 2032{ 2033 int ret; 2034 2035 mark_rodata(file); 2036 2037 ret = decode_instructions(file); 2038 if (ret) 2039 return ret; 2040 2041 ret = add_dead_ends(file); 2042 if (ret) 2043 return ret; 2044 2045 add_ignores(file); 2046 add_uaccess_safe(file); 2047 2048 ret = add_ignore_alternatives(file); 2049 if (ret) 2050 return ret; 2051 2052 /* 2053 * Must be before add_{jump_call}_destination. 2054 */ 2055 ret = classify_symbols(file); 2056 if (ret) 2057 return ret; 2058 2059 /* 2060 * Must be before add_special_section_alts() as that depends on 2061 * jump_dest being set. 2062 */ 2063 ret = add_jump_destinations(file); 2064 if (ret) 2065 return ret; 2066 2067 ret = add_special_section_alts(file); 2068 if (ret) 2069 return ret; 2070 2071 /* 2072 * Must be before add_call_destination(); it changes INSN_CALL to 2073 * INSN_JUMP. 2074 */ 2075 ret = read_intra_function_calls(file); 2076 if (ret) 2077 return ret; 2078 2079 ret = add_call_destinations(file); 2080 if (ret) 2081 return ret; 2082 2083 ret = add_jump_table_alts(file); 2084 if (ret) 2085 return ret; 2086 2087 ret = read_unwind_hints(file); 2088 if (ret) 2089 return ret; 2090 2091 ret = read_retpoline_hints(file); 2092 if (ret) 2093 return ret; 2094 2095 ret = read_instr_hints(file); 2096 if (ret) 2097 return ret; 2098 2099 return 0; 2100} 2101 2102static bool is_special_call(struct instruction *insn) 2103{ 2104 if (insn->type == INSN_CALL) { 2105 struct symbol *dest = insn->call_dest; 2106 2107 if (!dest) 2108 return false; 2109 2110 if (dest->fentry || dest->embedded_insn) 2111 return true; 2112 } 2113 2114 return false; 2115} 2116 2117static bool has_modified_stack_frame(struct instruction *insn, struct insn_state *state) 2118{ 2119 struct cfi_state *cfi = &state->cfi; 2120 int i; 2121 2122 if (cfi->cfa.base != initial_func_cfi.cfa.base || cfi->drap) 2123 return true; 2124 2125 if (cfi->cfa.offset != initial_func_cfi.cfa.offset) 2126 return true; 2127 2128 if (cfi->stack_size != initial_func_cfi.cfa.offset) 2129 return true; 2130 2131 for (i = 0; i < CFI_NUM_REGS; i++) { 2132 if (cfi->regs[i].base != initial_func_cfi.regs[i].base || 2133 cfi->regs[i].offset != initial_func_cfi.regs[i].offset) 2134 return true; 2135 } 2136 2137 return false; 2138} 2139 2140static bool has_valid_stack_frame(struct insn_state *state) 2141{ 2142 struct cfi_state *cfi = &state->cfi; 2143 2144 if (cfi->cfa.base == CFI_BP && cfi->regs[CFI_BP].base == CFI_CFA && 2145 cfi->regs[CFI_BP].offset == -16) 2146 return true; 2147 2148 if (cfi->drap && cfi->regs[CFI_BP].base == CFI_BP) 2149 return true; 2150 2151 return false; 2152} 2153 2154static int update_cfi_state_regs(struct instruction *insn, 2155 struct cfi_state *cfi, 2156 struct stack_op *op) 2157{ 2158 struct cfi_reg *cfa = &cfi->cfa; 2159 2160 if (cfa->base != CFI_SP && cfa->base != CFI_SP_INDIRECT) 2161 return 0; 2162 2163 /* push */ 2164 if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF) 2165 cfa->offset += 8; 2166 2167 /* pop */ 2168 if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF) 2169 cfa->offset -= 8; 2170 2171 /* add immediate to sp */ 2172 if (op->dest.type == OP_DEST_REG && op->src.type == OP_SRC_ADD && 2173 op->dest.reg == CFI_SP && op->src.reg == CFI_SP) 2174 cfa->offset -= op->src.offset; 2175 2176 return 0; 2177} 2178 2179static void save_reg(struct cfi_state *cfi, unsigned char reg, int base, int offset) 2180{ 2181 if (arch_callee_saved_reg(reg) && 2182 cfi->regs[reg].base == CFI_UNDEFINED) { 2183 cfi->regs[reg].base = base; 2184 cfi->regs[reg].offset = offset; 2185 } 2186} 2187 2188static void restore_reg(struct cfi_state *cfi, unsigned char reg) 2189{ 2190 cfi->regs[reg].base = initial_func_cfi.regs[reg].base; 2191 cfi->regs[reg].offset = initial_func_cfi.regs[reg].offset; 2192} 2193 2194/* 2195 * A note about DRAP stack alignment: 2196 * 2197 * GCC has the concept of a DRAP register, which is used to help keep track of 2198 * the stack pointer when aligning the stack. r10 or r13 is used as the DRAP 2199 * register. The typical DRAP pattern is: 2200 * 2201 * 4c 8d 54 24 08 lea 0x8(%rsp),%r10 2202 * 48 83 e4 c0 and $0xffffffffffffffc0,%rsp 2203 * 41 ff 72 f8 pushq -0x8(%r10) 2204 * 55 push %rbp 2205 * 48 89 e5 mov %rsp,%rbp 2206 * (more pushes) 2207 * 41 52 push %r10 2208 * ... 2209 * 41 5a pop %r10 2210 * (more pops) 2211 * 5d pop %rbp 2212 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2213 * c3 retq 2214 * 2215 * There are some variations in the epilogues, like: 2216 * 2217 * 5b pop %rbx 2218 * 41 5a pop %r10 2219 * 41 5c pop %r12 2220 * 41 5d pop %r13 2221 * 41 5e pop %r14 2222 * c9 leaveq 2223 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2224 * c3 retq 2225 * 2226 * and: 2227 * 2228 * 4c 8b 55 e8 mov -0x18(%rbp),%r10 2229 * 48 8b 5d e0 mov -0x20(%rbp),%rbx 2230 * 4c 8b 65 f0 mov -0x10(%rbp),%r12 2231 * 4c 8b 6d f8 mov -0x8(%rbp),%r13 2232 * c9 leaveq 2233 * 49 8d 62 f8 lea -0x8(%r10),%rsp 2234 * c3 retq 2235 * 2236 * Sometimes r13 is used as the DRAP register, in which case it's saved and 2237 * restored beforehand: 2238 * 2239 * 41 55 push %r13 2240 * 4c 8d 6c 24 10 lea 0x10(%rsp),%r13 2241 * 48 83 e4 f0 and $0xfffffffffffffff0,%rsp 2242 * ... 2243 * 49 8d 65 f0 lea -0x10(%r13),%rsp 2244 * 41 5d pop %r13 2245 * c3 retq 2246 */ 2247static int update_cfi_state(struct instruction *insn, struct cfi_state *cfi, 2248 struct stack_op *op) 2249{ 2250 struct cfi_reg *cfa = &cfi->cfa; 2251 struct cfi_reg *regs = cfi->regs; 2252 2253 /* stack operations don't make sense with an undefined CFA */ 2254 if (cfa->base == CFI_UNDEFINED) { 2255 if (insn->func) { 2256 WARN_FUNC("undefined stack state", insn->sec, insn->offset); 2257 return -1; 2258 } 2259 return 0; 2260 } 2261 2262 if (cfi->type == UNWIND_HINT_TYPE_REGS || 2263 cfi->type == UNWIND_HINT_TYPE_REGS_PARTIAL) 2264 return update_cfi_state_regs(insn, cfi, op); 2265 2266 switch (op->dest.type) { 2267 2268 case OP_DEST_REG: 2269 switch (op->src.type) { 2270 2271 case OP_SRC_REG: 2272 if (op->src.reg == CFI_SP && op->dest.reg == CFI_BP && 2273 cfa->base == CFI_SP && 2274 regs[CFI_BP].base == CFI_CFA && 2275 regs[CFI_BP].offset == -cfa->offset) { 2276 2277 /* mov %rsp, %rbp */ 2278 cfa->base = op->dest.reg; 2279 cfi->bp_scratch = false; 2280 } 2281 2282 else if (op->src.reg == CFI_SP && 2283 op->dest.reg == CFI_BP && cfi->drap) { 2284 2285 /* drap: mov %rsp, %rbp */ 2286 regs[CFI_BP].base = CFI_BP; 2287 regs[CFI_BP].offset = -cfi->stack_size; 2288 cfi->bp_scratch = false; 2289 } 2290 2291 else if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2292 2293 /* 2294 * mov %rsp, %reg 2295 * 2296 * This is needed for the rare case where GCC 2297 * does: 2298 * 2299 * mov %rsp, %rax 2300 * ... 2301 * mov %rax, %rsp 2302 */ 2303 cfi->vals[op->dest.reg].base = CFI_CFA; 2304 cfi->vals[op->dest.reg].offset = -cfi->stack_size; 2305 } 2306 2307 else if (op->src.reg == CFI_BP && op->dest.reg == CFI_SP && 2308 cfa->base == CFI_BP) { 2309 2310 /* 2311 * mov %rbp, %rsp 2312 * 2313 * Restore the original stack pointer (Clang). 2314 */ 2315 cfi->stack_size = -cfi->regs[CFI_BP].offset; 2316 } 2317 2318 else if (op->dest.reg == cfa->base) { 2319 2320 /* mov %reg, %rsp */ 2321 if (cfa->base == CFI_SP && 2322 cfi->vals[op->src.reg].base == CFI_CFA) { 2323 2324 /* 2325 * This is needed for the rare case 2326 * where GCC does something dumb like: 2327 * 2328 * lea 0x8(%rsp), %rcx 2329 * ... 2330 * mov %rcx, %rsp 2331 */ 2332 cfa->offset = -cfi->vals[op->src.reg].offset; 2333 cfi->stack_size = cfa->offset; 2334 2335 } else { 2336 cfa->base = CFI_UNDEFINED; 2337 cfa->offset = 0; 2338 } 2339 } 2340 2341 break; 2342 2343 case OP_SRC_ADD: 2344 if (op->dest.reg == CFI_SP && op->src.reg == CFI_SP) { 2345 2346 /* add imm, %rsp */ 2347 cfi->stack_size -= op->src.offset; 2348 if (cfa->base == CFI_SP) 2349 cfa->offset -= op->src.offset; 2350 break; 2351 } 2352 2353 if (op->dest.reg == CFI_SP && op->src.reg == CFI_BP) { 2354 2355 /* lea disp(%rbp), %rsp */ 2356 cfi->stack_size = -(op->src.offset + regs[CFI_BP].offset); 2357 break; 2358 } 2359 2360 if (op->src.reg == CFI_SP && cfa->base == CFI_SP) { 2361 2362 /* drap: lea disp(%rsp), %drap */ 2363 cfi->drap_reg = op->dest.reg; 2364 2365 /* 2366 * lea disp(%rsp), %reg 2367 * 2368 * This is needed for the rare case where GCC 2369 * does something dumb like: 2370 * 2371 * lea 0x8(%rsp), %rcx 2372 * ... 2373 * mov %rcx, %rsp 2374 */ 2375 cfi->vals[op->dest.reg].base = CFI_CFA; 2376 cfi->vals[op->dest.reg].offset = \ 2377 -cfi->stack_size + op->src.offset; 2378 2379 break; 2380 } 2381 2382 if (cfi->drap && op->dest.reg == CFI_SP && 2383 op->src.reg == cfi->drap_reg) { 2384 2385 /* drap: lea disp(%drap), %rsp */ 2386 cfa->base = CFI_SP; 2387 cfa->offset = cfi->stack_size = -op->src.offset; 2388 cfi->drap_reg = CFI_UNDEFINED; 2389 cfi->drap = false; 2390 break; 2391 } 2392 2393 if (op->dest.reg == cfi->cfa.base) { 2394 WARN_FUNC("unsupported stack register modification", 2395 insn->sec, insn->offset); 2396 return -1; 2397 } 2398 2399 break; 2400 2401 case OP_SRC_AND: 2402 if (op->dest.reg != CFI_SP || 2403 (cfi->drap_reg != CFI_UNDEFINED && cfa->base != CFI_SP) || 2404 (cfi->drap_reg == CFI_UNDEFINED && cfa->base != CFI_BP)) { 2405 WARN_FUNC("unsupported stack pointer realignment", 2406 insn->sec, insn->offset); 2407 return -1; 2408 } 2409 2410 if (cfi->drap_reg != CFI_UNDEFINED) { 2411 /* drap: and imm, %rsp */ 2412 cfa->base = cfi->drap_reg; 2413 cfa->offset = cfi->stack_size = 0; 2414 cfi->drap = true; 2415 } 2416 2417 /* 2418 * Older versions of GCC (4.8ish) realign the stack 2419 * without DRAP, with a frame pointer. 2420 */ 2421 2422 break; 2423 2424 case OP_SRC_POP: 2425 case OP_SRC_POPF: 2426 if (!cfi->drap && op->dest.reg == cfa->base) { 2427 2428 /* pop %rbp */ 2429 cfa->base = CFI_SP; 2430 } 2431 2432 if (cfi->drap && cfa->base == CFI_BP_INDIRECT && 2433 op->dest.reg == cfi->drap_reg && 2434 cfi->drap_offset == -cfi->stack_size) { 2435 2436 /* drap: pop %drap */ 2437 cfa->base = cfi->drap_reg; 2438 cfa->offset = 0; 2439 cfi->drap_offset = -1; 2440 2441 } else if (regs[op->dest.reg].offset == -cfi->stack_size) { 2442 2443 /* pop %reg */ 2444 restore_reg(cfi, op->dest.reg); 2445 } 2446 2447 cfi->stack_size -= 8; 2448 if (cfa->base == CFI_SP) 2449 cfa->offset -= 8; 2450 2451 break; 2452 2453 case OP_SRC_REG_INDIRECT: 2454 if (cfi->drap && op->src.reg == CFI_BP && 2455 op->src.offset == cfi->drap_offset) { 2456 2457 /* drap: mov disp(%rbp), %drap */ 2458 cfa->base = cfi->drap_reg; 2459 cfa->offset = 0; 2460 cfi->drap_offset = -1; 2461 } 2462 2463 if (cfi->drap && op->src.reg == CFI_BP && 2464 op->src.offset == regs[op->dest.reg].offset) { 2465 2466 /* drap: mov disp(%rbp), %reg */ 2467 restore_reg(cfi, op->dest.reg); 2468 2469 } else if (op->src.reg == cfa->base && 2470 op->src.offset == regs[op->dest.reg].offset + cfa->offset) { 2471 2472 /* mov disp(%rbp), %reg */ 2473 /* mov disp(%rsp), %reg */ 2474 restore_reg(cfi, op->dest.reg); 2475 } 2476 2477 break; 2478 2479 default: 2480 WARN_FUNC("unknown stack-related instruction", 2481 insn->sec, insn->offset); 2482 return -1; 2483 } 2484 2485 break; 2486 2487 case OP_DEST_PUSH: 2488 case OP_DEST_PUSHF: 2489 cfi->stack_size += 8; 2490 if (cfa->base == CFI_SP) 2491 cfa->offset += 8; 2492 2493 if (op->src.type != OP_SRC_REG) 2494 break; 2495 2496 if (cfi->drap) { 2497 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2498 2499 /* drap: push %drap */ 2500 cfa->base = CFI_BP_INDIRECT; 2501 cfa->offset = -cfi->stack_size; 2502 2503 /* save drap so we know when to restore it */ 2504 cfi->drap_offset = -cfi->stack_size; 2505 2506 } else if (op->src.reg == CFI_BP && cfa->base == cfi->drap_reg) { 2507 2508 /* drap: push %rbp */ 2509 cfi->stack_size = 0; 2510 2511 } else { 2512 2513 /* drap: push %reg */ 2514 save_reg(cfi, op->src.reg, CFI_BP, -cfi->stack_size); 2515 } 2516 2517 } else { 2518 2519 /* push %reg */ 2520 save_reg(cfi, op->src.reg, CFI_CFA, -cfi->stack_size); 2521 } 2522 2523 /* detect when asm code uses rbp as a scratch register */ 2524 if (!no_fp && insn->func && op->src.reg == CFI_BP && 2525 cfa->base != CFI_BP) 2526 cfi->bp_scratch = true; 2527 break; 2528 2529 case OP_DEST_REG_INDIRECT: 2530 2531 if (cfi->drap) { 2532 if (op->src.reg == cfa->base && op->src.reg == cfi->drap_reg) { 2533 2534 /* drap: mov %drap, disp(%rbp) */ 2535 cfa->base = CFI_BP_INDIRECT; 2536 cfa->offset = op->dest.offset; 2537 2538 /* save drap offset so we know when to restore it */ 2539 cfi->drap_offset = op->dest.offset; 2540 } else { 2541 2542 /* drap: mov reg, disp(%rbp) */ 2543 save_reg(cfi, op->src.reg, CFI_BP, op->dest.offset); 2544 } 2545 2546 } else if (op->dest.reg == cfa->base) { 2547 2548 /* mov reg, disp(%rbp) */ 2549 /* mov reg, disp(%rsp) */ 2550 save_reg(cfi, op->src.reg, CFI_CFA, 2551 op->dest.offset - cfi->cfa.offset); 2552 } 2553 2554 break; 2555 2556 case OP_DEST_LEAVE: 2557 if ((!cfi->drap && cfa->base != CFI_BP) || 2558 (cfi->drap && cfa->base != cfi->drap_reg)) { 2559 WARN_FUNC("leave instruction with modified stack frame", 2560 insn->sec, insn->offset); 2561 return -1; 2562 } 2563 2564 /* leave (mov %rbp, %rsp; pop %rbp) */ 2565 2566 cfi->stack_size = -cfi->regs[CFI_BP].offset - 8; 2567 restore_reg(cfi, CFI_BP); 2568 2569 if (!cfi->drap) { 2570 cfa->base = CFI_SP; 2571 cfa->offset -= 8; 2572 } 2573 2574 break; 2575 2576 case OP_DEST_MEM: 2577 if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) { 2578 WARN_FUNC("unknown stack-related memory operation", 2579 insn->sec, insn->offset); 2580 return -1; 2581 } 2582 2583 /* pop mem */ 2584 cfi->stack_size -= 8; 2585 if (cfa->base == CFI_SP) 2586 cfa->offset -= 8; 2587 2588 break; 2589 2590 default: 2591 WARN_FUNC("unknown stack-related instruction", 2592 insn->sec, insn->offset); 2593 return -1; 2594 } 2595 2596 return 0; 2597} 2598 2599/* 2600 * The stack layouts of alternatives instructions can sometimes diverge when 2601 * they have stack modifications. That's fine as long as the potential stack 2602 * layouts don't conflict at any given potential instruction boundary. 2603 * 2604 * Flatten the CFIs of the different alternative code streams (both original 2605 * and replacement) into a single shared CFI array which can be used to detect 2606 * conflicts and nicely feed a linear array of ORC entries to the unwinder. 2607 */ 2608static int propagate_alt_cfi(struct objtool_file *file, struct instruction *insn) 2609{ 2610 struct cfi_state **alt_cfi; 2611 int group_off; 2612 2613 if (!insn->alt_group) 2614 return 0; 2615 2616 if (!insn->cfi) { 2617 WARN("CFI missing"); 2618 return -1; 2619 } 2620 2621 alt_cfi = insn->alt_group->cfi; 2622 group_off = insn->offset - insn->alt_group->first_insn->offset; 2623 2624 if (!alt_cfi[group_off]) { 2625 alt_cfi[group_off] = insn->cfi; 2626 } else { 2627 if (cficmp(alt_cfi[group_off], insn->cfi)) { 2628 WARN_FUNC("stack layout conflict in alternatives", 2629 insn->sec, insn->offset); 2630 return -1; 2631 } 2632 } 2633 2634 return 0; 2635} 2636 2637static int handle_insn_ops(struct instruction *insn, struct insn_state *state) 2638{ 2639 struct stack_op *op; 2640 2641 list_for_each_entry(op, &insn->stack_ops, list) { 2642 2643 if (update_cfi_state(insn, &state->cfi, op)) 2644 return 1; 2645 2646 if (op->dest.type == OP_DEST_PUSHF) { 2647 if (!state->uaccess_stack) { 2648 state->uaccess_stack = 1; 2649 } else if (state->uaccess_stack >> 31) { 2650 WARN_FUNC("PUSHF stack exhausted", 2651 insn->sec, insn->offset); 2652 return 1; 2653 } 2654 state->uaccess_stack <<= 1; 2655 state->uaccess_stack |= state->uaccess; 2656 } 2657 2658 if (op->src.type == OP_SRC_POPF) { 2659 if (state->uaccess_stack) { 2660 state->uaccess = state->uaccess_stack & 1; 2661 state->uaccess_stack >>= 1; 2662 if (state->uaccess_stack == 1) 2663 state->uaccess_stack = 0; 2664 } 2665 } 2666 } 2667 2668 return 0; 2669} 2670 2671static bool insn_cfi_match(struct instruction *insn, struct cfi_state *cfi2) 2672{ 2673 struct cfi_state *cfi1 = insn->cfi; 2674 int i; 2675 2676 if (!cfi1) { 2677 WARN("CFI missing"); 2678 return false; 2679 } 2680 2681 if (memcmp(&cfi1->cfa, &cfi2->cfa, sizeof(cfi1->cfa))) { 2682 2683 WARN_FUNC("stack state mismatch: cfa1=%d%+d cfa2=%d%+d", 2684 insn->sec, insn->offset, 2685 cfi1->cfa.base, cfi1->cfa.offset, 2686 cfi2->cfa.base, cfi2->cfa.offset); 2687 2688 } else if (memcmp(&cfi1->regs, &cfi2->regs, sizeof(cfi1->regs))) { 2689 for (i = 0; i < CFI_NUM_REGS; i++) { 2690 if (!memcmp(&cfi1->regs[i], &cfi2->regs[i], 2691 sizeof(struct cfi_reg))) 2692 continue; 2693 2694 WARN_FUNC("stack state mismatch: reg1[%d]=%d%+d reg2[%d]=%d%+d", 2695 insn->sec, insn->offset, 2696 i, cfi1->regs[i].base, cfi1->regs[i].offset, 2697 i, cfi2->regs[i].base, cfi2->regs[i].offset); 2698 break; 2699 } 2700 2701 } else if (cfi1->type != cfi2->type) { 2702 2703 WARN_FUNC("stack state mismatch: type1=%d type2=%d", 2704 insn->sec, insn->offset, cfi1->type, cfi2->type); 2705 2706 } else if (cfi1->drap != cfi2->drap || 2707 (cfi1->drap && cfi1->drap_reg != cfi2->drap_reg) || 2708 (cfi1->drap && cfi1->drap_offset != cfi2->drap_offset)) { 2709 2710 WARN_FUNC("stack state mismatch: drap1=%d(%d,%d) drap2=%d(%d,%d)", 2711 insn->sec, insn->offset, 2712 cfi1->drap, cfi1->drap_reg, cfi1->drap_offset, 2713 cfi2->drap, cfi2->drap_reg, cfi2->drap_offset); 2714 2715 } else 2716 return true; 2717 2718 return false; 2719} 2720 2721static inline bool func_uaccess_safe(struct symbol *func) 2722{ 2723 if (func) 2724 return func->uaccess_safe; 2725 2726 return false; 2727} 2728 2729static inline const char *call_dest_name(struct instruction *insn) 2730{ 2731 if (insn->call_dest) 2732 return insn->call_dest->name; 2733 2734 return "{dynamic}"; 2735} 2736 2737static inline bool noinstr_call_dest(struct symbol *func) 2738{ 2739 /* 2740 * We can't deal with indirect function calls at present; 2741 * assume they're instrumented. 2742 */ 2743 if (!func) 2744 return false; 2745 2746 /* 2747 * If the symbol is from a noinstr section; we good. 2748 */ 2749 if (func->sec->noinstr) 2750 return true; 2751 2752 /* 2753 * The __ubsan_handle_*() calls are like WARN(), they only happen when 2754 * something 'BAD' happened. At the risk of taking the machine down, 2755 * let them proceed to get the message out. 2756 */ 2757 if (!strncmp(func->name, "__ubsan_handle_", 15)) 2758 return true; 2759 2760 return false; 2761} 2762 2763static int validate_call(struct instruction *insn, struct insn_state *state) 2764{ 2765 if (state->noinstr && state->instr <= 0 && 2766 !noinstr_call_dest(insn->call_dest)) { 2767 WARN_FUNC("call to %s() leaves .noinstr.text section", 2768 insn->sec, insn->offset, call_dest_name(insn)); 2769 return 1; 2770 } 2771 2772 if (state->uaccess && !func_uaccess_safe(insn->call_dest)) { 2773 WARN_FUNC("call to %s() with UACCESS enabled", 2774 insn->sec, insn->offset, call_dest_name(insn)); 2775 return 1; 2776 } 2777 2778 if (state->df) { 2779 WARN_FUNC("call to %s() with DF set", 2780 insn->sec, insn->offset, call_dest_name(insn)); 2781 return 1; 2782 } 2783 2784 return 0; 2785} 2786 2787static int validate_sibling_call(struct instruction *insn, struct insn_state *state) 2788{ 2789 if (has_modified_stack_frame(insn, state)) { 2790 WARN_FUNC("sibling call from callable instruction with modified stack frame", 2791 insn->sec, insn->offset); 2792 return 1; 2793 } 2794 2795 return validate_call(insn, state); 2796} 2797 2798static int validate_return(struct symbol *func, struct instruction *insn, struct insn_state *state) 2799{ 2800 if (state->noinstr && state->instr > 0) { 2801 WARN_FUNC("return with instrumentation enabled", 2802 insn->sec, insn->offset); 2803 return 1; 2804 } 2805 2806 if (state->uaccess && !func_uaccess_safe(func)) { 2807 WARN_FUNC("return with UACCESS enabled", 2808 insn->sec, insn->offset); 2809 return 1; 2810 } 2811 2812 if (!state->uaccess && func_uaccess_safe(func)) { 2813 WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", 2814 insn->sec, insn->offset); 2815 return 1; 2816 } 2817 2818 if (state->df) { 2819 WARN_FUNC("return with DF set", 2820 insn->sec, insn->offset); 2821 return 1; 2822 } 2823 2824 if (func && has_modified_stack_frame(insn, state)) { 2825 WARN_FUNC("return with modified stack frame", 2826 insn->sec, insn->offset); 2827 return 1; 2828 } 2829 2830 if (state->cfi.bp_scratch) { 2831 WARN_FUNC("BP used as a scratch register", 2832 insn->sec, insn->offset); 2833 return 1; 2834 } 2835 2836 return 0; 2837} 2838 2839static struct instruction *next_insn_to_validate(struct objtool_file *file, 2840 struct instruction *insn) 2841{ 2842 struct alt_group *alt_group = insn->alt_group; 2843 2844 /* 2845 * Simulate the fact that alternatives are patched in-place. When the 2846 * end of a replacement alt_group is reached, redirect objtool flow to 2847 * the end of the original alt_group. 2848 */ 2849 if (alt_group && insn == alt_group->last_insn && alt_group->orig_group) 2850 return next_insn_same_sec(file, alt_group->orig_group->last_insn); 2851 2852 return next_insn_same_sec(file, insn); 2853} 2854 2855/* 2856 * Follow the branch starting at the given instruction, and recursively follow 2857 * any other branches (jumps). Meanwhile, track the frame pointer state at 2858 * each instruction and validate all the rules described in 2859 * tools/objtool/Documentation/stack-validation.txt. 2860 */ 2861static int validate_branch(struct objtool_file *file, struct symbol *func, 2862 struct instruction *insn, struct insn_state state) 2863{ 2864 struct alternative *alt; 2865 struct instruction *next_insn, *prev_insn = NULL; 2866 struct section *sec; 2867 u8 visited; 2868 int ret; 2869 2870 sec = insn->sec; 2871 2872 while (1) { 2873 next_insn = next_insn_to_validate(file, insn); 2874 2875 if (file->c_file && func && insn->func && func != insn->func->pfunc) { 2876 WARN("%s() falls through to next function %s()", 2877 func->name, insn->func->name); 2878 return 1; 2879 } 2880 2881 if (func && insn->ignore) { 2882 WARN_FUNC("BUG: why am I validating an ignored function?", 2883 sec, insn->offset); 2884 return 1; 2885 } 2886 2887 visited = VISITED_BRANCH << state.uaccess; 2888 if (insn->visited & VISITED_BRANCH_MASK) { 2889 if (!insn->hint && !insn_cfi_match(insn, &state.cfi)) 2890 return 1; 2891 2892 if (insn->visited & visited) 2893 return 0; 2894 } else { 2895 nr_insns_visited++; 2896 } 2897 2898 if (state.noinstr) 2899 state.instr += insn->instr; 2900 2901 if (insn->hint) { 2902 if (insn->restore) { 2903 struct instruction *save_insn, *i; 2904 2905 i = insn; 2906 save_insn = NULL; 2907 2908 sym_for_each_insn_continue_reverse(file, func, i) { 2909 if (i->save) { 2910 save_insn = i; 2911 break; 2912 } 2913 } 2914 2915 if (!save_insn) { 2916 WARN_FUNC("no corresponding CFI save for CFI restore", 2917 sec, insn->offset); 2918 return 1; 2919 } 2920 2921 if (!save_insn->visited) { 2922 WARN_FUNC("objtool isn't smart enough to handle this CFI save/restore combo", 2923 sec, insn->offset); 2924 return 1; 2925 } 2926 2927 insn->cfi = save_insn->cfi; 2928 nr_cfi_reused++; 2929 } 2930 2931 state.cfi = *insn->cfi; 2932 } else { 2933 /* XXX track if we actually changed state.cfi */ 2934 2935 if (prev_insn && !cficmp(prev_insn->cfi, &state.cfi)) { 2936 insn->cfi = prev_insn->cfi; 2937 nr_cfi_reused++; 2938 } else { 2939 insn->cfi = cfi_hash_find_or_add(&state.cfi); 2940 } 2941 } 2942 2943 insn->visited |= visited; 2944 2945 if (propagate_alt_cfi(file, insn)) 2946 return 1; 2947 2948 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 2949 bool skip_orig = false; 2950 2951 list_for_each_entry(alt, &insn->alts, list) { 2952 if (alt->skip_orig) 2953 skip_orig = true; 2954 2955 ret = validate_branch(file, func, alt->insn, state); 2956 if (ret) { 2957 if (backtrace) 2958 BT_FUNC("(alt)", insn); 2959 return ret; 2960 } 2961 } 2962 2963 if (skip_orig) 2964 return 0; 2965 } 2966 2967 if (handle_insn_ops(insn, &state)) 2968 return 1; 2969 2970 switch (insn->type) { 2971 2972 case INSN_RETURN: 2973 if (sls && !insn->retpoline_safe && 2974 next_insn && next_insn->type != INSN_TRAP) { 2975 WARN_FUNC("missing int3 after ret", 2976 insn->sec, insn->offset); 2977 } 2978 return validate_return(func, insn, &state); 2979 2980 case INSN_CALL: 2981 case INSN_CALL_DYNAMIC: 2982 ret = validate_call(insn, &state); 2983 if (ret) 2984 return ret; 2985 2986 if (!no_fp && func && !is_special_call(insn) && 2987 !has_valid_stack_frame(&state)) { 2988 WARN_FUNC("call without frame pointer save/setup", 2989 sec, insn->offset); 2990 return 1; 2991 } 2992 2993 if (dead_end_function(file, insn->call_dest)) 2994 return 0; 2995 2996 break; 2997 2998 case INSN_JUMP_CONDITIONAL: 2999 case INSN_JUMP_UNCONDITIONAL: 3000 if (is_sibling_call(insn)) { 3001 ret = validate_sibling_call(insn, &state); 3002 if (ret) 3003 return ret; 3004 3005 } else if (insn->jump_dest) { 3006 ret = validate_branch(file, func, 3007 insn->jump_dest, state); 3008 if (ret) { 3009 if (backtrace) 3010 BT_FUNC("(branch)", insn); 3011 return ret; 3012 } 3013 } 3014 3015 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3016 return 0; 3017 3018 break; 3019 3020 case INSN_JUMP_DYNAMIC: 3021 if (sls && !insn->retpoline_safe && 3022 next_insn && next_insn->type != INSN_TRAP) { 3023 WARN_FUNC("missing int3 after indirect jump", 3024 insn->sec, insn->offset); 3025 } 3026 3027 /* fallthrough */ 3028 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3029 if (is_sibling_call(insn)) { 3030 ret = validate_sibling_call(insn, &state); 3031 if (ret) 3032 return ret; 3033 } 3034 3035 if (insn->type == INSN_JUMP_DYNAMIC) 3036 return 0; 3037 3038 break; 3039 3040 case INSN_CONTEXT_SWITCH: 3041 if (func && (!next_insn || !next_insn->hint)) { 3042 WARN_FUNC("unsupported instruction in callable function", 3043 sec, insn->offset); 3044 return 1; 3045 } 3046 return 0; 3047 3048 case INSN_STAC: 3049 if (state.uaccess) { 3050 WARN_FUNC("recursive UACCESS enable", sec, insn->offset); 3051 return 1; 3052 } 3053 3054 state.uaccess = true; 3055 break; 3056 3057 case INSN_CLAC: 3058 if (!state.uaccess && func) { 3059 WARN_FUNC("redundant UACCESS disable", sec, insn->offset); 3060 return 1; 3061 } 3062 3063 if (func_uaccess_safe(func) && !state.uaccess_stack) { 3064 WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset); 3065 return 1; 3066 } 3067 3068 state.uaccess = false; 3069 break; 3070 3071 case INSN_STD: 3072 if (state.df) { 3073 WARN_FUNC("recursive STD", sec, insn->offset); 3074 return 1; 3075 } 3076 3077 state.df = true; 3078 break; 3079 3080 case INSN_CLD: 3081 if (!state.df && func) { 3082 WARN_FUNC("redundant CLD", sec, insn->offset); 3083 return 1; 3084 } 3085 3086 state.df = false; 3087 break; 3088 3089 default: 3090 break; 3091 } 3092 3093 if (insn->dead_end) 3094 return 0; 3095 3096 if (!next_insn) { 3097 if (state.cfi.cfa.base == CFI_UNDEFINED) 3098 return 0; 3099 WARN("%s: unexpected end of section", sec->name); 3100 return 1; 3101 } 3102 3103 prev_insn = insn; 3104 insn = next_insn; 3105 } 3106 3107 return 0; 3108} 3109 3110static int validate_unwind_hints(struct objtool_file *file, struct section *sec) 3111{ 3112 struct instruction *insn; 3113 struct insn_state state; 3114 int ret, warnings = 0; 3115 3116 if (!file->hints) 3117 return 0; 3118 3119 init_insn_state(&state, sec); 3120 3121 if (sec) { 3122 insn = find_insn(file, sec, 0); 3123 if (!insn) 3124 return 0; 3125 } else { 3126 insn = list_first_entry(&file->insn_list, typeof(*insn), list); 3127 } 3128 3129 while (&insn->list != &file->insn_list && (!sec || insn->sec == sec)) { 3130 if (insn->hint && !insn->visited) { 3131 ret = validate_branch(file, insn->func, insn, state); 3132 if (ret && backtrace) 3133 BT_FUNC("<=== (hint)", insn); 3134 warnings += ret; 3135 } 3136 3137 insn = list_next_entry(insn, list); 3138 } 3139 3140 return warnings; 3141} 3142 3143/* 3144 * Validate rethunk entry constraint: must untrain RET before the first RET. 3145 * 3146 * Follow every branch (intra-function) and ensure ANNOTATE_UNRET_END comes 3147 * before an actual RET instruction. 3148 */ 3149static int validate_entry(struct objtool_file *file, struct instruction *insn) 3150{ 3151 struct instruction *next, *dest; 3152 int ret, warnings = 0; 3153 3154 for (;;) { 3155 next = next_insn_to_validate(file, insn); 3156 3157 if (insn->visited & VISITED_ENTRY) 3158 return 0; 3159 3160 insn->visited |= VISITED_ENTRY; 3161 3162 if (!insn->ignore_alts && !list_empty(&insn->alts)) { 3163 struct alternative *alt; 3164 bool skip_orig = false; 3165 3166 list_for_each_entry(alt, &insn->alts, list) { 3167 if (alt->skip_orig) 3168 skip_orig = true; 3169 3170 ret = validate_entry(file, alt->insn); 3171 if (ret) { 3172 if (backtrace) 3173 BT_FUNC("(alt)", insn); 3174 return ret; 3175 } 3176 } 3177 3178 if (skip_orig) 3179 return 0; 3180 } 3181 3182 switch (insn->type) { 3183 3184 case INSN_CALL_DYNAMIC: 3185 case INSN_JUMP_DYNAMIC: 3186 case INSN_JUMP_DYNAMIC_CONDITIONAL: 3187 WARN_FUNC("early indirect call", insn->sec, insn->offset); 3188 return 1; 3189 3190 case INSN_JUMP_UNCONDITIONAL: 3191 case INSN_JUMP_CONDITIONAL: 3192 if (!is_sibling_call(insn)) { 3193 if (!insn->jump_dest) { 3194 WARN_FUNC("unresolved jump target after linking?!?", 3195 insn->sec, insn->offset); 3196 return -1; 3197 } 3198 ret = validate_entry(file, insn->jump_dest); 3199 if (ret) { 3200 if (backtrace) { 3201 BT_FUNC("(branch%s)", insn, 3202 insn->type == INSN_JUMP_CONDITIONAL ? "-cond" : ""); 3203 } 3204 return ret; 3205 } 3206 3207 if (insn->type == INSN_JUMP_UNCONDITIONAL) 3208 return 0; 3209 3210 break; 3211 } 3212 3213 /* fallthrough */ 3214 case INSN_CALL: 3215 dest = find_insn(file, insn->call_dest->sec, 3216 insn->call_dest->offset); 3217 if (!dest) { 3218 WARN("Unresolved function after linking!?: %s", 3219 insn->call_dest->name); 3220 return -1; 3221 } 3222 3223 ret = validate_entry(file, dest); 3224 if (ret) { 3225 if (backtrace) 3226 BT_FUNC("(call)", insn); 3227 return ret; 3228 } 3229 /* 3230 * If a call returns without error, it must have seen UNTRAIN_RET. 3231 * Therefore any non-error return is a success. 3232 */ 3233 return 0; 3234 3235 case INSN_RETURN: 3236 WARN_FUNC("RET before UNTRAIN", insn->sec, insn->offset); 3237 return 1; 3238 3239 case INSN_NOP: 3240 if (insn->retpoline_safe) 3241 return 0; 3242 break; 3243 3244 default: 3245 break; 3246 } 3247 3248 if (!next) { 3249 WARN_FUNC("teh end!", insn->sec, insn->offset); 3250 return -1; 3251 } 3252 insn = next; 3253 } 3254 3255 return warnings; 3256} 3257 3258/* 3259 * Validate that all branches starting at 'insn->entry' encounter UNRET_END 3260 * before RET. 3261 */ 3262static int validate_unret(struct objtool_file *file) 3263{ 3264 struct instruction *insn; 3265 int ret, warnings = 0; 3266 3267 for_each_insn(file, insn) { 3268 if (!insn->entry) 3269 continue; 3270 3271 ret = validate_entry(file, insn); 3272 if (ret < 0) { 3273 WARN_FUNC("Failed UNRET validation", insn->sec, insn->offset); 3274 return ret; 3275 } 3276 warnings += ret; 3277 } 3278 3279 return warnings; 3280} 3281 3282static int validate_retpoline(struct objtool_file *file) 3283{ 3284 struct instruction *insn; 3285 int warnings = 0; 3286 3287 for_each_insn(file, insn) { 3288 if (insn->type != INSN_JUMP_DYNAMIC && 3289 insn->type != INSN_CALL_DYNAMIC && 3290 insn->type != INSN_RETURN) 3291 continue; 3292 3293 if (insn->retpoline_safe) 3294 continue; 3295 3296 /* 3297 * .init.text code is ran before userspace and thus doesn't 3298 * strictly need retpolines, except for modules which are 3299 * loaded late, they very much do need retpoline in their 3300 * .init.text 3301 */ 3302 if (!strcmp(insn->sec->name, ".init.text") && !module) 3303 continue; 3304 3305 if (insn->type == INSN_RETURN) { 3306 if (rethunk) { 3307 WARN_FUNC("'naked' return found in RETHUNK build", 3308 insn->sec, insn->offset); 3309 } else 3310 continue; 3311 } else { 3312 WARN_FUNC("indirect %s found in RETPOLINE build", 3313 insn->sec, insn->offset, 3314 insn->type == INSN_JUMP_DYNAMIC ? "jump" : "call"); 3315 } 3316 3317 warnings++; 3318 } 3319 3320 return warnings; 3321} 3322 3323static bool is_kasan_insn(struct instruction *insn) 3324{ 3325 return (insn->type == INSN_CALL && 3326 !strcmp(insn->call_dest->name, "__asan_handle_no_return")); 3327} 3328 3329static bool is_ubsan_insn(struct instruction *insn) 3330{ 3331 return (insn->type == INSN_CALL && 3332 !strcmp(insn->call_dest->name, 3333 "__ubsan_handle_builtin_unreachable")); 3334} 3335 3336static bool ignore_unreachable_insn(struct objtool_file *file, struct instruction *insn) 3337{ 3338 int i; 3339 struct instruction *prev_insn; 3340 3341 if (insn->ignore || insn->type == INSN_NOP || insn->type == INSN_TRAP) 3342 return true; 3343 3344 /* 3345 * Ignore any unused exceptions. This can happen when a whitelisted 3346 * function has an exception table entry. 3347 * 3348 * Also ignore alternative replacement instructions. This can happen 3349 * when a whitelisted function uses one of the ALTERNATIVE macros. 3350 */ 3351 if (!strcmp(insn->sec->name, ".fixup") || 3352 !strcmp(insn->sec->name, ".altinstr_replacement") || 3353 !strcmp(insn->sec->name, ".altinstr_aux")) 3354 return true; 3355 3356 if (!insn->func) 3357 return false; 3358 3359 /* 3360 * CONFIG_UBSAN_TRAP inserts a UD2 when it sees 3361 * __builtin_unreachable(). The BUG() macro has an unreachable() after 3362 * the UD2, which causes GCC's undefined trap logic to emit another UD2 3363 * (or occasionally a JMP to UD2). 3364 * 3365 * It may also insert a UD2 after calling a __noreturn function. 3366 */ 3367 prev_insn = list_prev_entry(insn, list); 3368 if ((prev_insn->dead_end || dead_end_function(file, prev_insn->call_dest)) && 3369 (insn->type == INSN_BUG || 3370 (insn->type == INSN_JUMP_UNCONDITIONAL && 3371 insn->jump_dest && insn->jump_dest->type == INSN_BUG))) 3372 return true; 3373 3374 /* 3375 * Check if this (or a subsequent) instruction is related to 3376 * CONFIG_UBSAN or CONFIG_KASAN. 3377 * 3378 * End the search at 5 instructions to avoid going into the weeds. 3379 */ 3380 for (i = 0; i < 5; i++) { 3381 3382 if (is_kasan_insn(insn) || is_ubsan_insn(insn)) 3383 return true; 3384 3385 if (insn->type == INSN_JUMP_UNCONDITIONAL) { 3386 if (insn->jump_dest && 3387 insn->jump_dest->func == insn->func) { 3388 insn = insn->jump_dest; 3389 continue; 3390 } 3391 3392 break; 3393 } 3394 3395 if (insn->offset + insn->len >= insn->func->offset + insn->func->len) 3396 break; 3397 3398 insn = list_next_entry(insn, list); 3399 } 3400 3401 return false; 3402} 3403 3404static int validate_symbol(struct objtool_file *file, struct section *sec, 3405 struct symbol *sym, struct insn_state *state) 3406{ 3407 struct instruction *insn; 3408 int ret; 3409 3410 if (!sym->len) { 3411 WARN("%s() is missing an ELF size annotation", sym->name); 3412 return 1; 3413 } 3414 3415 if (sym->pfunc != sym || sym->alias != sym) 3416 return 0; 3417 3418 insn = find_insn(file, sec, sym->offset); 3419 if (!insn || insn->ignore || insn->visited) 3420 return 0; 3421 3422 state->uaccess = sym->uaccess_safe; 3423 3424 ret = validate_branch(file, insn->func, insn, *state); 3425 if (ret && backtrace) 3426 BT_FUNC("<=== (sym)", insn); 3427 return ret; 3428} 3429 3430static int validate_section(struct objtool_file *file, struct section *sec) 3431{ 3432 struct insn_state state; 3433 struct symbol *func; 3434 int warnings = 0; 3435 3436 list_for_each_entry(func, &sec->symbol_list, list) { 3437 if (func->type != STT_FUNC) 3438 continue; 3439 3440 init_insn_state(&state, sec); 3441 set_func_state(&state.cfi); 3442 3443 warnings += validate_symbol(file, sec, func, &state); 3444 } 3445 3446 return warnings; 3447} 3448 3449static int validate_vmlinux_functions(struct objtool_file *file) 3450{ 3451 struct section *sec; 3452 int warnings = 0; 3453 3454 sec = find_section_by_name(file->elf, ".noinstr.text"); 3455 if (sec) { 3456 warnings += validate_section(file, sec); 3457 warnings += validate_unwind_hints(file, sec); 3458 } 3459 3460 sec = find_section_by_name(file->elf, ".entry.text"); 3461 if (sec) { 3462 warnings += validate_section(file, sec); 3463 warnings += validate_unwind_hints(file, sec); 3464 } 3465 3466 return warnings; 3467} 3468 3469static int validate_functions(struct objtool_file *file) 3470{ 3471 struct section *sec; 3472 int warnings = 0; 3473 3474 for_each_sec(file, sec) { 3475 if (!(sec->sh.sh_flags & SHF_EXECINSTR)) 3476 continue; 3477 3478 warnings += validate_section(file, sec); 3479 } 3480 3481 return warnings; 3482} 3483 3484static int validate_reachable_instructions(struct objtool_file *file) 3485{ 3486 struct instruction *insn; 3487 3488 if (file->ignore_unreachables) 3489 return 0; 3490 3491 for_each_insn(file, insn) { 3492 if (insn->visited || ignore_unreachable_insn(file, insn)) 3493 continue; 3494 3495 WARN_FUNC("unreachable instruction", insn->sec, insn->offset); 3496 return 1; 3497 } 3498 3499 return 0; 3500} 3501 3502int check(struct objtool_file *file) 3503{ 3504 int ret, warnings = 0; 3505 3506 arch_initial_func_cfi_state(&initial_func_cfi); 3507 init_cfi_state(&init_cfi); 3508 init_cfi_state(&func_cfi); 3509 set_func_state(&func_cfi); 3510 3511 if (!cfi_hash_alloc()) 3512 goto out; 3513 3514 cfi_hash_add(&init_cfi); 3515 cfi_hash_add(&func_cfi); 3516 3517 ret = decode_sections(file); 3518 if (ret < 0) 3519 goto out; 3520 3521 warnings += ret; 3522 3523 if (list_empty(&file->insn_list)) 3524 goto out; 3525 3526 if (vmlinux && !validate_dup) { 3527 ret = validate_vmlinux_functions(file); 3528 if (ret < 0) 3529 goto out; 3530 3531 warnings += ret; 3532 goto out; 3533 } 3534 3535 if (retpoline) { 3536 ret = validate_retpoline(file); 3537 if (ret < 0) 3538 return ret; 3539 warnings += ret; 3540 } 3541 3542 ret = validate_functions(file); 3543 if (ret < 0) 3544 goto out; 3545 warnings += ret; 3546 3547 ret = validate_unwind_hints(file, NULL); 3548 if (ret < 0) 3549 goto out; 3550 warnings += ret; 3551 3552 if (unret) { 3553 /* 3554 * Must be after validate_branch() and friends, it plays 3555 * further games with insn->visited. 3556 */ 3557 ret = validate_unret(file); 3558 if (ret < 0) 3559 return ret; 3560 warnings += ret; 3561 } 3562 3563 if (!warnings) { 3564 ret = validate_reachable_instructions(file); 3565 if (ret < 0) 3566 goto out; 3567 warnings += ret; 3568 } 3569 3570 ret = create_static_call_sections(file); 3571 if (ret < 0) 3572 goto out; 3573 warnings += ret; 3574 3575 if (retpoline) { 3576 ret = create_retpoline_sites_sections(file); 3577 if (ret < 0) 3578 goto out; 3579 warnings += ret; 3580 } 3581 3582 if (rethunk) { 3583 ret = create_return_sites_sections(file); 3584 if (ret < 0) 3585 goto out; 3586 warnings += ret; 3587 } 3588 3589 if (stats) { 3590 printf("nr_insns_visited: %ld\n", nr_insns_visited); 3591 printf("nr_cfi: %ld\n", nr_cfi); 3592 printf("nr_cfi_reused: %ld\n", nr_cfi_reused); 3593 printf("nr_cfi_cache: %ld\n", nr_cfi_cache); 3594 } 3595 3596out: 3597 /* 3598 * For now, don't fail the kernel build on fatal warnings. These 3599 * errors are still fairly common due to the growing matrix of 3600 * supported toolchains and their recent pace of change. 3601 */ 3602 return 0; 3603} 3604