// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/init/main.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * GK 2/5/95 - Changed to support mounting root fs via NFS
 * Added initrd & change_root: Werner Almesberger & Hans Lermen, Feb '96
 * Moan early if gcc is old, avoiding bogus kernels - Paul Gortmaker, May '96
 * Simplified starting of init: Michael A. Griffith <grif@acm.org>
 */

#define DEBUG /* Enable initcall_debug */

#include <linux/types.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/binfmts.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/stackprotector.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/memblock.h>
#include <linux/acpi.h>
#include <linux/bootconfig.h>
#include <linux/console.h>
#include <linux/nmi.h>
#include <linux/percpu.h>
#include <linux/kmod.h>
#include <linux/kprobes.h>
#include <linux/vmalloc.h>
#include <linux/kernel_stat.h>
#include <linux/start_kernel.h>
#include <linux/security.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/rcupdate.h>
#include <linux/moduleparam.h>
#include <linux/kallsyms.h>
#include <linux/writeback.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/cgroup.h>
#include <linux/efi.h>
#include <linux/tick.h>
#include <linux/sched/isolation.h>
#include <linux/interrupt.h>
#include <linux/taskstats_kern.h>
#include <linux/delayacct.h>
#include <linux/unistd.h>
#include <linux/utsname.h>
#include <linux/rmap.h>
#include <linux/mempolicy.h>
#include <linux/key.h>
#include <linux/buffer_head.h>
#include <linux/page_ext.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/kmemleak.h>
#include <linux/padata.h>
#include <linux/pid_namespace.h>
#include <linux/device/driver.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/sched/init.h>
#include <linux/signal.h>
#include <linux/idr.h>
#include <linux/kgdb.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/pti.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/sched/clock.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/context_tracking.h>
#include <linux/random.h>
#include <linux/list.h>
#include <linux/integrity.h>
#include <linux/proc_ns.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <linux/rodata_test.h>
#include <linux/jump_label.h>
#include <linux/mem_encrypt.h>
#include <linux/kcsan.h>
#include <linux/init_syscalls.h>
#ifdef CONFIG_RECLAIM_ACCT
#include <linux/reclaim_acct.h>
#endif

#include <asm/io.h>
#include <asm/bugs.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/initcall.h>

#include <kunit/test.h>

static int kernel_init(void *);

extern void init_IRQ(void);
extern void radix_tree_init(void);

/*
 * Debug helper: via this flag we know that we are in 'early bootup code'
 * where only the boot processor is running with IRQ disabled. This means
 * two things - IRQ must not be enabled before the flag is cleared and some
 * operations which are not allowed with IRQ disabled are allowed while the
 * flag is set.
 */
bool early_boot_irqs_disabled __read_mostly;

enum system_states system_state __read_mostly;
EXPORT_SYMBOL(system_state);

/*
 * Boot command-line arguments
 */
#define MAX_INIT_ARGS CONFIG_INIT_ENV_ARG_LIMIT
#define MAX_INIT_ENVS CONFIG_INIT_ENV_ARG_LIMIT

extern void time_init(void);
/* Default late time init is NULL. archs can override this later. */
void (*__initdata late_time_init)(void);

/* Untouched command line saved by arch-specific code. */
char __initdata boot_command_line[COMMAND_LINE_SIZE];
/* Untouched saved command line (e.g. for /proc) */
char *saved_command_line;
/* Command line for parameter parsing */
static char *static_command_line;
/* Untouched extra command line */
static char *extra_command_line;
/* Extra init arguments */
static char *extra_init_args;

#ifdef CONFIG_BOOT_CONFIG
/* Is bootconfig on command line? */
static bool bootconfig_found;
static bool initargs_found;
#else
#define bootconfig_found false
#define initargs_found false
#endif

static char *execute_command;
static char *ramdisk_execute_command = "/init";

/*
 * Used to generate warnings if static_key manipulation functions are used
 * before jump_label_init is called.
 */
bool static_key_initialized __read_mostly;
EXPORT_SYMBOL_GPL(static_key_initialized);

/*
 * If set, this is an indication to the drivers that they should reset the
 * underlying device before going ahead with the initialization; otherwise
 * the driver might rely on the BIOS and skip the reset operation.
 *
 * This is useful if the kernel is booting in an unreliable environment.
 * For example, in a kdump situation where the previous kernel has crashed,
 * the BIOS has been skipped and devices will be in an unknown state.
 */
unsigned int reset_devices;
EXPORT_SYMBOL(reset_devices);

static int __init set_reset_devices(char *str)
{
	reset_devices = 1;
	return 1;
}

__setup("reset_devices", set_reset_devices);

static const char *argv_init[MAX_INIT_ARGS + 2] = {
	"init",
	NULL,
};
const char *envp_init[MAX_INIT_ENVS + 2] = {
	"HOME=/",
	"TERM=linux",
	NULL,
};
static const char *panic_later, *panic_param;

extern const struct obs_kernel_param __setup_start[], __setup_end[];
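
/*
 * Walk the table of __setup() handlers placed by the linker between
 * __setup_start and __setup_end and offer @line to every entry whose name
 * matches. Returns true if the option has been handled - either by a
 * handler here or earlier by parse_early_param() - so the caller knows not
 * to hand it on to init.
 */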
static bool __init obsolete_checksetup(char *line)
{
	const struct obs_kernel_param *p;
	bool had_early_param = false;

	p = __setup_start;
	do {
		int n = strlen(p->str);
		if (parameqn(line, p->str, n)) {
			if (p->early) {
				/* Already done in parse_early_param?
				 * (Needs exact match on param part).
				 * Keep iterating, as we can have early
				 * params and __setups of same names 8( */
				if (line[n] == '\0' || line[n] == '=') {
					had_early_param = true;
				}
			} else if (!p->setup_func) {
				pr_warn("Parameter %s is obsolete, ignored\n", p->str);
				return true;
			} else if (p->setup_func(line + n)) {
				return true;
			}
		}
		p++;
	} while (p < __setup_end);

	return had_early_param;
}

/*
 * This should be approx 2 Bo*oMips to start (note initial shift), and will
 * still work even if initially too large, it will just take slightly longer
 */
unsigned long loops_per_jiffy = (1 << 12);
EXPORT_SYMBOL(loops_per_jiffy);

static int __init debug_kernel(char *str)
{
	console_loglevel = CONSOLE_LOGLEVEL_DEBUG;
	return 0;
}

static int __init quiet_kernel(char *str)
{
	console_loglevel = CONSOLE_LOGLEVEL_QUIET;
	return 0;
}

early_param("debug", debug_kernel);
early_param("quiet", quiet_kernel);

static int __init loglevel(char *str)
{
	int newlevel;

	/*
	 * Only update loglevel value when a correct setting was passed,
	 * to prevent blind crashes (when loglevel being set to 0) that
	 * are quite hard to debug
	 */
	if (get_option(&str, &newlevel)) {
		console_loglevel = newlevel;
		return 0;
	}

	return -EINVAL;
}

early_param("loglevel", loglevel);

#ifdef CONFIG_BLK_DEV_INITRD
static void *__init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
{
	u32 size, csum;
	char *data;
	u32 *hdr;
	int i;

	if (!initrd_end) {
		return NULL;
	}

	data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
	/*
	 * Since Grub may align the size of initrd to 4, we must
	 * check the preceding 3 bytes as well.
	 */
	for (i = 0; i < 4; i++) {
		if (!memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN)) {
			goto found;
		}
		data--;
	}
	return NULL;

found:
	hdr = (u32 *)(data - 8);
	size = le32_to_cpu(hdr[0]);
	csum = le32_to_cpu(hdr[1]);

	data = ((void *)hdr) - size;
	if ((unsigned long)data < initrd_start) {
		pr_err("bootconfig size %d is greater than initrd size %ld\n", size, initrd_end - initrd_start);
		return NULL;
	}

	/* Remove bootconfig from initramfs/initrd */
	initrd_end = (unsigned long)data;
	if (_size) {
		*_size = size;
	}
	if (_csum) {
		*_csum = csum;
	}

	return data;
}
#else
static void *__init get_boot_config_from_initrd(u32 *_size, u32 *_csum)
{
	return NULL;
}
#endif

#ifdef CONFIG_BOOT_CONFIG

static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata;
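
/*
 * xbc_snprint_cmdline() below composes a 'key="value" ' pair for every
 * key/value under @root into @buf. The rest() helper clamps the remaining
 * buffer space at zero, so the function can first be called with a NULL/0
 * buffer purely to measure how many bytes the generated command line needs.
 */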
#define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0)

static int __init xbc_snprint_cmdline(char *buf, size_t size, struct xbc_node *root)
{
	struct xbc_node *knode, *vnode;
	char *end = buf + size;
	const char *val;
	int ret;

	xbc_node_for_each_key_value(root, knode, val)
	{
		ret = xbc_node_compose_key_after(root, knode, xbc_namebuf, XBC_KEYLEN_MAX);
		if (ret < 0) {
			return ret;
		}

		vnode = xbc_node_get_child(knode);
		if (!vnode) {
			ret = snprintf(buf, rest(buf, end), "%s ", xbc_namebuf);
			if (ret < 0) {
				return ret;
			}
			buf += ret;
			continue;
		}
		xbc_array_for_each_value(vnode, val)
		{
			ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ", xbc_namebuf, val);
			if (ret < 0) {
				return ret;
			}
			buf += ret;
		}
	}

	return buf - (end - size);
}
#undef rest

/* Make an extra command line under given key word */
static char *__init xbc_make_cmdline(const char *key)
{
	struct xbc_node *root;
	char *new_cmdline;
	int ret, len = 0;

	root = xbc_find_node(key);
	if (!root) {
		return NULL;
	}

	/* Count required buffer size */
	len = xbc_snprint_cmdline(NULL, 0, root);
	if (len <= 0) {
		return NULL;
	}

	new_cmdline = memblock_alloc(len + 1, SMP_CACHE_BYTES);
	if (!new_cmdline) {
		pr_err("Failed to allocate memory for extra kernel cmdline.\n");
		return NULL;
	}

	ret = xbc_snprint_cmdline(new_cmdline, len + 1, root);
	if (ret < 0 || ret > len) {
		pr_err("Failed to print extra kernel cmdline.\n");
		memblock_free(__pa(new_cmdline), len + 1);
		return NULL;
	}

	return new_cmdline;
}

static u32 boot_config_checksum(unsigned char *p, u32 size)
{
	u32 ret = 0;

	while (size--) {
		ret += *p++;
	}

	return ret;
}

static int __init bootconfig_params(char *param, char *val, const char *unused, void *arg)
{
	if (strcmp(param, "bootconfig") == 0) {
		bootconfig_found = true;
	}
	return 0;
}
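
/*
 * Locate the bootconfig blob appended to the initrd and, when "bootconfig"
 * is present on the kernel command line, parse it. Keys under "kernel." are
 * folded into the kernel command line via extra_command_line and keys under
 * "init." are handed to init via extra_init_args, e.g. (illustrative keys
 * only)
 *
 *	kernel.audit = 1
 *	init.splash = verbose
 *
 * becomes 'audit="1"' on the command line and 'splash="verbose"' among the
 * init arguments.
 */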
static void __init setup_boot_config(const char *cmdline)
{
	static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
	const char *msg;
	int pos;
	u32 size, csum;
	char *data, *copy, *err;
	int ret;

	/* Cut out the bootconfig data even if we have no bootconfig option */
	data = get_boot_config_from_initrd(&size, &csum);

	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	err = parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL, bootconfig_params);

	if (IS_ERR(err) || !bootconfig_found) {
		return;
	}

	/* parse_args() stops at '--' and returns an address */
	if (err) {
		initargs_found = true;
	}

	if (!data) {
		pr_err("'bootconfig' found on command line, but no bootconfig found\n");
		return;
	}

	if (size >= XBC_DATA_MAX) {
		pr_err("bootconfig size %d greater than max size %d\n", size, XBC_DATA_MAX);
		return;
	}

	if (boot_config_checksum((unsigned char *)data, size) != csum) {
		pr_err("bootconfig checksum failed\n");
		return;
	}

	copy = memblock_alloc(size + 1, SMP_CACHE_BYTES);
	if (!copy) {
		pr_err("Failed to allocate memory for bootconfig\n");
		return;
	}

	memcpy(copy, data, size);
	copy[size] = '\0';

	ret = xbc_init(copy, &msg, &pos);
	if (ret < 0) {
		if (pos < 0) {
			pr_err("Failed to init bootconfig: %s.\n", msg);
		} else {
			pr_err("Failed to parse bootconfig: %s at %d.\n", msg, pos);
		}
	} else {
		pr_info("Load bootconfig: %d bytes %d nodes\n", size, ret);
		/* keys starting with "kernel." are passed via cmdline */
		extra_command_line = xbc_make_cmdline("kernel");
		/* Also, "init." keys are init arguments */
		extra_init_args = xbc_make_cmdline("init");
	}
	return;
}

#else

static void __init setup_boot_config(const char *cmdline)
{
	/* Remove bootconfig data from initrd */
	get_boot_config_from_initrd(NULL, NULL);
}

static int __init warn_bootconfig(char *str)
{
	pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOT_CONFIG is not set.\n");
	return 0;
}
early_param("bootconfig", warn_bootconfig);

#endif

/* Change NUL term back to "=", to make "param" the whole string. */
static void __init repair_env_string(char *param, char *val)
{
	if (val) {
		/* param=val or param="val"? */
		if (val == param + strlen(param) + 1) {
			val[-1] = '=';
		} else if (val == param + strlen(param) + 2) {
			val[-2] = '=';
			memmove(val - 1, val, strlen(val) + 1);
		} else {
			BUG();
		}
	}
}

/* Anything after -- gets handed straight to init. */
static int __init set_init_arg(char *param, char *val, const char *unused, void *arg)
{
	unsigned int i;

	if (panic_later) {
		return 0;
	}

	repair_env_string(param, val);

	for (i = 0; argv_init[i]; i++) {
		if (i == MAX_INIT_ARGS) {
			panic_later = "init";
			panic_param = param;
			return 0;
		}
	}
	argv_init[i] = param;
	return 0;
}

/*
 * Unknown boot options get handed to init, unless they look like
 * unused parameters (modprobe will find them in /proc/cmdline).
 */
static int __init unknown_bootoption(char *param, char *val, const char *unused, void *arg)
{
	size_t len = strlen(param);

	repair_env_string(param, val);

	/* Handle obsolete-style parameters */
	if (obsolete_checksetup(param)) {
		return 0;
	}

	/* Unused module parameter. */
	if (strnchr(param, len, '.')) {
		return 0;
	}

	if (panic_later) {
		return 0;
	}

	if (val) {
		/* Environment option */
		unsigned int i;
		for (i = 0; envp_init[i]; i++) {
			if (i == MAX_INIT_ENVS) {
				panic_later = "env";
				panic_param = param;
			}
			if (!strncmp(param, envp_init[i], len + 1)) {
				break;
			}
		}
		envp_init[i] = param;
	} else {
		/* Command line option */
		unsigned int i;
		for (i = 0; argv_init[i]; i++) {
			if (i == MAX_INIT_ARGS) {
				panic_later = "init";
				panic_param = param;
			}
		}
		argv_init[i] = param;
	}
	return 0;
}
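
/*
 * "init=" on the command line overrides the default init binary, e.g.
 * booting with init=/bin/sh runs a shell as PID 1 instead of /sbin/init;
 * "rdinit=" below does the same for the in-initramfs init (default /init).
 */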
static int __init init_setup(char *str)
{
	unsigned int i;

	execute_command = str;
	/*
	 * In case LILO is going to boot us with default command line,
	 * it prepends "auto" before the whole cmdline which makes
	 * the shell think it should execute a script with such name.
	 * So we ignore all arguments entered _before_ init=... [MJ]
	 */
	for (i = 1; i < MAX_INIT_ARGS; i++) {
		argv_init[i] = NULL;
	}
	return 1;
}
__setup("init=", init_setup);

static int __init rdinit_setup(char *str)
{
	unsigned int i;

	ramdisk_execute_command = str;
	/* See "auto" comment in init_setup */
	for (i = 1; i < MAX_INIT_ARGS; i++) {
		argv_init[i] = NULL;
	}
	return 1;
}
__setup("rdinit=", rdinit_setup);

#ifndef CONFIG_SMP
static const unsigned int setup_max_cpus = NR_CPUS;
static inline void setup_nr_cpu_ids(void)
{
}
static inline void smp_prepare_cpus(unsigned int maxcpus)
{
}
#endif

/*
 * We need to store the untouched command line for future reference.
 * We also need to store the touched command line since the parameter
 * parsing is performed in place, and we should allow a component to
 * store reference of name/value for future reference.
 */
static void __init setup_command_line(char *command_line)
{
	size_t len, xlen = 0, ilen = 0;

	if (extra_command_line) {
		xlen = strlen(extra_command_line);
	}
	if (extra_init_args) {
		ilen = strlen(extra_init_args) + 4; /* for " -- " */
	}

	len = xlen + strlen(boot_command_line) + 1;

	saved_command_line = memblock_alloc(len + ilen, SMP_CACHE_BYTES);
	if (!saved_command_line) {
		panic("%s: Failed to allocate %zu bytes\n", __func__, len + ilen);
	}

	static_command_line = memblock_alloc(len, SMP_CACHE_BYTES);
	if (!static_command_line) {
		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
	}

	if (xlen) {
		/*
		 * We have to put extra_command_line before boot command
		 * lines because there could be dashes (separator of init
		 * command line) in the command lines.
		 */
		strcpy(saved_command_line, extra_command_line);
		strcpy(static_command_line, extra_command_line);
	}
	strcpy(saved_command_line + xlen, boot_command_line);
	strcpy(static_command_line + xlen, command_line);

	if (ilen) {
		/*
		 * Append supplemental init boot args to saved_command_line
		 * so that user can check what command line options passed
		 * to init.
		 */
		len = strlen(saved_command_line);
		if (initargs_found) {
			saved_command_line[len++] = ' ';
		} else {
			strcpy(saved_command_line + len, " -- ");
			len += 4;
		}

		strcpy(saved_command_line + len, extra_init_args);
	}
}

/*
 * We need to finalize in a non-__init function or else race conditions
 * between the root thread and the init thread may cause start_kernel to
 * be reaped by free_initmem before the root thread has proceeded to
 * cpu_idle.
 *
 * gcc-3.4 accidentally inlines this function, so use noinline.
 */

static __initdata DECLARE_COMPLETION(kthreadd_done);

noinline void __ref rest_init(void)
{
	struct task_struct *tsk;
	int pid;

	rcu_scheduler_starting();
	/*
	 * We need to spawn init first so that it obtains pid 1, however
	 * the init task will end up wanting to create kthreads, which, if
	 * we schedule it before we create kthreadd, will OOPS.
	 */
	pid = kernel_thread(kernel_init, NULL, CLONE_FS);
	/*
	 * Pin init on the boot CPU. Task migration is not properly working
	 * until sched_init_smp() has been run. It will set the allowed
	 * CPUs for init to the non isolated CPUs.
	 */
	rcu_read_lock();
	tsk = find_task_by_pid_ns(pid, &init_pid_ns);
	set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
	rcu_read_unlock();

	numa_default_policy();
	pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
	rcu_read_lock();
	kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
	rcu_read_unlock();

	/*
	 * Enable might_sleep() and smp_processor_id() checks.
	 * They cannot be enabled earlier because with CONFIG_PREEMPTION=y
	 * kernel_thread() would trigger might_sleep() splats. With
	 * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
	 * already, but it's stuck on the kthreadd_done completion.
	 */
	system_state = SYSTEM_SCHEDULING;

	complete(&kthreadd_done);

	/*
	 * The boot idle thread must execute schedule()
	 * at least once to get things moving:
	 */
	schedule_preempt_disabled();
	/* Call into cpu_idle with preempt disabled */
	cpu_startup_entry(CPUHP_ONLINE);
}

/* Check for early params. */
static int __init do_early_param(char *param, char *val, const char *unused, void *arg)
{
	const struct obs_kernel_param *p;

	for (p = __setup_start; p < __setup_end; p++) {
		if ((p->early && parameq(param, p->str)) ||
		    (strcmp(param, "console") == 0 && strcmp(p->str, "earlycon") == 0)) {
			if (p->setup_func(val) != 0) {
				pr_warn("Malformed early option '%s'\n", param);
			}
		}
	}
	/* We accept everything at this stage. */
	return 0;
}

void __init parse_early_options(char *cmdline)
{
	parse_args("early options", cmdline, NULL, 0, 0, 0, NULL, do_early_param);
}

/* Arch code calls this early on, or if not, just before other parsing. */
void __init parse_early_param(void)
{
	static int done __initdata;
	static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;

	if (done) {
		return;
	}

	/* All fall through to do_early_param. */
	strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
	parse_early_options(tmp_cmdline);
	done = 1;
}

void __init __weak arch_post_acpi_subsys_init(void)
{
}

void __init __weak smp_setup_processor_id(void)
{
}

#if THREAD_SIZE >= PAGE_SIZE
void __init __weak thread_stack_cache_init(void)
{
}
#endif

void __init __weak mem_encrypt_init(void)
{
}

void __init __weak poking_init(void)
{
}

void __init __weak pgtable_cache_init(void)
{
}

bool initcall_debug;
core_param(initcall_debug, initcall_debug, bool, 0644);

#ifdef TRACEPOINTS_ENABLED
static void __init initcall_debug_enable(void);
#else
static inline void initcall_debug_enable(void)
{
}
#endif

/* Report memory auto-initialization states for this boot. */
static void __init report_meminit(void)
{
	const char *stack;

	if (IS_ENABLED(CONFIG_INIT_STACK_ALL_PATTERN)) {
		stack = "all(pattern)";
	} else if (IS_ENABLED(CONFIG_INIT_STACK_ALL_ZERO)) {
		stack = "all(zero)";
	} else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF_ALL)) {
		stack = "byref_all(zero)";
	} else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_BYREF)) {
		stack = "byref(zero)";
	} else if (IS_ENABLED(CONFIG_GCC_PLUGIN_STRUCTLEAK_USER)) {
		stack = "__user(zero)";
	} else {
		stack = "off";
	}

	pr_info("mem auto-init: stack:%s, heap alloc:%s, heap free:%s\n", stack,
		want_init_on_alloc(GFP_KERNEL) ? "on" : "off", want_init_on_free() ? "on" : "off");
	if (want_init_on_free()) {
		pr_info("mem auto-init: clearing system memory may take some time...\n");
	}
}

/*
 * Set up kernel memory allocators
 */
static void __init mm_init(void)
{
	/*
	 * page_ext requires contiguous pages,
	 * bigger than MAX_ORDER unless SPARSEMEM.
	 */
	page_ext_init_flatmem();
	init_debug_pagealloc();
	report_meminit();
	mem_init();
	/* page_owner must be initialized after buddy is ready */
	page_ext_init_flatmem_late();
	kmem_cache_init();
	kmemleak_init();
	pgtable_init();
	debug_objects_mem_init();
	vmalloc_init();
	ioremap_huge_init();
	/* Should be run before the first non-init thread is created */
	init_espfix_bsp();
	/* Should be run after espfix64 is set up. */
	pti_init();
}

void __init __weak arch_call_rest_init(void)
{
	rest_init();
}

asmlinkage __visible void __init __no_sanitize_address start_kernel(void)
{
	char *command_line;
	char *after_dashes;

	set_task_stack_end_magic(&init_task);
	smp_setup_processor_id();
	debug_objects_early_init();

	cgroup_init_early();

	local_irq_disable();
	early_boot_irqs_disabled = true;

	/*
	 * Interrupts are still disabled. Do necessary setups, then
	 * enable them.
	 */
	boot_cpu_init();
	page_address_init();
	pr_notice("%s", linux_banner);
	early_security_init();
	setup_arch(&command_line);
	setup_boot_config(command_line);
	setup_command_line(command_line);
	setup_nr_cpu_ids();
	setup_per_cpu_areas();
	smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
	boot_cpu_hotplug_init();

	build_all_zonelists(NULL);
	page_alloc_init();

	pr_notice("Kernel command line: %s\n", saved_command_line);
	/* parameters may set static keys */
	jump_label_init();
	parse_early_param();
	after_dashes = parse_args("Booting kernel", static_command_line, __start___param, __stop___param - __start___param,
				  -1, -1, NULL, &unknown_bootoption);
	if (!IS_ERR_OR_NULL(after_dashes)) {
		parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, NULL, set_init_arg);
	}
	if (extra_init_args) {
		parse_args("Setting extra init args", extra_init_args, NULL, 0, -1, -1, NULL, set_init_arg);
	}

	/*
	 * These use large bootmem allocations and must precede
	 * kmem_cache_init()
	 */
	setup_log_buf(0);
	vfs_caches_init_early();
	sort_main_extable();
	trap_init();
	mm_init();

	ftrace_init();

	/* trace_printk can be enabled here */
	early_trace_init();

	/*
	 * Set up the scheduler prior starting any interrupts (such as the
	 * timer interrupt). Full topology setup happens at smp_init()
	 * time - but meanwhile we still have a functioning scheduler.
	 */
	sched_init();

	if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) {
		local_irq_disable();
	}
	radix_tree_init();

	/*
	 * Set up housekeeping before setting up workqueues to allow the unbound
	 * workqueue to take non-housekeeping into account.
	 */
	housekeeping_init();

	/*
	 * Allow workqueue creation and work item queueing/cancelling
	 * early. Work item execution depends on kthreads and starts after
	 * workqueue_init().
	 */
	workqueue_init_early();

	rcu_init();

	/* Trace events are available after this */
	trace_init();

	if (initcall_debug) {
		initcall_debug_enable();
	}

	context_tracking_init();
	/* init some links before init_ISA_irqs() */
	early_irq_init();
	init_IRQ();
	tick_init();
	rcu_init_nohz();
	init_timers();
	hrtimers_init();
	softirq_init();
	timekeeping_init();
	time_init();

	/*
	 * For best initial stack canary entropy, prepare it after:
	 * - setup_arch() for any UEFI RNG entropy and boot cmdline access
	 * - timekeeping_init() for ktime entropy used in rand_initialize()
	 * - rand_initialize() to get any arch-specific entropy like RDRAND
	 * - add_latent_entropy() to get any latent entropy
	 * - adding command line entropy
	 */
	random_init(command_line);
	boot_init_stack_canary();

	perf_event_init();
	profile_init();
	call_function_init();
	WARN(!irqs_disabled(), "Interrupts were enabled early\n");

	early_boot_irqs_disabled = false;
	local_irq_enable();

	kmem_cache_init_late();

	/*
	 * HACK ALERT! This is early. We're enabling the console before
	 * we've done PCI setups etc, and console_init() must be aware of
	 * this. But we do want output early, in case something goes wrong.
	 */
	console_init();
	if (panic_later) {
		panic("Too many boot %s vars at `%s'", panic_later, panic_param);
	}

	lockdep_init();

	/*
	 * Need to run this when irqs are enabled, because it wants
	 * to self-test [hard/soft]-irqs on/off lock inversion bugs
	 * too:
	 */
	locking_selftest();

	/*
	 * This needs to be called before any devices perform DMA
	 * operations that might use the SWIOTLB bounce buffers. It will
	 * mark the bounce buffers as decrypted so that their usage will
	 * not cause "plain-text" data to be decrypted when accessed.
	 */
	mem_encrypt_init();

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && !initrd_below_start_ok && page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) {
		pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n",
			page_to_pfn(virt_to_page((void *)initrd_start)), min_low_pfn);
		initrd_start = 0;
	}
#endif
	setup_per_cpu_pageset();
	numa_policy_init();
	acpi_early_init();
	if (late_time_init) {
		late_time_init();
	}
	sched_clock_init();
	calibrate_delay();
	pid_idr_init();
	anon_vma_init();
#ifdef CONFIG_X86
	if (efi_enabled(EFI_RUNTIME_SERVICES)) {
		efi_enter_virtual_mode();
	}
#endif
	thread_stack_cache_init();
	cred_init();
	fork_init();
	proc_caches_init();
	uts_ns_init();
	buffer_init();
	key_init();
	security_init();
	dbg_late_init();
	vfs_caches_init();
	pagecache_init();
	signals_init();
	seq_file_init();
	proc_root_init();
	nsfs_init();
	cpuset_init();
	cgroup_init();
	taskstats_init_early();
	delayacct_init();
#ifdef CONFIG_RECLAIM_ACCT
	reclaimacct_init();
#endif

	poking_init();
	check_bugs();

	acpi_subsystem_init();
	arch_post_acpi_subsys_init();
	sfi_init_late();
	kcsan_init();

	/* Do the rest non-__init'ed, we're now alive */
	arch_call_rest_init();

	prevent_tail_call_optimization();
}
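
/*
 * __ctors_start/__ctors_end are laid down by the linker script and hold the
 * kernel's own constructor functions (only populated when
 * CONFIG_CONSTRUCTORS is set, typically for GCOV-style instrumentation).
 */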
/* Call all constructor functions linked into the kernel. */
static void __init do_ctors(void)
{
#ifdef CONFIG_CONSTRUCTORS
	ctor_fn_t *fn = (ctor_fn_t *)__ctors_start;

	for (; fn < (ctor_fn_t *)__ctors_end; fn++) {
		(*fn)();
	}
#endif
}

#ifdef CONFIG_KALLSYMS
struct blacklist_entry {
	struct list_head next;
	char *buf;
};

static __initdata_or_module LIST_HEAD(blacklisted_initcalls);

static int __init initcall_blacklist(char *str)
{
	char *str_entry;
	struct blacklist_entry *entry;

	/* str argument is a comma-separated list of functions */
	do {
		str_entry = strsep(&str, ",");
		if (str_entry) {
			pr_debug("blacklisting initcall %s\n", str_entry);
			entry = memblock_alloc(sizeof(*entry), SMP_CACHE_BYTES);
			if (!entry) {
				panic("%s: Failed to allocate %zu bytes\n", __func__, sizeof(*entry));
			}
			entry->buf = memblock_alloc(strlen(str_entry) + 1, SMP_CACHE_BYTES);
			if (!entry->buf) {
				panic("%s: Failed to allocate %zu bytes\n", __func__, strlen(str_entry) + 1);
			}
			strcpy(entry->buf, str_entry);
			list_add(&entry->next, &blacklisted_initcalls);
		}
	} while (str_entry);

	return 1;
}

static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
	struct blacklist_entry *entry;
	char fn_name[KSYM_SYMBOL_LEN];
	unsigned long addr;

	if (list_empty(&blacklisted_initcalls)) {
		return false;
	}

	addr = (unsigned long)dereference_function_descriptor(fn);
	sprint_symbol_no_offset(fn_name, addr);

	/*
	 * fn will be "function_name [module_name]" where [module_name] is not
	 * displayed for built-in init functions. Strip off the [module_name].
	 */
	strreplace(fn_name, ' ', '\0');

	list_for_each_entry(entry, &blacklisted_initcalls, next)
	{
		if (!strcmp(fn_name, entry->buf)) {
			pr_debug("initcall %s blacklisted\n", fn_name);
			return true;
		}
	}

	return false;
}
#else
static int __init initcall_blacklist(char *str)
{
	pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n");
	return 0;
}

static bool __init_or_module initcall_blacklisted(initcall_t fn)
{
	return false;
}
#endif
__setup("initcall_blacklist=", initcall_blacklist);

static __init_or_module void trace_initcall_start_cb(void *data, initcall_t fn)
{
	ktime_t *calltime = (ktime_t *)data;

	printk(KERN_DEBUG "calling %pS @ %i\n", fn, task_pid_nr(current));
	*calltime = ktime_get();
}

static __init_or_module void trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
{
	ktime_t *calltime = (ktime_t *)data;
	ktime_t delta, rettime;
	unsigned long long duration;

	rettime = ktime_get();
	delta = ktime_sub(rettime, *calltime);
	duration = (unsigned long long)ktime_to_ns(delta) >> 10;
	printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n", fn, ret, duration);
}

static ktime_t initcall_calltime;
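
/*
 * When booted with "initcall_debug", the callbacks above log one line per
 * initcall, e.g. "calling foo_init+0x0/0x20 @ 1" and "initcall foo_init
 * returned 0 after 12 usecs" (symbol names illustrative). With tracepoints
 * available they are registered on the initcall_start/initcall_finish
 * tracepoints; otherwise the inline stubs below invoke them directly.
 */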
#ifdef TRACEPOINTS_ENABLED
static void __init initcall_debug_enable(void)
{
	int ret;

	ret = register_trace_initcall_start(trace_initcall_start_cb, &initcall_calltime);
	ret |= register_trace_initcall_finish(trace_initcall_finish_cb, &initcall_calltime);
	WARN(ret, "Failed to register initcall tracepoints\n");
}
#define do_trace_initcall_start trace_initcall_start
#define do_trace_initcall_finish trace_initcall_finish
#else
static inline void do_trace_initcall_start(initcall_t fn)
{
	if (!initcall_debug) {
		return;
	}
	trace_initcall_start_cb(&initcall_calltime, fn);
}
static inline void do_trace_initcall_finish(initcall_t fn, int ret)
{
	if (!initcall_debug) {
		return;
	}
	trace_initcall_finish_cb(&initcall_calltime, fn, ret);
}
#endif /* !TRACEPOINTS_ENABLED */

int __init_or_module do_one_initcall(initcall_t fn)
{
	int count = preempt_count();
	char msgbuf[64];
	int ret;

	if (initcall_blacklisted(fn)) {
		return -EPERM;
	}

	do_trace_initcall_start(fn);
	ret = fn();
	do_trace_initcall_finish(fn, ret);

	msgbuf[0] = 0;

	if (preempt_count() != count) {
		sprintf(msgbuf, "preemption imbalance ");
		preempt_count_set(count);
	}
	if (irqs_disabled()) {
		strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
		local_irq_enable();
	}
	WARN(msgbuf[0], "initcall %pS returned with %s\n", fn, msgbuf);

	add_latent_entropy();
	return ret;
}

extern initcall_entry_t __initcall_start[];
extern initcall_entry_t __initcall0_start[];
extern initcall_entry_t __initcall1_start[];
extern initcall_entry_t __initcall2_start[];
extern initcall_entry_t __initcall3_start[];
extern initcall_entry_t __initcall4_start[];
extern initcall_entry_t __initcall5_start[];
extern initcall_entry_t __initcall6_start[];
extern initcall_entry_t __initcall7_start[];
extern initcall_entry_t __initcall_end[];

static initcall_entry_t *initcall_levels[] __initdata = {
	__initcall0_start, __initcall1_start, __initcall2_start, __initcall3_start, __initcall4_start,
	__initcall5_start, __initcall6_start, __initcall7_start, __initcall_end,
};

/* Keep these in sync with initcalls in include/linux/init.h */
static const char *initcall_level_names[] __initdata = {
	"pure", "core", "postcore", "arch", "subsys", "fs", "device", "late",
};
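
/*
 * The level names correspond to the registration macros in
 * include/linux/init.h: pure_initcall(), core_initcall(),
 * postcore_initcall(), arch_initcall(), subsys_initcall(), fs_initcall(),
 * device_initcall() (what module_init() becomes for built-in code) and
 * late_initcall(), executed in that order by do_initcalls() below.
 */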

static int __init ignore_unknown_bootoption(char *param, char *val, const char *unused, void *arg)
{
	return 0;
}

static void __init do_initcall_level(int level, char *command_line)
{
	initcall_entry_t *fn;

	parse_args(initcall_level_names[level], command_line, __start___param, __stop___param - __start___param, level,
		   level, NULL, ignore_unknown_bootoption);

	trace_initcall_level(initcall_level_names[level]);
	for (fn = initcall_levels[level]; fn < initcall_levels[level + 1]; fn++) {
		do_one_initcall(initcall_from_entry(fn));
	}
}

static void __init do_initcalls(void)
{
	int level;
	size_t len = strlen(saved_command_line) + 1;
	char *command_line;

	command_line = kzalloc(len, GFP_KERNEL);
	if (!command_line) {
		panic("%s: Failed to allocate %zu bytes\n", __func__, len);
	}

	for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
		/* Parser modifies command_line, restore it each time */
		strcpy(command_line, saved_command_line);
		do_initcall_level(level, command_line);
	}

	kfree(command_line);
}

/*
 * Ok, the machine is now initialized. None of the devices
 * have been touched yet, but the CPU subsystem is up and
 * running, and memory and process management works.
 *
 * Now we can finally start doing some real work..
 */
static void __init do_basic_setup(void)
{
	cpuset_init_smp();
	driver_init();
	init_irq_proc();
	do_ctors();
	usermodehelper_enable();
	do_initcalls();
}

static void __init do_pre_smp_initcalls(void)
{
	initcall_entry_t *fn;

	trace_initcall_level("early");
	for (fn = __initcall_start; fn < __initcall0_start; fn++) {
		do_one_initcall(initcall_from_entry(fn));
	}
}

static int run_init_process(const char *init_filename)
{
	const char *const *p;

	argv_init[0] = init_filename;
	pr_info("Run %s as init process\n", init_filename);
	pr_debug("  with arguments:\n");
	for (p = argv_init; *p; p++) {
		pr_debug("    %s\n", *p);
	}
	pr_debug("  with environment:\n");
	for (p = envp_init; *p; p++) {
		pr_debug("    %s\n", *p);
	}
	return kernel_execve(init_filename, argv_init, envp_init);
}

static int try_to_run_init_process(const char *init_filename)
{
	int ret;

	ret = run_init_process(init_filename);
	if (ret && ret != -ENOENT) {
		pr_err("Starting init: %s exists but couldn't execute it (error %d)\n", init_filename, ret);
	}

	return ret;
}

static noinline void __init kernel_init_freeable(void);

#if defined(CONFIG_STRICT_KERNEL_RWX) || defined(CONFIG_STRICT_MODULE_RWX)
bool rodata_enabled __ro_after_init = true;
static int __init set_debug_rodata(char *str)
{
	if (strtobool(str, &rodata_enabled)) {
		pr_warn("Invalid option string for rodata: '%s'\n", str);
	}
	return 1;
}
__setup("rodata=", set_debug_rodata);
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
static void mark_readonly(void)
{
	if (rodata_enabled) {
		/*
		 * load_module() results in W+X mappings, which are cleaned
		 * up with call_rcu(). Let's make sure that queued work is
		 * flushed so that we don't hit false positives looking for
		 * insecure pages which are W+X.
		 */
		rcu_barrier();
		mark_rodata_ro();
		rodata_test();
	} else {
		pr_info("Kernel memory protection disabled.\n");
	}
}
#elif defined(CONFIG_ARCH_HAS_STRICT_KERNEL_RWX)
static inline void mark_readonly(void)
{
	pr_warn("Kernel memory protection not selected by kernel config.\n");
}
#else
static inline void mark_readonly(void)
{
	pr_warn("This architecture does not have kernel memory protection.\n");
}
#endif

void __weak free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

static int __ref kernel_init(void *unused)
{
	int ret;

	kernel_init_freeable();
	/* need to finish all async __init code before freeing the memory */
	async_synchronize_full();
	kprobe_free_init_mem();
	ftrace_free_init_mem();
	kgdb_free_init_mem();
	free_initmem();
	mark_readonly();

	/*
	 * Kernel mappings are now finalized - update the userspace page-table
	 * to finalize PTI.
	 */
	pti_finalize();

	system_state = SYSTEM_RUNNING;
	numa_default_policy();

	rcu_end_inkernel_boot();

	do_sysctl_args();

	if (ramdisk_execute_command) {
		ret = run_init_process(ramdisk_execute_command);
		if (!ret) {
			return 0;
		}
		pr_err("Failed to execute %s (error %d)\n", ramdisk_execute_command, ret);
	}

	/*
	 * We try each of these until one succeeds.
	 *
	 * The Bourne shell can be used instead of init if we are
	 * trying to recover a really broken machine.
	 */
	if (execute_command) {
		ret = run_init_process(execute_command);
		if (!ret) {
			return 0;
		}
		panic("Requested init %s failed (error %d).", execute_command, ret);
	}

	if (CONFIG_DEFAULT_INIT[0] != '\0') {
		ret = run_init_process(CONFIG_DEFAULT_INIT);
		if (ret) {
			pr_err("Default init %s failed (error %d)\n", CONFIG_DEFAULT_INIT, ret);
		} else {
			return 0;
		}
	}

	if (!try_to_run_init_process("/sbin/init") || !try_to_run_init_process("/etc/init") ||
	    !try_to_run_init_process("/bin/init") || !try_to_run_init_process("/bin/sh")) {
		return 0;
	}

	panic("No working init found. Try passing init= option to kernel. "
	      "See Linux Documentation/admin-guide/init.rst for guidance.");
}

/* Open /dev/console, for stdin/stdout/stderr, this should never fail */
void __init console_on_rootfs(void)
{
	struct file *file = filp_open("/dev/console", O_RDWR, 0);

	if (IS_ERR(file)) {
		pr_err("Warning: unable to open an initial console.\n");
		return;
	}
	init_dup(file);
	init_dup(file);
	init_dup(file);
	fput(file);
}

static noinline void __init kernel_init_freeable(void)
{
	/*
	 * Wait until kthreadd is all set-up.
	 */
	wait_for_completion(&kthreadd_done);

	/* Now the scheduler is fully set up and can do blocking allocations */
	gfp_allowed_mask = __GFP_BITS_MASK;

	/*
	 * init can allocate pages on any node
	 */
	set_mems_allowed(node_states[N_MEMORY]);

	cad_pid = get_pid(task_pid(current));

	smp_prepare_cpus(setup_max_cpus);

	workqueue_init();

	init_mm_internals();

	rcu_init_tasks_generic();
	do_pre_smp_initcalls();
	lockup_detector_init();

	smp_init();
	sched_init_smp();

#ifdef CONFIG_ROCKCHIP_THUNDER_BOOT
	kthread_run(defer_free_memblock, NULL, "defer_mem");
#endif
	padata_init();
	page_alloc_init_late();
	/* Initialize page ext after all struct pages are initialized. */
	page_ext_init();

	do_basic_setup();

	kunit_run_all_tests();

#if IS_BUILTIN(CONFIG_INITRD_ASYNC)
	async_synchronize_full();
#endif

	console_on_rootfs();

	/*
	 * check if there is an early userspace init. If yes, let it do all
	 * the work
	 */
	if (init_eaccess(ramdisk_execute_command) != 0) {
		ramdisk_execute_command = NULL;
		prepare_namespace();
	}

	/*
	 * Ok, we have completed the initial bootup, and
	 * we're essentially up and running. Get rid of the
	 * initmem segments and start the user-mode stuff..
	 *
	 * rootfs is available now, try loading the public keys
	 * and default modules
	 */

	integrity_load_keys();
}