1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * efi.c - EFI subsystem
4  *
5  * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6  * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7  * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8  *
9  * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10  * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12  * determine that the system supports EFI.
13  */
14 
15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16 
17 #include <linux/kobject.h>
18 #include <linux/module.h>
19 #include <linux/init.h>
20 #include <linux/debugfs.h>
21 #include <linux/device.h>
22 #include <linux/efi.h>
23 #include <linux/of.h>
24 #include <linux/initrd.h>
25 #include <linux/io.h>
26 #include <linux/kexec.h>
27 #include <linux/platform_device.h>
28 #include <linux/random.h>
29 #include <linux/reboot.h>
30 #include <linux/slab.h>
31 #include <linux/acpi.h>
32 #include <linux/ucs2_string.h>
33 #include <linux/memblock.h>
34 #include <linux/security.h>
35 
36 #include <asm/early_ioremap.h>
37 
/*
 * Global EFI state shared with the rest of the kernel.  Configuration
 * table addresses default to EFI_INVALID_TABLE_ADDR ("not present") until
 * efi_config_parse_tables() fills them in from the firmware tables.
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);

/* Physical address of the LINUX_EFI_RANDOM_SEED table; kept after init
 * (__ro_after_init) because the kexec path re-reads it at reboot time. */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
/* Boot-time-only (__initdata) addresses of optional configuration tables,
 * consumed by efi_config_parse_tables() and its initcall users. */
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;
57 
/*
 * Dedicated mm_struct used to host the page tables for EFI runtime
 * services mappings.  Statically initialized here; the field order and
 * the MMAP_LOCK_INITIALIZER() placement mirror the mm_struct layout.
 */
struct mm_struct efi_mm = {
	.mm_rb			= RB_ROOT,
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};

/* Ordered workqueue used to serialize all EFI runtime service calls */
struct workqueue_struct *efi_rts_wq;
70 
/* Set by "noefi" or "efi=noruntime" to disable EFI runtime services */
static bool disable_runtime;

/* "noefi" boot parameter: disable EFI runtime services entirely */
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);

/* Report whether EFI runtime services were disabled on the command line */
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

/*
 * Whether EFI_MEMORY_SP "soft reserved" regions should be honoured;
 * cleared by the "efi=nosoftreserve" option.  __pure: result depends
 * only on the (stable after boot) EFI flags.
 */
bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}
88 
parse_efi_cmdline(char *str)89 static int __init parse_efi_cmdline(char *str)
90 {
91 	if (!str) {
92 		pr_warn("need at least one option\n");
93 		return -EINVAL;
94 	}
95 
96 	if (parse_option_str(str, "debug"))
97 		set_bit(EFI_DBG, &efi.flags);
98 
99 	if (parse_option_str(str, "noruntime"))
100 		disable_runtime = true;
101 
102 	if (parse_option_str(str, "nosoftreserve"))
103 		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
104 
105 	return 0;
106 }
107 early_param("efi", parse_efi_cmdline);
108 
/* /sys/firmware/efi directory kobject, created in efisubsys_init() */
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	/* each table is emitted as NAME=0xADDR on its own line */
	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	/* architectures may append extra legacy entries */
	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

/* 0400: table addresses are root-only to avoid leaking layout info */
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
146 
/* Report the firmware word size (32 or 64) via sysfs */
static ssize_t fw_platform_size_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
}

/*
 * These attributes are optionally provided by architecture code; the
 * __weak declarations resolve to NULL-backed attributes where absent,
 * and efi_attr_is_visible() (also overridable) can hide them.
 */
extern __weak struct kobj_attribute efi_attr_fw_vendor;
extern __weak struct kobj_attribute efi_attr_runtime;
extern __weak struct kobj_attribute efi_attr_config_table;
static struct kobj_attribute efi_attr_fw_platform_size =
	__ATTR_RO(fw_platform_size);

static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,	/* sysfs sentinel */
};

/* Default policy: show every attribute with its declared mode */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};
178 
static struct efivars generic_efivars;
static struct efivar_operations generic_ops;

/*
 * Register the generic efivars backend, wiring its operations to the
 * firmware's runtime variable services.  set_variable is only exposed
 * when the firmware advertises support for it.
 */
static int generic_ops_register(void)
{
	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
}

/* Undo generic_ops_register() */
static void generic_ops_unregister(void)
{
	efivars_unregister(&generic_efivars);
}
199 
#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
/* Maximum length (including NUL) of an efivar_ssdt= variable name */
#define EFIVAR_SSDT_NAME_MAX	16
/* Name of the EFI variable holding an SSDT override, set on the cmdline */
static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;

/*
 * "efivar_ssdt=" boot parameter: name an EFI variable whose contents are
 * loaded as an ACPI SSDT.  Refused when the kernel is locked down, since
 * this allows arbitrary ACPI table injection.
 */
static int __init efivar_ssdt_setup(char *str)
{
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);

/*
 * efivar_init() iterator: collect every variable whose name matches
 * efivar_ssdt into the list passed via @data.  Returning 0 always keeps
 * the iteration going, even on allocation failure.
 */
static __init int efivar_ssdt_iter(efi_char16_t *name, efi_guid_t vendor,
				   unsigned long name_size, void *data)
{
	struct efivar_entry *entry;
	struct list_head *list = data;
	char utf8_name[EFIVAR_SSDT_NAME_MAX];
	/* bound the UTF-8 conversion by both buffer size and name length */
	int limit = min_t(unsigned long, EFIVAR_SSDT_NAME_MAX, name_size);

	ucs2_as_utf8(utf8_name, name, limit - 1);
	if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
		return 0;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return 0;

	/*
	 * NOTE(review): assumes @name_size never exceeds the size of
	 * entry->var.VariableName — TODO confirm the efivar_init() caller
	 * guarantees this bound.
	 */
	memcpy(entry->var.VariableName, name, name_size);
	memcpy(&entry->var.VendorGuid, &vendor, sizeof(efi_guid_t));

	efivar_entry_add(entry, list);

	return 0;
}

/*
 * Read the SSDT named by efivar_ssdt out of EFI variable storage and hand
 * it to ACPI.  On successful acpi_load_table() the table data is
 * deliberately NOT freed: ACPI keeps referencing it in place.
 */
static __init int efivar_ssdt_load(void)
{
	LIST_HEAD(entries);
	struct efivar_entry *entry, *aux;
	unsigned long size;
	void *data;
	int ret;

	if (!efivar_ssdt[0])
		return 0;

	ret = efivar_init(efivar_ssdt_iter, &entries, true, &entries);

	list_for_each_entry_safe(entry, aux, &entries, list) {
		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt,
			&entry->var.VendorGuid);

		list_del(&entry->list);

		ret = efivar_entry_size(entry, &size);
		if (ret) {
			pr_err("failed to get var size\n");
			goto free_entry;
		}

		data = kmalloc(size, GFP_KERNEL);
		if (!data) {
			ret = -ENOMEM;
			goto free_entry;
		}

		ret = efivar_entry_get(entry, NULL, &size, data);
		if (ret) {
			pr_err("failed to get var data\n");
			goto free_data;
		}

		ret = acpi_load_table(data, NULL);
		if (ret) {
			pr_err("failed to load table: %d\n", ret);
			goto free_data;
		}

		/* success: keep @data alive for ACPI, free only the entry */
		goto free_entry;

free_data:
		kfree(data);

free_entry:
		kfree(entry);
	}

	/* returns the status of the last processed variable */
	return ret;
}
#else
/* SSDT overrides not configured */
static inline int efivar_ssdt_load(void) { return 0; }
#endif
299 
#ifdef CONFIG_DEBUG_FS

/* Upper bound on exported boot-services blobs; excess segments are skipped */
#define EFI_DEBUGFS_MAX_BLOBS 32

static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];

/*
 * Expose preserved EFI boot services code/data regions as read-only blobs
 * under /sys/kernel/debug/efi/, one file per memory map segment.  Only
 * called when EFI_DBG and EFI_PRESERVE_BS_REGIONS are both set, so the
 * regions are guaranteed to still be mapped.
 */
static void __init efi_debugfs_init(void)
{
	struct dentry *efi_debugfs;
	efi_memory_desc_t *md;
	char name[32];
	/* per-type counters used to number the blob file names */
	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
	int i = 0;

	efi_debugfs = debugfs_create_dir("efi", NULL);
	if (IS_ERR_OR_NULL(efi_debugfs))
		return;

	for_each_efi_memory_desc(md) {
		switch (md->type) {
		case EFI_BOOT_SERVICES_CODE:
			snprintf(name, sizeof(name), "boot_services_code%d",
				 type_count[md->type]++);
			break;
		case EFI_BOOT_SERVICES_DATA:
			snprintf(name, sizeof(name), "boot_services_data%d",
				 type_count[md->type]++);
			break;
		default:
			continue;
		}

		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
			/* fixed "More then" -> "More than" in the warning */
			pr_warn("More than %d EFI boot service segments, only showing first %d in debugfs\n",
				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
			break;
		}

		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
		debugfs_blob[i].data = memremap(md->phys_addr,
						debugfs_blob[i].size,
						MEMREMAP_WB);
		/* unmappable segments are skipped, not fatal */
		if (!debugfs_blob[i].data)
			continue;

		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
		i++;
	}
}
#else
static inline void efi_debugfs_init(void) {}
#endif
352 
/*
 * We register the efi subsystem with the firmware subsystem and the
 * efivars subsystem with the efi subsystem, if the system was booted with
 * EFI.
 */
static int __init efisubsys_init(void)
{
	int error;

	/* keep the supported mask consistent with the RUNTIME_SERVICES bit */
	if (!efi_enabled(EFI_RUNTIME_SERVICES))
		efi.runtime_supported_mask = 0;

	if (!efi_enabled(EFI_BOOT))
		return 0;

	if (efi.runtime_supported_mask) {
		/*
		 * Since we process only one efi_runtime_service() at a time, an
		 * ordered workqueue (which creates only one execution context)
		 * should suffice for all our needs.
		 */
		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
		if (!efi_rts_wq) {
			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
			efi.runtime_supported_mask = 0;
			/* not fatal: continue without runtime services */
			return 0;
		}
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
		platform_device_register_simple("rtc-efi", 0, NULL, 0);

	/* We register the efi directory at /sys/firmware/efi */
	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
	if (!efi_kobj) {
		pr_err("efi: Firmware registration failed.\n");
		error = -ENOMEM;
		goto err_destroy_wq;
	}

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
		error = generic_ops_register();
		if (error)
			goto err_put;
		efivar_ssdt_load();
		platform_device_register_simple("efivars", 0, NULL, 0);
	}

	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
	if (error) {
		pr_err("efi: Sysfs attribute export failed with error %d.\n",
		       error);
		goto err_unregister;
	}

	error = efi_runtime_map_init(efi_kobj);
	if (error)
		goto err_remove_group;

	/* and the standard mountpoint for efivarfs */
	error = sysfs_create_mount_point(efi_kobj, "efivars");
	if (error) {
		pr_err("efivars: Subsystem registration failed.\n");
		goto err_remove_group;
	}

	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
		efi_debugfs_init();

	return 0;

	/* unwind in strict reverse order of the setup above */
err_remove_group:
	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
err_unregister:
	/* only registered when the variable services were supported */
	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
		generic_ops_unregister();
err_put:
	kobject_put(efi_kobj);
err_destroy_wq:
	if (efi_rts_wq)
		destroy_workqueue(efi_rts_wq);

	return error;
}

subsys_initcall(efisubsys_init);
442 
443 /*
444  * Find the efi memory descriptor for a given physical address.  Given a
445  * physical address, determine if it exists within an EFI Memory Map entry,
446  * and if so, populate the supplied memory descriptor with the appropriate
447  * data.
448  */
efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)449 int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
450 {
451 	efi_memory_desc_t *md;
452 
453 	if (!efi_enabled(EFI_MEMMAP)) {
454 		pr_err_once("EFI_MEMMAP is not enabled.\n");
455 		return -EINVAL;
456 	}
457 
458 	if (!out_md) {
459 		pr_err_once("out_md is null.\n");
460 		return -EINVAL;
461         }
462 
463 	for_each_efi_memory_desc(md) {
464 		u64 size;
465 		u64 end;
466 
467 		size = md->num_pages << EFI_PAGE_SHIFT;
468 		end = md->phys_addr + size;
469 		if (phys_addr >= md->phys_addr && phys_addr < end) {
470 			memcpy(out_md, md, sizeof(*out_md));
471 			return 0;
472 		}
473 	}
474 	return -ENOENT;
475 }
476 
477 /*
478  * Calculate the highest address of an efi memory descriptor.
479  */
efi_mem_desc_end(efi_memory_desc_t *md)480 u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
481 {
482 	u64 size = md->num_pages << EFI_PAGE_SHIFT;
483 	u64 end = md->phys_addr + size;
484 	return end;
485 }
486 
efi_arch_mem_reserve(phys_addr_t addr, u64 size)487 void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}
488 
/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* avoid double-reserving regions memblock already knows about */
	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
515 
/*
 * Architecture-independent EFI configuration tables: maps a table GUID to
 * the variable that receives its physical address, plus a short name that
 * match_config_table() prints at boot (entries with no name stay silent).
 * Terminated by an empty entry (NULL_GUID).
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
	{},
};
537 
match_config_table(const efi_guid_t *guid, unsigned long table, const efi_config_table_type_t *table_types)538 static __init int match_config_table(const efi_guid_t *guid,
539 				     unsigned long table,
540 				     const efi_config_table_type_t *table_types)
541 {
542 	int i;
543 
544 	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
545 		if (!efi_guidcmp(*guid, table_types[i].guid)) {
546 			*(table_types[i].ptr) = table;
547 			if (table_types[i].name[0])
548 				pr_cont("%s=0x%lx ",
549 					table_types[i].name, table);
550 			return 1;
551 		}
552 	}
553 
554 	return 0;
555 }
556 
/*
 * Parse the firmware-provided array of EFI configuration tables, routing
 * each GUID through common_tables and then, if unmatched, @arch_tables.
 * Afterwards, consume the Linux-specific tables discovered along the way:
 * the random seed, the memreserve list, the RT properties table and the
 * initrd location.  Returns 0 on success or a negative errno.
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	/*
	 * On x86 the native table layout depends on the firmware word size,
	 * not the kernel's, so reinterpret the array accordingly.
	 */
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	/* opens the single boot log line that match_config_table() extends */
	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			/* a 32-bit kernel cannot address tables above 4 GiB */
			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* first map just the header to learn the payload size */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			/* remap header + payload, feed it to the RNG, wipe it */
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* walk the singly-linked list of memreserve entries */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			/* ... and every region the entry records */
			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		/* restrict runtime services to what the firmware advertises */
		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	/* only adopt the EFI-provided initrd if none was set elsewhere */
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	return 0;
}
681 
/*
 * Validate the EFI system table header.  A bad signature is fatal
 * (-EINVAL); a major version below @min_major_version only logs a
 * warning (via pr_err, for visibility) and still returns 0.
 */
int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
				   int min_major_version)
{
	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
		pr_err("System table signature incorrect!\n");
		return -EINVAL;
	}

	/* revision encodes major in the high 16 bits, minor in the low 16 */
	if ((systab_hdr->revision >> 16) < min_major_version)
		pr_err("Warning: System table version %d.%02d, expected %d.00 or greater!\n",
		       systab_hdr->revision >> 16,
		       systab_hdr->revision & 0xffff,
		       min_major_version);

	return 0;
}
698 
#ifndef CONFIG_IA64
/*
 * Temporarily map the firmware vendor string (UCS-2) for early boot use.
 * Must be balanced with unmap_fw_vendor().  Returns NULL on failure.
 */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
/* IA64 has the firmware vendor directly addressable via __va() */
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif
719 
/*
 * Log the EFI revision and firmware vendor at boot.  Also applies the
 * Apple Mac quirk: pre-EFI-1.10 semantics are forced for runtime
 * services on 64-bit Apple firmware claiming a newer revision.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		/* lossy UCS-2 -> ASCII: keep the low byte of each char */
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	pr_info("EFI v%u.%.02u by %s\n",
		systab_hdr->revision >> 16,
		systab_hdr->revision & 0xffff,
		vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
748 
/*
 * Human-readable names for the EFI memory types, indexed by the numeric
 * EFI_*_MEMORY type value.  Each name must fit in 12 chars + NUL.
 */
static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
};
766 
/*
 * Format an EFI memory descriptor's type and attributes into @buf
 * (at most @size bytes), e.g. "[Runtime Data|RUN| | |...]".  Unknown
 * types are printed numerically; unknown attribute bits cause the raw
 * attribute value to be printed instead of the per-bit columns.
 * Returns @buf (possibly truncated output).
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	/* stop early if the type alone already filled the buffer */
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}
814 
815 /*
816  * IA64 has a funky EFI memory map that doesn't work the same way as
817  * other architectures.
818  */
819 #ifndef CONFIG_IA64
820 /*
821  * efi_mem_attributes - lookup memmap attributes for physical address
822  * @phys_addr: the physical address to lookup
823  *
824  * Search in the EFI memory map for the region covering
825  * @phys_addr. Returns the EFI memory attributes if the region
826  * was found in the memory map, 0 otherwise.
827  */
efi_mem_attributes(unsigned long phys_addr)828 u64 efi_mem_attributes(unsigned long phys_addr)
829 {
830 	efi_memory_desc_t *md;
831 
832 	if (!efi_enabled(EFI_MEMMAP))
833 		return 0;
834 
835 	for_each_efi_memory_desc(md) {
836 		if ((md->phys_addr <= phys_addr) &&
837 		    (phys_addr < (md->phys_addr +
838 		    (md->num_pages << EFI_PAGE_SHIFT))))
839 			return md->attribute;
840 	}
841 	return 0;
842 }
843 
844 /*
845  * efi_mem_type - lookup memmap type for physical address
846  * @phys_addr: the physical address to lookup
847  *
848  * Search in the EFI memory map for the region covering @phys_addr.
849  * Returns the EFI memory type if the region was found in the memory
850  * map, -EINVAL otherwise.
851  */
efi_mem_type(unsigned long phys_addr)852 int efi_mem_type(unsigned long phys_addr)
853 {
854 	const efi_memory_desc_t *md;
855 
856 	if (!efi_enabled(EFI_MEMMAP))
857 		return -ENOTSUPP;
858 
859 	for_each_efi_memory_desc(md) {
860 		if ((md->phys_addr <= phys_addr) &&
861 		    (phys_addr < (md->phys_addr +
862 				  (md->num_pages << EFI_PAGE_SHIFT))))
863 			return md->type;
864 	}
865 	return -EINVAL;
866 }
867 #endif
868 
/*
 * Translate an EFI status code into the closest negative errno.
 * Unrecognized statuses map to -EINVAL.
 */
int efi_status_to_err(efi_status_t status)
{
	switch (status) {
	case EFI_SUCCESS:
		return 0;
	case EFI_INVALID_PARAMETER:
		return -EINVAL;
	case EFI_OUT_OF_RESOURCES:
		return -ENOSPC;
	case EFI_DEVICE_ERROR:
		return -EIO;
	case EFI_WRITE_PROTECTED:
		return -EROFS;
	case EFI_SECURITY_VIOLATION:
		return -EACCES;
	case EFI_NOT_FOUND:
		return -ENOENT;
	case EFI_ABORTED:
		return -EINTR;
	default:
		return -EINVAL;
	}
}
904 
/* Protects insertion of new entries into the memreserve list */
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/* Head of the memreserve list; (void *)ULONG_MAX means "unavailable" */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

/*
 * Map the head of the firmware memreserve list (located via the
 * LINUX_EFI_MEMRESERVE_TABLE config table).  Returns -ENODEV when the
 * table was never provided, -ENOMEM when mapping fails.
 */
static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}
920 
/*
 * Insert a "reserved" resource for [addr, addr + size) into the iomem
 * tree so the region shows up in /proc/iomem and is kept out of general
 * use.  GFP_ATOMIC because callers may hold spinlocks.
 */
static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
{
	struct resource *res, *parent;
	int ret;

	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
	if (!res)
		return -ENOMEM;

	res->name	= "reserved";
	res->flags	= IORESOURCE_MEM;
	res->start	= addr;
	res->end	= addr + size - 1;

	/* we expect a conflict with a 'System RAM' region */
	parent = request_resource_conflict(&iomem_resource, res);
	ret = parent ? request_resource(parent, res) : 0;

	/*
	 * Given that efi_mem_reserve_iomem() can be called at any
	 * time, only call memblock_reserve() if the architecture
	 * keeps the infrastructure around.
	 */
	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
		memblock_reserve(addr, size);

	return ret;
}
949 
/*
 * Record [addr, addr + size) in the kexec-persistent memreserve list so
 * a subsequently kexec'ed kernel also keeps the region reserved.  Tries
 * to reuse a free slot in an existing list entry before allocating a
 * new page-sized entry.  __ref: may call the __init mapping helper
 * before efi_memreserve_root_init() has run.
 */
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	/* ULONG_MAX sentinel: the firmware never provided the table */
	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	/*
	 * NOTE(review): the list walk is lock-free; slot claiming relies
	 * solely on the atomic_fetch_add_unless() below — confirm
	 * concurrent callers cannot observe a partially written entry.
	 */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		/* atomically claim slot @index unless the entry is full */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	/* the list entry itself must survive into the next kernel too */
	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	/* link the new entry at the head, under the insertion lock */
	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
1011 
/*
 * Early initcall: map the memreserve list head, or latch the ULONG_MAX
 * "unavailable" sentinel so later efi_mem_reserve_persistent() callers
 * fail fast with -ENODEV.  Always returns 0 (failure is not fatal).
 */
static int __init efi_memreserve_root_init(void)
{
	if (efi_memreserve_root)
		return 0;
	if (efi_memreserve_map_root())
		efi_memreserve_root = (void *)ULONG_MAX;
	return 0;
}
early_initcall(efi_memreserve_root_init);
1021 
#ifdef CONFIG_KEXEC
/*
 * Reboot notifier: before a kexec, refill the in-memory UEFI random seed
 * table with fresh entropy so the next kernel does not reuse the seed
 * this kernel already consumed.
 */
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	/* only act on kexec reboots; normal reboots discard memory anyway */
	if (!kexec_in_progress)
		return NOTIFY_DONE;

	/* map the header first to learn how much seed data fits */
	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

/* Only register the notifier when the firmware provided a seed table */
static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
#endif
1065