1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * efi.c - EFI subsystem
4 *
5 * Copyright (C) 2001,2003,2004 Dell <Matt_Domsch@dell.com>
6 * Copyright (C) 2004 Intel Corporation <matthew.e.tolentino@intel.com>
7 * Copyright (C) 2013 Tom Gundersen <teg@jklm.no>
8 *
9 * This code registers /sys/firmware/efi{,/efivars} when EFI is supported,
10 * allowing the efivarfs to be mounted or the efivars module to be loaded.
 * The existence of /sys/firmware/efi may also be used by userspace to
12 * determine that the system supports EFI.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/kobject.h>
18#include <linux/module.h>
19#include <linux/init.h>
20#include <linux/debugfs.h>
21#include <linux/device.h>
22#include <linux/efi.h>
23#include <linux/of.h>
24#include <linux/initrd.h>
25#include <linux/io.h>
26#include <linux/kexec.h>
27#include <linux/platform_device.h>
28#include <linux/random.h>
29#include <linux/reboot.h>
30#include <linux/slab.h>
31#include <linux/acpi.h>
32#include <linux/ucs2_string.h>
33#include <linux/memblock.h>
34#include <linux/security.h>
35
36#include <asm/early_ioremap.h>
37
/*
 * Global EFI state.  All configuration table addresses default to
 * EFI_INVALID_TABLE_ADDR and are filled in by match_config_table() when
 * the corresponding vendor table is found in the firmware config table.
 */
struct efi __read_mostly efi = {
	.runtime_supported_mask = EFI_RT_SUPPORTED_ALL,
	.acpi			= EFI_INVALID_TABLE_ADDR,
	.acpi20			= EFI_INVALID_TABLE_ADDR,
	.smbios			= EFI_INVALID_TABLE_ADDR,
	.smbios3		= EFI_INVALID_TABLE_ADDR,
	.esrt			= EFI_INVALID_TABLE_ADDR,
	.tpm_log		= EFI_INVALID_TABLE_ADDR,
	.tpm_final_log		= EFI_INVALID_TABLE_ADDR,
#ifdef CONFIG_LOAD_UEFI_KEYS
	.mokvar_table		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	.coco_secret		= EFI_INVALID_TABLE_ADDR,
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	.unaccepted		= EFI_INVALID_TABLE_ADDR,
#endif
};
EXPORT_SYMBOL(efi);
58
/* Physical address of the LINUX_EFI_RANDOM_SEED table, if present */
unsigned long __ro_after_init efi_rng_seed = EFI_INVALID_TABLE_ADDR;
/* Table addresses only needed during early boot; discarded after init */
static unsigned long __initdata mem_reserve = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata rt_prop = EFI_INVALID_TABLE_ADDR;
static unsigned long __initdata initrd = EFI_INVALID_TABLE_ADDR;

/* Filled in via the LINUX_EFI_SCREEN_INFO_TABLE_GUID entry in common_tables[] */
extern unsigned long screen_info_table;
65
/*
 * Statically initialized mm_struct owned by the EFI subsystem.
 * NOTE(review): presumably backs the page tables used for EFI runtime
 * mappings — the consumers live outside this file; confirm before relying
 * on that in documentation.
 */
struct mm_struct efi_mm = {
	.mm_mt			= MTREE_INIT_EXT(mm_mt, MM_MT_FLAGS, efi_mm.mmap_lock),
	.mm_users		= ATOMIC_INIT(2),
	.mm_count		= ATOMIC_INIT(1),
	.write_protect_seq      = SEQCNT_ZERO(efi_mm.write_protect_seq),
	MMAP_LOCK_INITIALIZER(efi_mm)
	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
	.cpu_bitmap		= { [BITS_TO_LONGS(NR_CPUS)] = 0},
};
76
/* Ordered workqueue used to serialize EFI runtime service calls */
struct workqueue_struct *efi_rts_wq;

/* "noefi" on the command line disables use of EFI runtime services */
static bool disable_runtime = IS_ENABLED(CONFIG_EFI_DISABLE_RUNTIME);
static int __init setup_noefi(char *arg)
{
	disable_runtime = true;
	return 0;
}
early_param("noefi", setup_noefi);
86
/* True when "noefi"/"efi=noruntime" was given, or the Kconfig default */
bool efi_runtime_disabled(void)
{
	return disable_runtime;
}

/* Soft reservations are honored unless "efi=nosoftreserve" was passed */
bool __pure __efi_soft_reserve_enabled(void)
{
	return !efi_enabled(EFI_MEM_NO_SOFT_RESERVE);
}
96
97static int __init parse_efi_cmdline(char *str)
98{
99	if (!str) {
100		pr_warn("need at least one option\n");
101		return -EINVAL;
102	}
103
104	if (parse_option_str(str, "debug"))
105		set_bit(EFI_DBG, &efi.flags);
106
107	if (parse_option_str(str, "noruntime"))
108		disable_runtime = true;
109
110	if (parse_option_str(str, "runtime"))
111		disable_runtime = false;
112
113	if (parse_option_str(str, "nosoftreserve"))
114		set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
115
116	return 0;
117}
118early_param("efi", parse_efi_cmdline);
119
/* The /sys/firmware/efi directory kobject */
struct kobject *efi_kobj;

/*
 * Let's not leave out systab information that snuck into
 * the efivars driver
 * Note, do not add more fields in systab sysfs file as it breaks sysfs
 * one value per file rule!
 */
static ssize_t systab_show(struct kobject *kobj,
			   struct kobj_attribute *attr, char *buf)
{
	char *str = buf;

	if (!kobj || !buf)
		return -EINVAL;

	if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
	if (efi.acpi != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
	/*
	 * If both SMBIOS and SMBIOS3 entry points are implemented, the
	 * SMBIOS3 entry point shall be preferred, so we list it first to
	 * let applications stop parsing after the first match.
	 */
	if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
	if (efi.smbios != EFI_INVALID_TABLE_ADDR)
		str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);

	/* x86 and IA64 append additional arch-specific table entries */
	if (IS_ENABLED(CONFIG_IA64) || IS_ENABLED(CONFIG_X86))
		str = efi_systab_show_arch(str);

	return str - buf;
}

/* 0400: legacy addresses are root-only readable */
static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
157
158static ssize_t fw_platform_size_show(struct kobject *kobj,
159				     struct kobj_attribute *attr, char *buf)
160{
161	return sprintf(buf, "%d\n", efi_enabled(EFI_64BIT) ? 64 : 32);
162}
163
164extern __weak struct kobj_attribute efi_attr_fw_vendor;
165extern __weak struct kobj_attribute efi_attr_runtime;
166extern __weak struct kobj_attribute efi_attr_config_table;
167static struct kobj_attribute efi_attr_fw_platform_size =
168	__ATTR_RO(fw_platform_size);
169
/* Attributes exposed under /sys/firmware/efi */
static struct attribute *efi_subsys_attrs[] = {
	&efi_attr_systab.attr,
	&efi_attr_fw_platform_size.attr,
	&efi_attr_fw_vendor.attr,
	&efi_attr_runtime.attr,
	&efi_attr_config_table.attr,
	NULL,
};

/* Default: show every attribute; architectures may override (weak) */
umode_t __weak efi_attr_is_visible(struct kobject *kobj, struct attribute *attr,
				   int n)
{
	return attr->mode;
}

static const struct attribute_group efi_subsys_attr_group = {
	.attrs = efi_subsys_attrs,
	.is_visible = efi_attr_is_visible,
};
189
190static struct efivars generic_efivars;
191static struct efivar_operations generic_ops;
192
193static bool generic_ops_supported(void)
194{
195	unsigned long name_size;
196	efi_status_t status;
197	efi_char16_t name;
198	efi_guid_t guid;
199
200	name_size = sizeof(name);
201
202	status = efi.get_next_variable(&name_size, &name, &guid);
203	if (status == EFI_UNSUPPORTED)
204		return false;
205
206	return true;
207}
208
/*
 * Wire the firmware's runtime variable services into generic_ops and
 * register them with the efivars layer.  SetVariable is only hooked up
 * when the firmware advertises support for it.  Returns 0 (without
 * registering) when variable services are entirely unsupported.
 */
static int generic_ops_register(void)
{
	if (!generic_ops_supported())
		return 0;

	generic_ops.get_variable = efi.get_variable;
	generic_ops.get_next_variable = efi.get_next_variable;
	generic_ops.query_variable_store = efi_query_variable_store;
	generic_ops.query_variable_info = efi.query_variable_info;

	if (efi_rt_services_supported(EFI_RT_SUPPORTED_SET_VARIABLE)) {
		generic_ops.set_variable = efi.set_variable;
		generic_ops.set_variable_nonblocking = efi.set_variable_nonblocking;
	}
	return efivars_register(&generic_efivars, &generic_ops);
}
225
226static void generic_ops_unregister(void)
227{
228	if (!generic_ops.get_variable)
229		return;
230
231	efivars_unregister(&generic_efivars);
232}
233
234#ifdef CONFIG_EFI_CUSTOM_SSDT_OVERLAYS
235#define EFIVAR_SSDT_NAME_MAX	16UL
236static char efivar_ssdt[EFIVAR_SSDT_NAME_MAX] __initdata;
/*
 * Parse the "efivar_ssdt=" kernel parameter: record the name of the EFI
 * variable that holds an SSDT override, to be picked up later by
 * efivar_ssdt_load().
 */
static int __init efivar_ssdt_setup(char *str)
{
	/* Loading ACPI tables from EFI variables is lockdown-sensitive */
	int ret = security_locked_down(LOCKDOWN_ACPI_TABLES);

	if (ret)
		return ret;

	/* Keep the name only if it fits the buffer including the NUL */
	if (strlen(str) < sizeof(efivar_ssdt))
		memcpy(efivar_ssdt, str, strlen(str));
	else
		pr_warn("efivar_ssdt: name too long: %s\n", str);
	return 1;
}
__setup("efivar_ssdt=", efivar_ssdt_setup);
251
252static __init int efivar_ssdt_load(void)
253{
254	unsigned long name_size = 256;
255	efi_char16_t *name = NULL;
256	efi_status_t status;
257	efi_guid_t guid;
258
259	if (!efivar_ssdt[0])
260		return 0;
261
262	name = kzalloc(name_size, GFP_KERNEL);
263	if (!name)
264		return -ENOMEM;
265
266	for (;;) {
267		char utf8_name[EFIVAR_SSDT_NAME_MAX];
268		unsigned long data_size = 0;
269		void *data;
270		int limit;
271
272		status = efi.get_next_variable(&name_size, name, &guid);
273		if (status == EFI_NOT_FOUND) {
274			break;
275		} else if (status == EFI_BUFFER_TOO_SMALL) {
276			efi_char16_t *name_tmp =
277				krealloc(name, name_size, GFP_KERNEL);
278			if (!name_tmp) {
279				kfree(name);
280				return -ENOMEM;
281			}
282			name = name_tmp;
283			continue;
284		}
285
286		limit = min(EFIVAR_SSDT_NAME_MAX, name_size);
287		ucs2_as_utf8(utf8_name, name, limit - 1);
288		if (strncmp(utf8_name, efivar_ssdt, limit) != 0)
289			continue;
290
291		pr_info("loading SSDT from variable %s-%pUl\n", efivar_ssdt, &guid);
292
293		status = efi.get_variable(name, &guid, NULL, &data_size, NULL);
294		if (status != EFI_BUFFER_TOO_SMALL || !data_size)
295			return -EIO;
296
297		data = kmalloc(data_size, GFP_KERNEL);
298		if (!data)
299			return -ENOMEM;
300
301		status = efi.get_variable(name, &guid, NULL, &data_size, data);
302		if (status == EFI_SUCCESS) {
303			acpi_status ret = acpi_load_table(data, NULL);
304			if (ret)
305				pr_err("failed to load table: %u\n", ret);
306			else
307				continue;
308		} else {
309			pr_err("failed to get var data: 0x%lx\n", status);
310		}
311		kfree(data);
312	}
313	return 0;
314}
315#else
316static inline int efivar_ssdt_load(void) { return 0; }
317#endif
318
319#ifdef CONFIG_DEBUG_FS
320
321#define EFI_DEBUGFS_MAX_BLOBS 32
322
323static struct debugfs_blob_wrapper debugfs_blob[EFI_DEBUGFS_MAX_BLOBS];
324
325static void __init efi_debugfs_init(void)
326{
327	struct dentry *efi_debugfs;
328	efi_memory_desc_t *md;
329	char name[32];
330	int type_count[EFI_BOOT_SERVICES_DATA + 1] = {};
331	int i = 0;
332
333	efi_debugfs = debugfs_create_dir("efi", NULL);
334	if (IS_ERR_OR_NULL(efi_debugfs))
335		return;
336
337	for_each_efi_memory_desc(md) {
338		switch (md->type) {
339		case EFI_BOOT_SERVICES_CODE:
340			snprintf(name, sizeof(name), "boot_services_code%d",
341				 type_count[md->type]++);
342			break;
343		case EFI_BOOT_SERVICES_DATA:
344			snprintf(name, sizeof(name), "boot_services_data%d",
345				 type_count[md->type]++);
346			break;
347		default:
348			continue;
349		}
350
351		if (i >= EFI_DEBUGFS_MAX_BLOBS) {
352			pr_warn("More then %d EFI boot service segments, only showing first %d in debugfs\n",
353				EFI_DEBUGFS_MAX_BLOBS, EFI_DEBUGFS_MAX_BLOBS);
354			break;
355		}
356
357		debugfs_blob[i].size = md->num_pages << EFI_PAGE_SHIFT;
358		debugfs_blob[i].data = memremap(md->phys_addr,
359						debugfs_blob[i].size,
360						MEMREMAP_WB);
361		if (!debugfs_blob[i].data)
362			continue;
363
364		debugfs_create_blob(name, 0400, efi_debugfs, &debugfs_blob[i]);
365		i++;
366	}
367}
368#else
369static inline void efi_debugfs_init(void) {}
370#endif
371
372/*
373 * We register the efi subsystem with the firmware subsystem and the
374 * efivars subsystem with the efi subsystem, if the system was booted with
375 * EFI.
376 */
377static int __init efisubsys_init(void)
378{
379	int error;
380
381	if (!efi_enabled(EFI_RUNTIME_SERVICES))
382		efi.runtime_supported_mask = 0;
383
384	if (!efi_enabled(EFI_BOOT))
385		return 0;
386
387	if (efi.runtime_supported_mask) {
388		/*
389		 * Since we process only one efi_runtime_service() at a time, an
390		 * ordered workqueue (which creates only one execution context)
391		 * should suffice for all our needs.
392		 */
393		efi_rts_wq = alloc_ordered_workqueue("efi_rts_wq", 0);
394		if (!efi_rts_wq) {
395			pr_err("Creating efi_rts_wq failed, EFI runtime services disabled.\n");
396			clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
397			efi.runtime_supported_mask = 0;
398			return 0;
399		}
400	}
401
402	if (efi_rt_services_supported(EFI_RT_SUPPORTED_TIME_SERVICES))
403		platform_device_register_simple("rtc-efi", 0, NULL, 0);
404
405	/* We register the efi directory at /sys/firmware/efi */
406	efi_kobj = kobject_create_and_add("efi", firmware_kobj);
407	if (!efi_kobj) {
408		pr_err("efi: Firmware registration failed.\n");
409		error = -ENOMEM;
410		goto err_destroy_wq;
411	}
412
413	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
414				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME)) {
415		error = generic_ops_register();
416		if (error)
417			goto err_put;
418		efivar_ssdt_load();
419		platform_device_register_simple("efivars", 0, NULL, 0);
420	}
421
422	error = sysfs_create_group(efi_kobj, &efi_subsys_attr_group);
423	if (error) {
424		pr_err("efi: Sysfs attribute export failed with error %d.\n",
425		       error);
426		goto err_unregister;
427	}
428
429	/* and the standard mountpoint for efivarfs */
430	error = sysfs_create_mount_point(efi_kobj, "efivars");
431	if (error) {
432		pr_err("efivars: Subsystem registration failed.\n");
433		goto err_remove_group;
434	}
435
436	if (efi_enabled(EFI_DBG) && efi_enabled(EFI_PRESERVE_BS_REGIONS))
437		efi_debugfs_init();
438
439#ifdef CONFIG_EFI_COCO_SECRET
440	if (efi.coco_secret != EFI_INVALID_TABLE_ADDR)
441		platform_device_register_simple("efi_secret", 0, NULL, 0);
442#endif
443
444	return 0;
445
446err_remove_group:
447	sysfs_remove_group(efi_kobj, &efi_subsys_attr_group);
448err_unregister:
449	if (efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE |
450				      EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME))
451		generic_ops_unregister();
452err_put:
453	kobject_put(efi_kobj);
454	efi_kobj = NULL;
455err_destroy_wq:
456	if (efi_rts_wq)
457		destroy_workqueue(efi_rts_wq);
458
459	return error;
460}
461
462subsys_initcall(efisubsys_init);
463
464void __init efi_find_mirror(void)
465{
466	efi_memory_desc_t *md;
467	u64 mirror_size = 0, total_size = 0;
468
469	if (!efi_enabled(EFI_MEMMAP))
470		return;
471
472	for_each_efi_memory_desc(md) {
473		unsigned long long start = md->phys_addr;
474		unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
475
476		total_size += size;
477		if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
478			memblock_mark_mirror(start, size);
479			mirror_size += size;
480		}
481	}
482	if (mirror_size)
483		pr_info("Memory: %lldM/%lldM mirrored memory\n",
484			mirror_size>>20, total_size>>20);
485}
486
487/*
488 * Find the efi memory descriptor for a given physical address.  Given a
489 * physical address, determine if it exists within an EFI Memory Map entry,
490 * and if so, populate the supplied memory descriptor with the appropriate
491 * data.
492 */
493int __efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
494{
495	efi_memory_desc_t *md;
496
497	if (!efi_enabled(EFI_MEMMAP)) {
498		pr_err_once("EFI_MEMMAP is not enabled.\n");
499		return -EINVAL;
500	}
501
502	if (!out_md) {
503		pr_err_once("out_md is null.\n");
504		return -EINVAL;
505        }
506
507	for_each_efi_memory_desc(md) {
508		u64 size;
509		u64 end;
510
511		/* skip bogus entries (including empty ones) */
512		if ((md->phys_addr & (EFI_PAGE_SIZE - 1)) ||
513		    (md->num_pages <= 0) ||
514		    (md->num_pages > (U64_MAX - md->phys_addr) >> EFI_PAGE_SHIFT))
515			continue;
516
517		size = md->num_pages << EFI_PAGE_SHIFT;
518		end = md->phys_addr + size;
519		if (phys_addr >= md->phys_addr && phys_addr < end) {
520			memcpy(out_md, md, sizeof(*out_md));
521			return 0;
522		}
523	}
524	return -ENOENT;
525}
526
527extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
528	__weak __alias(__efi_mem_desc_lookup);
529
530/*
531 * Calculate the highest address of an efi memory descriptor.
532 */
533u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
534{
535	u64 size = md->num_pages << EFI_PAGE_SHIFT;
536	u64 end = md->phys_addr + size;
537	return end;
538}
539
/* Architectures override this to pin boot-services regions; default no-op */
void __init __weak efi_arch_mem_reserve(phys_addr_t addr, u64 size) {}

/**
 * efi_mem_reserve - Reserve an EFI memory region
 * @addr: Physical address to reserve
 * @size: Size of reservation
 *
 * Mark a region as reserved from general kernel allocation and
 * prevent it being released by efi_free_boot_services().
 *
 * This function should be called by drivers once they've parsed EFI
 * configuration tables to figure out where their data lives, e.g.
 * efi_esrt_init().
 */
void __init efi_mem_reserve(phys_addr_t addr, u64 size)
{
	/* efi_mem_reserve() does not work under Xen */
	if (WARN_ON_ONCE(efi_enabled(EFI_PARAVIRT)))
		return;

	if (!memblock_is_region_reserved(addr, size))
		memblock_reserve(addr, size);

	/*
	 * Some architectures (x86) reserve all boot services ranges
	 * until efi_free_boot_services() because of buggy firmware
	 * implementations. This means the above memblock_reserve() is
	 * superfluous on x86 and instead what it needs to do is
	 * ensure the @start, @size is not freed.
	 */
	efi_arch_mem_reserve(addr, size);
}
572
/*
 * GUID -> storage-location map for the vendor tables this file cares
 * about.  The name (when present) is printed by match_config_table();
 * entries without a name are matched silently.
 */
static const efi_config_table_type_t common_tables[] __initconst = {
	{ACPI_20_TABLE_GUID,			&efi.acpi20,		"ACPI 2.0"	},
	{ACPI_TABLE_GUID,			&efi.acpi,		"ACPI"		},
	{SMBIOS_TABLE_GUID,			&efi.smbios,		"SMBIOS"	},
	{SMBIOS3_TABLE_GUID,			&efi.smbios3,		"SMBIOS 3.0"	},
	{EFI_SYSTEM_RESOURCE_TABLE_GUID,	&efi.esrt,		"ESRT"		},
	{EFI_MEMORY_ATTRIBUTES_TABLE_GUID,	&efi_mem_attr_table,	"MEMATTR"	},
	{LINUX_EFI_RANDOM_SEED_TABLE_GUID,	&efi_rng_seed,		"RNG"		},
	{LINUX_EFI_TPM_EVENT_LOG_GUID,		&efi.tpm_log,		"TPMEventLog"	},
	{LINUX_EFI_TPM_FINAL_LOG_GUID,		&efi.tpm_final_log,	"TPMFinalLog"	},
	{LINUX_EFI_MEMRESERVE_TABLE_GUID,	&mem_reserve,		"MEMRESERVE"	},
	{LINUX_EFI_INITRD_MEDIA_GUID,		&initrd,		"INITRD"	},
	{EFI_RT_PROPERTIES_TABLE_GUID,		&rt_prop,		"RTPROP"	},
#ifdef CONFIG_EFI_RCI2_TABLE
	{DELLEMC_EFI_RCI2_TABLE_GUID,		&rci2_table_phys			},
#endif
#ifdef CONFIG_LOAD_UEFI_KEYS
	{LINUX_EFI_MOK_VARIABLE_TABLE_GUID,	&efi.mokvar_table,	"MOKvar"	},
#endif
#ifdef CONFIG_EFI_COCO_SECRET
	{LINUX_EFI_COCO_SECRET_AREA_GUID,	&efi.coco_secret,	"CocoSecret"	},
#endif
#ifdef CONFIG_UNACCEPTED_MEMORY
	{LINUX_EFI_UNACCEPTED_MEM_TABLE_GUID,	&efi.unaccepted,	"Unaccepted"	},
#endif
#ifdef CONFIG_EFI_GENERIC_STUB
	{LINUX_EFI_SCREEN_INFO_TABLE_GUID,	&screen_info_table			},
#endif
	{},
};
603
604static __init int match_config_table(const efi_guid_t *guid,
605				     unsigned long table,
606				     const efi_config_table_type_t *table_types)
607{
608	int i;
609
610	for (i = 0; efi_guidcmp(table_types[i].guid, NULL_GUID); i++) {
611		if (efi_guidcmp(*guid, table_types[i].guid))
612			continue;
613
614		if (!efi_config_table_is_usable(guid, table)) {
615			if (table_types[i].name[0])
616				pr_cont("(%s=0x%lx unusable) ",
617					table_types[i].name, table);
618			return 1;
619		}
620
621		*(table_types[i].ptr) = table;
622		if (table_types[i].name[0])
623			pr_cont("%s=0x%lx ", table_types[i].name, table);
624		return 1;
625	}
626
627	return 0;
628}
629
630/**
631 * reserve_unaccepted - Map and reserve unaccepted configuration table
632 * @unaccepted: Pointer to unaccepted memory table
633 *
634 * memblock_add() makes sure that the table is mapped in direct mapping. During
635 * normal boot it happens automatically because the table is allocated from
636 * usable memory. But during crashkernel boot only memory specifically reserved
637 * for crash scenario is mapped. memblock_add() forces the table to be mapped
638 * in crashkernel case.
639 *
640 * Align the range to the nearest page borders. Ranges smaller than page size
641 * are not going to be mapped.
642 *
643 * memblock_reserve() makes sure that future allocations will not touch the
644 * table.
645 */
646
static __init void reserve_unaccepted(struct efi_unaccepted_memory *unaccepted)
{
	phys_addr_t start, size;

	/* Align outward to page borders so the whole table is covered */
	start = PAGE_ALIGN_DOWN(efi.unaccepted);
	size = PAGE_ALIGN(sizeof(*unaccepted) + unaccepted->size);

	/* Map the range (needed in the crashkernel case), then pin it */
	memblock_add(start, size);
	memblock_reserve(start, size);
}
657
/*
 * Walk the firmware's configuration table array, matching each entry
 * against common_tables[] and the optional @arch_tables, then act on the
 * Linux-specific tables that were found (RNG seed, memreserve list,
 * RT properties, initrd, unaccepted memory).
 *
 * On x86 the array layout depends on the firmware word size, hence the
 * tbl64/tbl32 aliasing of @config_tables.
 */
int __init efi_config_parse_tables(const efi_config_table_t *config_tables,
				   int count,
				   const efi_config_table_type_t *arch_tables)
{
	const efi_config_table_64_t *tbl64 = (void *)config_tables;
	const efi_config_table_32_t *tbl32 = (void *)config_tables;
	const efi_guid_t *guid;
	unsigned long table;
	int i;

	/* Open a pr_cont() line; match_config_table() appends to it */
	pr_info("");
	for (i = 0; i < count; i++) {
		if (!IS_ENABLED(CONFIG_X86)) {
			guid = &config_tables[i].guid;
			table = (unsigned long)config_tables[i].table;
		} else if (efi_enabled(EFI_64BIT)) {
			guid = &tbl64[i].guid;
			table = tbl64[i].table;

			if (IS_ENABLED(CONFIG_X86_32) &&
			    tbl64[i].table > U32_MAX) {
				pr_cont("\n");
				pr_err("Table located above 4GB, disabling EFI.\n");
				return -EINVAL;
			}
		} else {
			guid = &tbl32[i].guid;
			table = tbl32[i].table;
		}

		/* arch tables are only consulted when no common entry matched */
		if (!match_config_table(guid, table, common_tables) && arch_tables)
			match_config_table(guid, table, arch_tables);
	}
	pr_cont("\n");
	set_bit(EFI_CONFIG_TABLES, &efi.flags);

	if (efi_rng_seed != EFI_INVALID_TABLE_ADDR) {
		struct linux_efi_random_seed *seed;
		u32 size = 0;

		/* Map the header first to learn the payload size */
		seed = early_memremap(efi_rng_seed, sizeof(*seed));
		if (seed != NULL) {
			size = min_t(u32, seed->size, SZ_1K); // sanity check
			early_memunmap(seed, sizeof(*seed));
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
		if (size > 0) {
			/* Remap header + payload, consume and scrub the seed */
			seed = early_memremap(efi_rng_seed,
					      sizeof(*seed) + size);
			if (seed != NULL) {
				add_bootloader_randomness(seed->bits, size);
				memzero_explicit(seed->bits, size);
				early_memunmap(seed, sizeof(*seed) + size);
			} else {
				pr_err("Could not map UEFI random seed!\n");
			}
		}
	}

	if (!IS_ENABLED(CONFIG_X86_32) && efi_enabled(EFI_MEMMAP))
		efi_memattr_init();

	efi_tpm_eventlog_init();

	if (mem_reserve != EFI_INVALID_TABLE_ADDR) {
		unsigned long prsv = mem_reserve;

		/* Walk the singly linked list of memreserve pages */
		while (prsv) {
			struct linux_efi_memreserve *rsv;
			u8 *p;

			/*
			 * Just map a full page: that is what we will get
			 * anyway, and it permits us to map the entire entry
			 * before knowing its size.
			 */
			p = early_memremap(ALIGN_DOWN(prsv, PAGE_SIZE),
					   PAGE_SIZE);
			if (p == NULL) {
				pr_err("Could not map UEFI memreserve entry!\n");
				return -ENOMEM;
			}

			rsv = (void *)(p + prsv % PAGE_SIZE);

			/* reserve the entry itself */
			memblock_reserve(prsv,
					 struct_size(rsv, entry, rsv->size));

			for (i = 0; i < atomic_read(&rsv->count); i++) {
				memblock_reserve(rsv->entry[i].base,
						 rsv->entry[i].size);
			}

			prsv = rsv->next;
			early_memunmap(p, PAGE_SIZE);
		}
	}

	if (rt_prop != EFI_INVALID_TABLE_ADDR) {
		efi_rt_properties_table_t *tbl;

		/* The RT properties table can only narrow the supported mask */
		tbl = early_memremap(rt_prop, sizeof(*tbl));
		if (tbl) {
			efi.runtime_supported_mask &= tbl->runtime_services_supported;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) &&
	    initrd != EFI_INVALID_TABLE_ADDR && phys_initrd_size == 0) {
		struct linux_efi_initrd *tbl;

		tbl = early_memremap(initrd, sizeof(*tbl));
		if (tbl) {
			phys_initrd_start = tbl->base;
			phys_initrd_size = tbl->size;
			early_memunmap(tbl, sizeof(*tbl));
		}
	}

	if (IS_ENABLED(CONFIG_UNACCEPTED_MEMORY) &&
	    efi.unaccepted != EFI_INVALID_TABLE_ADDR) {
		struct efi_unaccepted_memory *unaccepted;

		unaccepted = early_memremap(efi.unaccepted, sizeof(*unaccepted));
		if (unaccepted) {

			/* Only table version 1 is understood; drop others */
			if (unaccepted->version == 1) {
				reserve_unaccepted(unaccepted);
			} else {
				efi.unaccepted = EFI_INVALID_TABLE_ADDR;
			}

			early_memunmap(unaccepted, sizeof(*unaccepted));
		}
	}

	return 0;
}
799
800int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
801{
802	if (systab_hdr->signature != EFI_SYSTEM_TABLE_SIGNATURE) {
803		pr_err("System table signature incorrect!\n");
804		return -EINVAL;
805	}
806
807	return 0;
808}
809
#ifndef CONFIG_IA64
/*
 * Temporarily map the firmware vendor string for reading.  On IA64 the
 * table is directly addressable via __va() (see the #else branch), so no
 * mapping is needed there.
 */
static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
						size_t size)
{
	const efi_char16_t *ret;

	ret = early_memremap_ro(fw_vendor, size);
	if (!ret)
		pr_err("Could not map the firmware vendor!\n");
	return ret;
}

static void __init unmap_fw_vendor(const void *fw_vendor, size_t size)
{
	early_memunmap((void *)fw_vendor, size);
}
#else
#define map_fw_vendor(p, s)	__va(p)
#define unmap_fw_vendor(v, s)
#endif
830
/*
 * Log the firmware revision and vendor, e.g. "EFI v2.7 by American
 * Megatrends".  Also clamps the runtime revision on Apple Macs whose
 * firmware misbehaves with post-1.10 runtime services.
 */
void __init efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
				     unsigned long fw_vendor)
{
	char vendor[100] = "unknown";
	const efi_char16_t *c16;
	size_t i;
	u16 rev;

	c16 = map_fw_vendor(fw_vendor, sizeof(vendor) * sizeof(efi_char16_t));
	if (c16) {
		/* Naive UCS-2 -> char narrowing; stops at NUL or buffer end */
		for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
			vendor[i] = c16[i];
		vendor[i] = '\0';

		unmap_fw_vendor(c16, sizeof(vendor) * sizeof(efi_char16_t));
	}

	/* revision: major in the high 16 bits, minor*10 (+patch) in the low */
	rev = (u16)systab_hdr->revision;
	pr_info("EFI v%u.%u", systab_hdr->revision >> 16, rev / 10);

	rev %= 10;
	if (rev)
		pr_cont(".%u", rev);

	pr_cont(" by %s\n", vendor);

	if (IS_ENABLED(CONFIG_X86_64) &&
	    systab_hdr->revision > EFI_1_10_SYSTEM_TABLE_REVISION &&
	    !strcmp(vendor, "Apple")) {
		pr_info("Apple Mac detected, using EFI v1.10 runtime services only\n");
		efi.runtime_version = EFI_1_10_SYSTEM_TABLE_REVISION;
	}
}
864
/*
 * Human-readable names for EFI memory types, indexed by md->type.
 * Element width 13 = longest name ("Conventional") plus the NUL.
 */
static __initdata char memory_type_name[][13] = {
	"Reserved",
	"Loader Code",
	"Loader Data",
	"Boot Code",
	"Boot Data",
	"Runtime Code",
	"Runtime Data",
	"Conventional",
	"Unusable",
	"ACPI Reclaim",
	"ACPI Mem NVS",
	"MMIO",
	"MMIO Port",
	"PAL Code",
	"Persistent",
	"Unaccepted",
};
883
/*
 * Format "[<type>|<attr flags>]" for an EFI memory descriptor into @buf
 * (at most @size bytes) and return @buf.  Unknown types are printed
 * numerically; unknown attribute bits force a raw hex dump instead of
 * the per-flag column layout.
 */
char * __init efi_md_typeattr_format(char *buf, size_t size,
				     const efi_memory_desc_t *md)
{
	char *pos;
	int type_len;
	u64 attr;

	pos = buf;
	if (md->type >= ARRAY_SIZE(memory_type_name))
		type_len = snprintf(pos, size, "[type=%u", md->type);
	else
		type_len = snprintf(pos, size, "[%-*s",
				    (int)(sizeof(memory_type_name[0]) - 1),
				    memory_type_name[md->type]);
	/* Bail out if the type alone already filled (or overflowed) @buf */
	if (type_len >= size)
		return buf;

	pos += type_len;
	size -= type_len;

	attr = md->attribute;
	if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
		     EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_RO |
		     EFI_MEMORY_WP | EFI_MEMORY_RP | EFI_MEMORY_XP |
		     EFI_MEMORY_NV | EFI_MEMORY_SP | EFI_MEMORY_CPU_CRYPTO |
		     EFI_MEMORY_RUNTIME | EFI_MEMORY_MORE_RELIABLE))
		snprintf(pos, size, "|attr=0x%016llx]",
			 (unsigned long long)attr);
	else
		snprintf(pos, size,
			 "|%3s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
			 attr & EFI_MEMORY_RUNTIME		? "RUN" : "",
			 attr & EFI_MEMORY_MORE_RELIABLE	? "MR"  : "",
			 attr & EFI_MEMORY_CPU_CRYPTO   	? "CC"  : "",
			 attr & EFI_MEMORY_SP			? "SP"  : "",
			 attr & EFI_MEMORY_NV			? "NV"  : "",
			 attr & EFI_MEMORY_XP			? "XP"  : "",
			 attr & EFI_MEMORY_RP			? "RP"  : "",
			 attr & EFI_MEMORY_WP			? "WP"  : "",
			 attr & EFI_MEMORY_RO			? "RO"  : "",
			 attr & EFI_MEMORY_UCE			? "UCE" : "",
			 attr & EFI_MEMORY_WB			? "WB"  : "",
			 attr & EFI_MEMORY_WT			? "WT"  : "",
			 attr & EFI_MEMORY_WC			? "WC"  : "",
			 attr & EFI_MEMORY_UC			? "UC"  : "");
	return buf;
}
931
932/*
933 * IA64 has a funky EFI memory map that doesn't work the same way as
934 * other architectures.
935 */
936#ifndef CONFIG_IA64
937/*
938 * efi_mem_attributes - lookup memmap attributes for physical address
939 * @phys_addr: the physical address to lookup
940 *
941 * Search in the EFI memory map for the region covering
942 * @phys_addr. Returns the EFI memory attributes if the region
943 * was found in the memory map, 0 otherwise.
944 */
945u64 efi_mem_attributes(unsigned long phys_addr)
946{
947	efi_memory_desc_t *md;
948
949	if (!efi_enabled(EFI_MEMMAP))
950		return 0;
951
952	for_each_efi_memory_desc(md) {
953		if ((md->phys_addr <= phys_addr) &&
954		    (phys_addr < (md->phys_addr +
955		    (md->num_pages << EFI_PAGE_SHIFT))))
956			return md->attribute;
957	}
958	return 0;
959}
960
961/*
962 * efi_mem_type - lookup memmap type for physical address
963 * @phys_addr: the physical address to lookup
964 *
965 * Search in the EFI memory map for the region covering @phys_addr.
966 * Returns the EFI memory type if the region was found in the memory
967 * map, -EINVAL otherwise.
968 */
969int efi_mem_type(unsigned long phys_addr)
970{
971	const efi_memory_desc_t *md;
972
973	if (!efi_enabled(EFI_MEMMAP))
974		return -ENOTSUPP;
975
976	for_each_efi_memory_desc(md) {
977		if ((md->phys_addr <= phys_addr) &&
978		    (phys_addr < (md->phys_addr +
979				  (md->num_pages << EFI_PAGE_SHIFT))))
980			return md->type;
981	}
982	return -EINVAL;
983}
984#endif
985
986int efi_status_to_err(efi_status_t status)
987{
988	int err;
989
990	switch (status) {
991	case EFI_SUCCESS:
992		err = 0;
993		break;
994	case EFI_INVALID_PARAMETER:
995		err = -EINVAL;
996		break;
997	case EFI_OUT_OF_RESOURCES:
998		err = -ENOSPC;
999		break;
1000	case EFI_DEVICE_ERROR:
1001		err = -EIO;
1002		break;
1003	case EFI_WRITE_PROTECTED:
1004		err = -EROFS;
1005		break;
1006	case EFI_SECURITY_VIOLATION:
1007		err = -EACCES;
1008		break;
1009	case EFI_NOT_FOUND:
1010		err = -ENOENT;
1011		break;
1012	case EFI_ABORTED:
1013		err = -EINTR;
1014		break;
1015	default:
1016		err = -EINVAL;
1017	}
1018
1019	return err;
1020}
1021EXPORT_SYMBOL_GPL(efi_status_to_err);
1022
static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
/* Kernel mapping of the head of the LINUX_EFI_MEMRESERVE linked list */
static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;

/*
 * Map the memreserve list head discovered via the config table.
 * Returns -ENODEV when the firmware provided no such table.
 */
static int __init efi_memreserve_map_root(void)
{
	if (mem_reserve == EFI_INVALID_TABLE_ADDR)
		return -ENODEV;

	efi_memreserve_root = memremap(mem_reserve,
				       sizeof(*efi_memreserve_root),
				       MEMREMAP_WB);
	if (WARN_ON_ONCE(!efi_memreserve_root))
		return -ENOMEM;
	return 0;
}
1038
1039static int efi_mem_reserve_iomem(phys_addr_t addr, u64 size)
1040{
1041	struct resource *res, *parent;
1042	int ret;
1043
1044	res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
1045	if (!res)
1046		return -ENOMEM;
1047
1048	res->name	= "reserved";
1049	res->flags	= IORESOURCE_MEM;
1050	res->start	= addr;
1051	res->end	= addr + size - 1;
1052
1053	/* we expect a conflict with a 'System RAM' region */
1054	parent = request_resource_conflict(&iomem_resource, res);
1055	ret = parent ? request_resource(parent, res) : 0;
1056
1057	/*
1058	 * Given that efi_mem_reserve_iomem() can be called at any
1059	 * time, only call memblock_reserve() if the architecture
1060	 * keeps the infrastructure around.
1061	 */
1062	if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK) && !ret)
1063		memblock_reserve(addr, size);
1064
1065	return ret;
1066}
1067
/*
 * Record [addr, addr + size) in the firmware-visible memreserve linked
 * list so the reservation survives kexec, and reserve it in the iomem
 * tree.  A root pointer of ULONG_MAX is the sentinel for "no memreserve
 * table exists" (set by efi_memreserve_root_init()).
 */
int __ref efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
{
	struct linux_efi_memreserve *rsv;
	unsigned long prsv;
	int rc, index;

	if (efi_memreserve_root == (void *)ULONG_MAX)
		return -ENODEV;

	if (!efi_memreserve_root) {
		rc = efi_memreserve_map_root();
		if (rc)
			return rc;
	}

	/* first try to find a slot in an existing linked list entry */
	for (prsv = efi_memreserve_root->next; prsv; ) {
		rsv = memremap(prsv, sizeof(*rsv), MEMREMAP_WB);
		if (!rsv)
			return -ENOMEM;
		/* atomically claim a slot; fails once count reaches size */
		index = atomic_fetch_add_unless(&rsv->count, 1, rsv->size);
		if (index < rsv->size) {
			rsv->entry[index].base = addr;
			rsv->entry[index].size = size;

			memunmap(rsv);
			return efi_mem_reserve_iomem(addr, size);
		}
		prsv = rsv->next;
		memunmap(rsv);
	}

	/* no slot found - allocate a new linked list entry */
	rsv = (struct linux_efi_memreserve *)__get_free_page(GFP_ATOMIC);
	if (!rsv)
		return -ENOMEM;

	rc = efi_mem_reserve_iomem(__pa(rsv), SZ_4K);
	if (rc) {
		free_page((unsigned long)rsv);
		return rc;
	}

	/*
	 * The memremap() call above assumes that a linux_efi_memreserve entry
	 * never crosses a page boundary, so let's ensure that this remains true
	 * even when kexec'ing a 4k pages kernel from a >4k pages kernel, by
	 * using SZ_4K explicitly in the size calculation below.
	 */
	rsv->size = EFI_MEMRESERVE_COUNT(SZ_4K);
	atomic_set(&rsv->count, 1);
	rsv->entry[0].base = addr;
	rsv->entry[0].size = size;

	/* link the new entry at the head, under the list insertion lock */
	spin_lock(&efi_mem_reserve_persistent_lock);
	rsv->next = efi_memreserve_root->next;
	efi_memreserve_root->next = __pa(rsv);
	spin_unlock(&efi_mem_reserve_persistent_lock);

	return efi_mem_reserve_iomem(addr, size);
}
1129
1130static int __init efi_memreserve_root_init(void)
1131{
1132	if (efi_memreserve_root)
1133		return 0;
1134	if (efi_memreserve_map_root())
1135		efi_memreserve_root = (void *)ULONG_MAX;
1136	return 0;
1137}
1138early_initcall(efi_memreserve_root_init);
1139
1140#ifdef CONFIG_KEXEC
/*
 * Reboot notifier: just before kexec, refill the firmware RNG seed table
 * with fresh kernel entropy so the next kernel does not reuse the seed
 * this one already consumed.
 */
static int update_efi_random_seed(struct notifier_block *nb,
				  unsigned long code, void *unused)
{
	struct linux_efi_random_seed *seed;
	u32 size = 0;

	/* Only act on kexec reboots; ordinary reboots discard RAM anyway */
	if (!kexec_in_progress)
		return NOTIFY_DONE;

	/* Map the header first to learn (and clamp) the payload size */
	seed = memremap(efi_rng_seed, sizeof(*seed), MEMREMAP_WB);
	if (seed != NULL) {
		size = min(seed->size, EFI_RANDOM_SEED_SIZE);
		memunmap(seed);
	} else {
		pr_err("Could not map UEFI random seed!\n");
	}
	if (size > 0) {
		seed = memremap(efi_rng_seed, sizeof(*seed) + size,
				MEMREMAP_WB);
		if (seed != NULL) {
			seed->size = size;
			get_random_bytes(seed->bits, seed->size);
			memunmap(seed);
		} else {
			pr_err("Could not map UEFI random seed!\n");
		}
	}
	return NOTIFY_DONE;
}

static struct notifier_block efi_random_seed_nb = {
	.notifier_call = update_efi_random_seed,
};

/* Register the notifier only when firmware actually provided a seed table */
static int __init register_update_efi_random_seed(void)
{
	if (efi_rng_seed == EFI_INVALID_TABLE_ADDR)
		return 0;
	return register_reboot_notifier(&efi_random_seed_nb);
}
late_initcall(register_update_efi_random_seed);
1182#endif
1183