// SPDX-License-Identifier: GPL-2.0

#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/kcore.h>
#include <linux/pgtable.h>
#include <linux/random.h>

#include <asm/cpu_entry_area.h>
#include <asm/fixmap.h>
#include <asm/desc.h>
#include <asm/kasan.h>

static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage);

#ifdef CONFIG_X86_64
static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);

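/* Each CPU's randomly chosen slot index within the CPU entry area map. */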
static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return per_cpu(_cea_offset, cpu);
}

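/*
 * Assign each possible CPU a random, unique slot within the per-CPU
 * portion of the CPU entry area map, so that the virtual address of a
 * CPU's entry area cannot be derived from its CPU number.  Duplicates
 * are avoided by rejection sampling against the CPUs set up so far.
 */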
static __init void init_cea_offsets(void)
{
	unsigned int max_cea;
	unsigned int i, j;

	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;

	/* O(sodding terrible) */
	for_each_possible_cpu(i) {
		unsigned int cea;

again:
		/*
		 * Use get_random_u32() directly instead of prandom_u32_max(),
		 * which relies on a seed that cannot be generated when
		 * CONFIG_RANDOMIZE_BASE=n.
		 */
		cea = (u32)(((u64) get_random_u32() * max_cea) >> 32);

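		/* Retry if a previously assigned CPU already uses this slot. */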
		for_each_possible_cpu(j) {
			if (cea_offset(j) == cea)
				goto again;

			if (i == j)
				break;
		}

		per_cpu(_cea_offset, i) = cea;
	}
}
#else /* !X86_64 */
DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);

static __always_inline unsigned int cea_offset(unsigned int cpu)
{
	return cpu;
}
static inline void init_cea_offsets(void) { }
#endif

/* Is called from entry code, so must be noinstr */
noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
{
	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);

	return (struct cpu_entry_area *) va;
}
EXPORT_SYMBOL(get_cpu_entry_area);

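/*
 * Install a single PTE mapping @cea_vaddr in the CPU entry area to the
 * physical address @pa with protections @flags.
 */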
void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
{
	unsigned long va = (unsigned long) cea_vaddr;
	pte_t pte = pfn_pte(pa >> PAGE_SHIFT, flags);

	/*
	 * The cpu_entry_area is shared between the user and kernel
	 * page tables.  All of its ptes can safely be global.
	 * _PAGE_GLOBAL gets reused to help indicate PROT_NONE for
	 * non-present PTEs, so be careful not to set it in that
	 * case to avoid confusion.
	 */
	if (boot_cpu_has(X86_FEATURE_PGE) &&
	    (pgprot_val(flags) & _PAGE_PRESENT))
		pte = pte_set_flags(pte, _PAGE_GLOBAL);

	set_pte_vaddr(va, pte);
}

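/*
 * Map @pages pages of the per-CPU object @ptr into the CPU entry area
 * starting at @cea_vaddr, one page at a time.
 */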
static void __init
cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
	for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
		cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}

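/*
 * Map the per-CPU debug store, used by Intel PEBS/BTS, into the CPU
 * entry area.  The actual buffer pages are allocated on demand, so only
 * non-present PTEs are installed for them here in order to get the PMDs
 * populated.
 */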
static void __init percpu_setup_debug_store(unsigned int cpu)
{
#ifdef CONFIG_CPU_SUP_INTEL
	unsigned int npages;
	void *cea;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return;

	cea = &get_cpu_entry_area(cpu)->cpu_debug_store;
	npages = sizeof(struct debug_store) / PAGE_SIZE;
	BUILD_BUG_ON(sizeof(struct debug_store) % PAGE_SIZE != 0);
	cea_map_percpu_pages(cea, &per_cpu(cpu_debug_store, cpu), npages,
			     PAGE_KERNEL);

	cea = &get_cpu_entry_area(cpu)->cpu_debug_buffers;
	/*
	 * Force the population of PMDs for not yet allocated per cpu
	 * memory like debug store buffers.
	 */
	npages = sizeof(struct debug_store_buffers) / PAGE_SIZE;
	for (; npages; npages--, cea += PAGE_SIZE)
		cea_set_pte(cea, 0, PAGE_NONE);
#endif
}

#ifdef CONFIG_X86_64

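/*
 * Map one IST exception stack from its per-CPU backing storage into the
 * corresponding slot of the CPU entry area.
 */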
#define cea_map_stack(name) do {					\
	npages = sizeof(estacks->name##_stack) / PAGE_SIZE;		\
	cea_map_percpu_pages(cea->estacks.name##_stack,			\
			estacks->name##_stack, npages, PAGE_KERNEL);	\
	} while (0)

static void __init percpu_setup_exception_stacks(unsigned int cpu)
{
	struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
	unsigned int npages;

	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);

	per_cpu(cea_exception_stacks, cpu) = &cea->estacks;

	/*
	 * The exception stack mappings in the per-CPU area are protected
	 * by guard pages, so each stack must be mapped separately. DB2 is
	 * not mapped; it just exists to catch triple nesting of #DB.
	 */
	cea_map_stack(DF);
	cea_map_stack(NMI);
	cea_map_stack(DB);
	cea_map_stack(MCE);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
			cea_map_stack(VC);
			cea_map_stack(VC2);
		}
	}
}
#else
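/*
 * 32-bit has no IST stacks; only the double fault stack, which is used
 * via a task gate, needs a mapping in the CPU entry area.
 */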
static inline void percpu_setup_exception_stacks(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

	cea_map_percpu_pages(&cea->doublefault_stack,
			     &per_cpu(doublefault_stack, cpu), 1, PAGE_KERNEL);
}
#endif

/* Set up the CPU entry area mappings; done once per processor. */
static void __init setup_cpu_entry_area(unsigned int cpu)
{
	struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
#ifdef CONFIG_X86_64
	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
	pgprot_t gdt_prot = PAGE_KERNEL_RO;
	pgprot_t tss_prot = PAGE_KERNEL_RO;
#else
	/*
	 * On native 32-bit systems, the GDT cannot be read-only because
	 * our double fault handler uses a task gate, and entering through
	 * a task gate needs to change an available TSS to busy.  If the
	 * GDT is read-only, that will triple fault.  The TSS cannot be
	 * read-only because the CPU writes to it on task switches.
	 *
	 * On Xen PV, the GDT must be read-only because the hypervisor
	 * requires it.
	 */
	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
		PAGE_KERNEL_RO : PAGE_KERNEL;
	pgprot_t tss_prot = PAGE_KERNEL;
#endif

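	/* Make sure KASAN shadow backs the whole CPU entry area first. */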
	kasan_populate_shadow_for_vaddr(cea, CPU_ENTRY_AREA_SIZE,
					early_cpu_to_node(cpu));

	cea_set_pte(&cea->gdt, get_cpu_gdt_paddr(cpu), gdt_prot);

	cea_map_percpu_pages(&cea->entry_stack_page,
			     per_cpu_ptr(&entry_stack_storage, cpu), 1,
			     PAGE_KERNEL);

	/*
	 * The Intel SDM says (Volume 3, 7.2.1):
	 *
	 *  Avoid placing a page boundary in the part of the TSS that the
	 *  processor reads during a task switch (the first 104 bytes). The
	 *  processor may not correctly perform address translations if a
	 *  boundary occurs in this area. During a task switch, the processor
	 *  reads and writes into the first 104 bytes of each TSS (using
	 *  contiguous physical addresses beginning with the physical address
	 *  of the first byte of the TSS). So, after TSS access begins, if
	 *  part of the 104 bytes is not physically contiguous, the processor
	 *  will access incorrect information without generating a page-fault
	 *  exception.
	 *
	 * There are also a lot of errata involving the TSS spanning a page
	 * boundary.  Assert that we're not doing that.
	 */
	BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
		      offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
	/*
	 * VMX changes the host TR limit to 0x67 after a VM exit. This is
	 * okay, since 0x67 covers the size of struct x86_hw_tss. Make sure
	 * that this is correct.
	 */
	BUILD_BUG_ON(offsetof(struct tss_struct, x86_tss) != 0);
	BUILD_BUG_ON(sizeof(struct x86_hw_tss) != 0x68);

	cea_map_percpu_pages(&cea->tss, &per_cpu(cpu_tss_rw, cpu),
			     sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);

#ifdef CONFIG_X86_32
	per_cpu(cpu_entry_area, cpu) = cea;
#endif

	percpu_setup_exception_stacks(cpu);

	percpu_setup_debug_store(cpu);
}

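/*
 * On 32-bit, pre-allocate the page table pages covering the whole CPU
 * entry area range so that cea_set_pte() only has to install PTEs
 * later.  64-bit populates the intermediate levels on demand via
 * set_pte_vaddr().
 */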
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* The +1 is for the readonly IDT: */
	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/* Careful here: start + PMD_SIZE might wrap around */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}

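/* Set up the entry areas for all possible CPUs; called once at boot. */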
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	init_cea_offsets();

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pg_dir which needs
	 * to be synchronized to initial_page_table on 32-bit.
	 */
	sync_initial_page_table();
}