// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>
#include <asm/microcode.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
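/* CPU register state saved on the boot CPU across suspend/hibernate. */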
struct saved_context saved_context;

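/*
 * Walk the MSR list registered in ctxt->saved_msrs and read each MSR's
 * current value into the array (save) or write the saved value back
 * (restore).  Entries whose probe read failed at registration time
 * (->valid == false) are skipped.
 */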
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			rdmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the registers contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

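/*
 * Re-derive the CPU state that cannot simply be copied back from the saved
 * image: mark the TSS descriptor "available" again so TR can be reloaded,
 * reinstall the LDT and the syscall MSRs, flush stale TLB state, resume
 * per-CPU FPU state, and switch back to the read-only fixmap GDT.
 */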
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *                             by __save_processor_state()
 * @ctxt: structure to load the registers contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->ds);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);

	microcode_bsp_resume();

	/*
	 * This needs to happen after the microcode has been updated upon resume
	 * because some of the MSRs are "emulated" in microcode.
	 */
	msr_restore_context(ctxt);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
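/*
 * Park a non-boot CPU in HLT rather than MWAIT, so that restoring the
 * hibernation image cannot inadvertently wake it (see the comment in
 * hibernate_resume_nonboot_cpu_disable() below).
 */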
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will actually be written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, CPU hotplug
 * is already disabled, so it is unnecessary to handle the race between
 * the cpumask query and CPU hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

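/*
 * PM notifier: veto suspend/hibernation if the boot CPU is offline, and
 * (in CPU0 hotplug debug mode) bring CPU0 back online before restoring
 * a hibernation image.
 */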
static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume, and
		 * 2. the CPU was online before hibernation.
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * This code is called only when user space hibernation software
		 * prepares the snapshot device during boot time. So we just
		 * call _debug_hotplug_cpu() to restore CPU0 to its state prior
		 * to the preparation of the snapshot device.
		 *
		 * This works for the normal boot case in our CPU0 hotplug debug
		 * mode, i.e. CPU0 is offline and user mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the snapshot
		 * device after boot time, this will offline CPU0 and the user
		 * may see a different CPU0 state before and after accessing
		 * the snapshot device. But hopefully that is not a case in which
		 * the user is debugging CPU0 hotplug. Even if users hit this
		 * case, they can easily bring CPU0 online again.
		 *
		 * To simplify this debug code, we only consider the normal boot
		 * case. Otherwise we would need to remember CPU0's state, restore
		 * it on resume, and resolve races etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback is
	 * called earlier and disables CPU hotplug before the BSP online
	 * check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

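/*
 * Append @num MSRs from @msr_id to the global saved_msrs list, growing the
 * array while preserving entries registered by earlier callers.  Each new
 * MSR is probed once with rdmsrl_safe() so that unreadable MSRs are marked
 * invalid and skipped at save/restore time.
 */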
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		u64 dummy;

		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= !rdmsrl_safe(msr_id[j], &dummy);
		msr_array[i].info.reg.q		= 0;
	}
	saved_msrs->num   = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOSen while the system is suspended
 * to RAM, which may cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

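/*
 * x86_match_cpu() returns the first matching entry of the table; its
 * ->driver_data field carries the per-family setup callback registered
 * in msr_save_cpu_table above.
 */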
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

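/*
 * Speculation-control MSRs are typically written once by the boot-time
 * bug mitigation code and are not otherwise re-initialized on resume,
 * so register them for save/restore when the corresponding feature is
 * present.
 */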
static void pm_save_spec_msr(void)
{
	struct msr_enumeration {
		u32 msr_no;
		u32 feature;
	} msr_enum[] = {
		{ MSR_IA32_SPEC_CTRL,	 X86_FEATURE_MSR_SPEC_CTRL },
		{ MSR_IA32_TSX_CTRL,	 X86_FEATURE_MSR_TSX_CTRL },
		{ MSR_TSX_FORCE_ABORT,	 X86_FEATURE_TSX_FORCE_ABORT },
		{ MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL },
		{ MSR_AMD64_LS_CFG,	 X86_FEATURE_LS_CFG_SSBD },
		{ MSR_AMD64_DE_CFG,	 X86_FEATURE_LFENCE_RDTSC },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(msr_enum); i++) {
		if (boot_cpu_has(msr_enum[i].feature))
			msr_build_context(&msr_enum[i].msr_no, 1);
	}
}

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);
	pm_save_spec_msr();

	return 0;
}

device_initcall(pm_check_save_msr);