// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2017 Arm Ltd.
#define pr_fmt(fmt) "sdei: " fmt

#include <acpi/ghes.h>
#include <linux/acpi.h>
#include <linux/arm_sdei.h>
#include <linux/arm-smccc.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/cpuhotplug.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/percpu.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

/*
 * The call to use to reach the firmware.
 */
static asmlinkage void (*sdei_firmware_call)(unsigned long function_id,
		      unsigned long arg0, unsigned long arg1,
		      unsigned long arg2, unsigned long arg3,
		      unsigned long arg4, struct arm_smccc_res *res);

/* entry point from firmware to arch asm code */
static unsigned long sdei_entry_point;

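/*
 * Dynamic CPU hotplug state returned by cpuhp_setup_state(), saved so the
 * hotplug callbacks can be removed again on reboot and across hibernate.
 */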
static int sdei_hp_state;

struct sdei_event {
	/* These three are protected by the sdei_list_lock */
	struct list_head	list;
	bool			reregister;
	bool			reenable;

	u32			event_num;
	u8			type;
	u8			priority;

	/* This pointer is handed to firmware as the event argument. */
	union {
		/* Shared events */
		struct sdei_registered_event *registered;

		/* CPU private events */
		struct sdei_registered_event __percpu *private_registered;
	};
};

/* Take the mutex for any API call or modification. Take the mutex first. */
static DEFINE_MUTEX(sdei_events_lock);

/* and then hold this when modifying the list */
static DEFINE_SPINLOCK(sdei_list_lock);
static LIST_HEAD(sdei_list);

/* Private events are registered/enabled via IPI passing one of these */
struct sdei_crosscall_args {
	struct sdei_event *event;
	atomic_t errors;
	int first_error;
};

#define CROSSCALL_INIT(arg, event)		\
	do {					\
		arg.event = event;		\
		arg.first_error = 0;		\
		atomic_set(&arg.errors, 0);	\
	} while (0)

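/*
 * Run fn for this event on the local CPU only; used by the CPU hotplug
 * callbacks.
 */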
static inline int sdei_do_local_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	fn(&arg);

	return arg.first_error;
}

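/* Run fn for this event on each online CPU and return the first error seen. */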
static inline int sdei_do_cross_call(smp_call_func_t fn,
				     struct sdei_event *event)
{
	struct sdei_crosscall_args arg;

	CROSSCALL_INIT(arg, event);
	on_each_cpu(fn, &arg, true);

	return arg.first_error;
}

static inline void
sdei_cross_call_return(struct sdei_crosscall_args *arg, int err)
{
	if (err && (atomic_inc_return(&arg->errors) == 1))
		arg->first_error = err;
}

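/*
 * Map an SDEI firmware return code onto a Linux errno. Values not listed
 * here (including the success code) map to zero.
 */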
static int sdei_to_linux_errno(unsigned long sdei_err)
{
	switch (sdei_err) {
	case SDEI_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case SDEI_INVALID_PARAMETERS:
		return -EINVAL;
	case SDEI_DENIED:
		return -EPERM;
	case SDEI_PENDING:
		return -EINPROGRESS;
	case SDEI_OUT_OF_RESOURCE:
		return -ENOMEM;
	}

	return 0;
}

static int invoke_sdei_fn(unsigned long function_id, unsigned long arg0,
			  unsigned long arg1, unsigned long arg2,
			  unsigned long arg3, unsigned long arg4,
			  u64 *result)
{
	int err;
	struct arm_smccc_res res;

	if (sdei_firmware_call) {
		sdei_firmware_call(function_id, arg0, arg1, arg2, arg3, arg4,
				   &res);
		err = sdei_to_linux_errno(res.a0);
	} else {
		/*
		 * !sdei_firmware_call means we failed to probe or called
		 * sdei_mark_interface_broken(). -EIO is not an error returned
		 * by sdei_to_linux_errno() and is used to suppress messages
		 * from this driver.
		 */
		err = -EIO;
		res.a0 = SDEI_NOT_SUPPORTED;
	}

	if (result)
		*result = res.a0;

	return err;
}
NOKPROBE_SYMBOL(invoke_sdei_fn);

static struct sdei_event *sdei_event_find(u32 event_num)
{
	struct sdei_event *e, *found = NULL;

	lockdep_assert_held(&sdei_events_lock);

	spin_lock(&sdei_list_lock);
	list_for_each_entry(e, &sdei_list, list) {
		if (e->event_num == event_num) {
			found = e;
			break;
		}
	}
	spin_unlock(&sdei_list_lock);

	return found;
}

int sdei_api_event_context(u32 query, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_CONTEXT, query, 0, 0, 0, 0,
			      result);
}
NOKPROBE_SYMBOL(sdei_api_event_context);

static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
			      0, 0, result);
}

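/*
 * Query the event's type and priority from firmware, then allocate either a
 * single registered_event (shared events) or a per-CPU set (private events)
 * and add the new event to sdei_list.
 */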
static struct sdei_event *sdei_event_create(u32 event_num,
					    sdei_event_callback *cb,
					    void *cb_arg)
{
	int err;
	u64 result;
	struct sdei_event *event;
	struct sdei_registered_event *reg;

	lockdep_assert_held(&sdei_events_lock);

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event) {
		err = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&event->list);
	event->event_num = event_num;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		goto fail;
	event->priority = result;

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_TYPE,
				      &result);
	if (err)
		goto fail;
	event->type = result;

	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		reg = kzalloc(sizeof(*reg), GFP_KERNEL);
		if (!reg) {
			err = -ENOMEM;
			goto fail;
		}

		reg->event_num = event->event_num;
		reg->priority = event->priority;

		reg->callback = cb;
		reg->callback_arg = cb_arg;
		event->registered = reg;
	} else {
		int cpu;
		struct sdei_registered_event __percpu *regs;

		regs = alloc_percpu(struct sdei_registered_event);
		if (!regs) {
			err = -ENOMEM;
			goto fail;
		}

		for_each_possible_cpu(cpu) {
			reg = per_cpu_ptr(regs, cpu);

			reg->event_num = event->event_num;
			reg->priority = event->priority;
			reg->callback = cb;
			reg->callback_arg = cb_arg;
		}

		event->private_registered = regs;
	}

	spin_lock(&sdei_list_lock);
	list_add(&event->list, &sdei_list);
	spin_unlock(&sdei_list_lock);

	return event;

fail:
	kfree(event);
	return ERR_PTR(err);
}

static void sdei_event_destroy_llocked(struct sdei_event *event)
{
	lockdep_assert_held(&sdei_events_lock);
	lockdep_assert_held(&sdei_list_lock);

	list_del(&event->list);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		kfree(event->registered);
	else
		free_percpu(event->private_registered);

	kfree(event);
}

static void sdei_event_destroy(struct sdei_event *event)
{
	spin_lock(&sdei_list_lock);
	sdei_event_destroy_llocked(event);
	spin_unlock(&sdei_list_lock);
}

static int sdei_api_get_version(u64 *version)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_VERSION, 0, 0, 0, 0, 0, version);
}

int sdei_mask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_MASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to mask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_mask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_mask_local_cpu();
}

int sdei_unmask_local_cpu(void)
{
	int err;

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PE_UNMASK, 0, 0, 0, 0, 0, NULL);
	if (err && err != -EIO) {
		pr_warn_once("failed to unmask CPU[%u]: %d\n",
			     smp_processor_id(), err);
		return err;
	}

	return 0;
}

static void _ipi_unmask_cpu(void *ignored)
{
	WARN_ON_ONCE(preemptible());
	sdei_unmask_local_cpu();
}

static void _ipi_private_reset(void *ignored)
{
	int err;

	WARN_ON_ONCE(preemptible());

	err = invoke_sdei_fn(SDEI_1_0_FN_SDEI_PRIVATE_RESET, 0, 0, 0, 0, 0,
			     NULL);
	if (err && err != -EIO)
		pr_warn_once("failed to reset CPU[%u]: %d\n",
			     smp_processor_id(), err);
}

static int sdei_api_shared_reset(void)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_SHARED_RESET, 0, 0, 0, 0, 0,
			      NULL);
}

static void sdei_mark_interface_broken(void)
{
	pr_err("disabling SDEI firmware interface\n");
	on_each_cpu(&_ipi_mask_cpu, NULL, true);
	sdei_firmware_call = NULL;
}

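/*
 * Reset each CPU's private events, then the shared events. On failure the
 * firmware interface is marked broken and disabled.
 */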
static int sdei_platform_reset(void)
{
	int err;

	on_each_cpu(&_ipi_private_reset, NULL, true);
	err = sdei_api_shared_reset();
	if (err) {
		pr_err("Failed to reset platform: %d\n", err);
		sdei_mark_interface_broken();
	}

	return err;
}

static int sdei_api_event_enable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
			      0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_enable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_enable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_enable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_enable(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_enable, event);

	if (!err) {
		spin_lock(&sdei_list_lock);
		event->reenable = true;
		spin_unlock(&sdei_list_lock);
	}
	cpus_read_unlock();
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_disable(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
			      0, 0, NULL);
}

static void _ipi_event_disable(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_disable(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_disable(u32 event_num)
{
	int err = -EINVAL;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		mutex_unlock(&sdei_events_lock);
		return -ENOENT;
	}

	spin_lock(&sdei_list_lock);
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_disable(event->event_num);
	else
		err = sdei_do_cross_call(_ipi_event_disable, event);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_unregister(u32 event_num)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_UNREGISTER, event_num, 0,
			      0, 0, 0, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_unregister(void *data)
{
	int err;
	struct sdei_crosscall_args *arg = data;

	err = sdei_api_event_unregister(arg->event->event_num);

	sdei_cross_call_return(arg, err);
}

int sdei_event_unregister(u32 event_num)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	event = sdei_event_find(event_num);
	if (!event) {
		pr_warn("Event %u not registered\n", event_num);
		err = -ENOENT;
		goto unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = false;
	event->reenable = false;
	spin_unlock(&sdei_list_lock);

	if (event->type == SDEI_EVENT_TYPE_SHARED)
		err = sdei_api_event_unregister(event->event_num);
	else
		err = sdei_do_cross_call(_local_event_unregister, event);

	if (err)
		goto unlock;

	sdei_event_destroy(event);
unlock:
	mutex_unlock(&sdei_events_lock);

	return err;
}

/*
 * unregister events, but don't destroy them as they are re-registered by
 * sdei_reregister_shared().
 */
static int sdei_unregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_api_event_unregister(event->event_num);
		if (err)
			break;
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

static int sdei_api_event_register(u32 event_num, unsigned long entry_point,
				   void *arg, u64 flags, u64 affinity)
{
	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_REGISTER, event_num,
			      (unsigned long)entry_point, (unsigned long)arg,
			      flags, affinity, NULL);
}

/* Called directly by the hotplug callbacks */
static void _local_event_register(void *data)
{
	int err;
	struct sdei_registered_event *reg;
	struct sdei_crosscall_args *arg = data;

	reg = per_cpu_ptr(arg->event->private_registered, smp_processor_id());
	err = sdei_api_event_register(arg->event->event_num, sdei_entry_point,
				      reg, 0, 0);

	sdei_cross_call_return(arg, err);
}

int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
{
	int err;
	struct sdei_event *event;

	WARN_ON(in_nmi());

	mutex_lock(&sdei_events_lock);
	if (sdei_event_find(event_num)) {
		pr_warn("Event %u already registered\n", event_num);
		err = -EBUSY;
		goto unlock;
	}

	event = sdei_event_create(event_num, cb, arg);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		pr_warn("Failed to create event %u: %d\n", event_num, err);
		goto unlock;
	}

	cpus_read_lock();
	if (event->type == SDEI_EVENT_TYPE_SHARED) {
		err = sdei_api_event_register(event->event_num,
					      sdei_entry_point,
					      event->registered,
					      SDEI_EVENT_REGISTER_RM_ANY, 0);
	} else {
		err = sdei_do_cross_call(_local_event_register, event);
		if (err)
			sdei_do_cross_call(_local_event_unregister, event);
	}

	if (err) {
		sdei_event_destroy(event);
		pr_warn("Failed to register event %u: %d\n", event_num, err);
		goto cpu_unlock;
	}

	spin_lock(&sdei_list_lock);
	event->reregister = true;
	spin_unlock(&sdei_list_lock);
cpu_unlock:
	cpus_read_unlock();
unlock:
	mutex_unlock(&sdei_events_lock);
	return err;
}

static int sdei_reregister_shared(void)
{
	int err = 0;
	struct sdei_event *event;

	mutex_lock(&sdei_events_lock);
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type != SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_api_event_register(event->event_num,
					sdei_entry_point, event->registered,
					SDEI_EVENT_REGISTER_RM_ANY, 0);
			if (err) {
				pr_err("Failed to re-register event %u\n",
				       event->event_num);
				sdei_event_destroy_llocked(event);
				break;
			}
		}

		if (event->reenable) {
			err = sdei_api_event_enable(event->event_num);
			if (err) {
				pr_err("Failed to re-enable event %u\n",
				       event->event_num);
				break;
			}
		}
	}
	spin_unlock(&sdei_list_lock);
	mutex_unlock(&sdei_events_lock);

	return err;
}

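/* CPU hotplug teardown: unregister this CPU's private events, then mask it. */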
static int sdei_cpuhp_down(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* un-register private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		err = sdei_do_local_call(_local_event_unregister, event);
		if (err) {
			pr_err("Failed to unregister event %u: %d\n",
			       event->event_num, err);
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_mask_local_cpu();
}

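/*
 * CPU hotplug startup: re-register and re-enable flagged private events,
 * then unmask this CPU.
 */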
static int sdei_cpuhp_up(unsigned int cpu)
{
	struct sdei_event *event;
	int err;

	/* re-register/enable private events */
	spin_lock(&sdei_list_lock);
	list_for_each_entry(event, &sdei_list, list) {
		if (event->type == SDEI_EVENT_TYPE_SHARED)
			continue;

		if (event->reregister) {
			err = sdei_do_local_call(_local_event_register, event);
			if (err) {
				pr_err("Failed to re-register event %u: %d\n",
				       event->event_num, err);
			}
		}

		if (event->reenable) {
			err = sdei_do_local_call(_local_event_enable, event);
			if (err) {
				pr_err("Failed to re-enable event %u: %d\n",
				       event->event_num, err);
			}
		}
	}
	spin_unlock(&sdei_list_lock);

	return sdei_unmask_local_cpu();
}

/* When entering idle, mask/unmask events for this cpu */
static int sdei_pm_notifier(struct notifier_block *nb, unsigned long action,
			    void *data)
{
	int rv;

	WARN_ON_ONCE(preemptible());

	switch (action) {
	case CPU_PM_ENTER:
		rv = sdei_mask_local_cpu();
		break;
	case CPU_PM_EXIT:
	case CPU_PM_ENTER_FAILED:
		rv = sdei_unmask_local_cpu();
		break;
	default:
		return NOTIFY_DONE;
	}

	if (rv)
		return notifier_from_errno(rv);

	return NOTIFY_OK;
}

static struct notifier_block sdei_pm_nb = {
	.notifier_call = sdei_pm_notifier,
};

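/* On suspend, mask events on every CPU; unmask them again on resume. */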
static int sdei_device_suspend(struct device *dev)
{
	on_each_cpu(_ipi_mask_cpu, NULL, true);

	return 0;
}

static int sdei_device_resume(struct device *dev)
{
	on_each_cpu(_ipi_unmask_cpu, NULL, true);

	return 0;
}

/*
 * We need all events to be reregistered when we resume from hibernate.
 *
 * The sequence is freeze->thaw. Reboot. freeze->restore. We unregister
 * events during freeze, then re-register and re-enable them during thaw
 * and restore.
 */
static int sdei_device_freeze(struct device *dev)
{
	int err;

	/* unregister private events */
	cpuhp_remove_state(sdei_hp_state);

	err = sdei_unregister_shared();
	if (err)
		return err;

	return 0;
}

static int sdei_device_thaw(struct device *dev)
{
	int err;

	/* re-register shared events */
	err = sdei_reregister_shared();
	if (err) {
		pr_warn("Failed to re-register shared events...\n");
		sdei_mark_interface_broken();
		return err;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to re-register CPU hotplug notifier...\n");
		return err;
	}

	sdei_hp_state = err;
	return 0;
}

static int sdei_device_restore(struct device *dev)
{
	int err;

	err = sdei_platform_reset();
	if (err)
		return err;

	return sdei_device_thaw(dev);
}

static const struct dev_pm_ops sdei_pm_ops = {
	.suspend = sdei_device_suspend,
	.resume = sdei_device_resume,
	.freeze = sdei_device_freeze,
	.thaw = sdei_device_thaw,
	.restore = sdei_device_restore,
};

/*
 * Mask all CPUs and unregister all events on panic, reboot or kexec.
 */
static int sdei_reboot_notifier(struct notifier_block *nb, unsigned long action,
				void *data)
{
	/*
	 * We are going to reset the interface, after this there is no point
	 * doing work when we take CPUs offline.
	 */
	cpuhp_remove_state(sdei_hp_state);

	sdei_platform_reset();

	return NOTIFY_OK;
}

static struct notifier_block sdei_reboot_nb = {
	.notifier_call = sdei_reboot_notifier,
};

static void sdei_smccc_smc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_smc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_smc);

static void sdei_smccc_hvc(unsigned long function_id,
			   unsigned long arg0, unsigned long arg1,
			   unsigned long arg2, unsigned long arg3,
			   unsigned long arg4, struct arm_smccc_res *res)
{
	arm_smccc_hvc(function_id, arg0, arg1, arg2, arg3, arg4, 0, 0, res);
}
NOKPROBE_SYMBOL(sdei_smccc_hvc);

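/*
 * Register the GHES notification event, choosing the critical or normal
 * callback based on the priority the firmware reports for the event.
 */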
int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb,
		       sdei_event_callback *critical_cb)
{
	int err;
	u64 result;
	u32 event_num;
	sdei_event_callback *cb;

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	event_num = ghes->generic->notify.vector;
	if (event_num == 0) {
		/*
		 * Event 0 is reserved by the specification for
		 * SDEI_EVENT_SIGNAL.
		 */
		return -EINVAL;
	}

	err = sdei_api_event_get_info(event_num, SDEI_EVENT_INFO_EV_PRIORITY,
				      &result);
	if (err)
		return err;

	if (result == SDEI_EVENT_PRIORITY_CRITICAL)
		cb = critical_cb;
	else
		cb = normal_cb;

	err = sdei_event_register(event_num, cb, ghes);
	if (!err)
		err = sdei_event_enable(event_num);

	return err;
}

int sdei_unregister_ghes(struct ghes *ghes)
{
	int i;
	int err;
	u32 event_num = ghes->generic->notify.vector;

	might_sleep();

	if (!IS_ENABLED(CONFIG_ACPI_APEI_GHES))
		return -EOPNOTSUPP;

	/*
	 * The event may be running on another CPU. Disable it
	 * to stop new events, then try to unregister a few times.
	 */
	err = sdei_event_disable(event_num);
	if (err)
		return err;

	for (i = 0; i < 3; i++) {
		err = sdei_event_unregister(event_num);
		if (err != -EINPROGRESS)
			break;

		schedule();
	}

	return err;
}

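/*
 * Pick the conduit for firmware calls: the DT "method" property, or on ACPI
 * systems whatever PSCI uses.
 */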
static int sdei_get_conduit(struct platform_device *pdev)
{
	const char *method;
	struct device_node *np = pdev->dev.of_node;

	sdei_firmware_call = NULL;
	if (np) {
		if (of_property_read_string(np, "method", &method)) {
			pr_warn("missing \"method\" property\n");
			return SMCCC_CONDUIT_NONE;
		}

		if (!strcmp("hvc", method)) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else if (!strcmp("smc", method)) {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}

		pr_warn("invalid \"method\" property: %s\n", method);
	} else if (!acpi_disabled) {
		if (acpi_psci_use_hvc()) {
			sdei_firmware_call = &sdei_smccc_hvc;
			return SMCCC_CONDUIT_HVC;
		} else {
			sdei_firmware_call = &sdei_smccc_smc;
			return SMCCC_CONDUIT_SMC;
		}
	}

	return SMCCC_CONDUIT_NONE;
}

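/*
 * Probe the firmware interface: pick a conduit, check that the firmware
 * reports SDEI v1.x, reset the platform and register the CPU PM, reboot
 * and CPU hotplug notifiers.
 */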
static int sdei_probe(struct platform_device *pdev)
{
	int err;
	u64 ver = 0;
	int conduit;

	conduit = sdei_get_conduit(pdev);
	if (!sdei_firmware_call)
		return 0;

	err = sdei_api_get_version(&ver);
	if (err) {
		pr_err("Failed to get SDEI version: %d\n", err);
		sdei_mark_interface_broken();
		return err;
	}

	pr_info("SDEIv%d.%d (0x%x) detected in firmware.\n",
		(int)SDEI_VERSION_MAJOR(ver), (int)SDEI_VERSION_MINOR(ver),
		(int)SDEI_VERSION_VENDOR(ver));

	if (SDEI_VERSION_MAJOR(ver) != 1) {
		pr_warn("Conflicting SDEI version detected.\n");
		sdei_mark_interface_broken();
		return -EINVAL;
	}

	err = sdei_platform_reset();
	if (err)
		return err;

	sdei_entry_point = sdei_arch_get_entry_point(conduit);
	if (!sdei_entry_point) {
		/* Not supported due to hardware or boot configuration */
		sdei_mark_interface_broken();
		return 0;
	}

	err = cpu_pm_register_notifier(&sdei_pm_nb);
	if (err) {
		pr_warn("Failed to register CPU PM notifier...\n");
		goto error;
	}

	err = register_reboot_notifier(&sdei_reboot_nb);
	if (err) {
		pr_warn("Failed to register reboot notifier...\n");
		goto remove_cpupm;
	}

	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "SDEI",
				&sdei_cpuhp_up, &sdei_cpuhp_down);
	if (err < 0) {
		pr_warn("Failed to register CPU hotplug notifier...\n");
		goto remove_reboot;
	}

	sdei_hp_state = err;

	return 0;

remove_reboot:
	unregister_reboot_notifier(&sdei_reboot_nb);

remove_cpupm:
	cpu_pm_unregister_notifier(&sdei_pm_nb);

error:
	sdei_mark_interface_broken();
	return err;
}

static const struct of_device_id sdei_of_match[] = {
	{ .compatible = "arm,sdei-1.0" },
	{}
};

static struct platform_driver sdei_driver = {
	.driver		= {
		.name			= "sdei",
		.pm			= &sdei_pm_ops,
		.of_match_table		= sdei_of_match,
	},
	.probe		= sdei_probe,
};

static bool __init sdei_present_acpi(void)
{
	acpi_status status;
	struct acpi_table_header *sdei_table_header;

	if (acpi_disabled)
		return false;

	status = acpi_get_table(ACPI_SIG_SDEI, 0, &sdei_table_header);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
		const char *msg = acpi_format_exception(status);

		pr_info("Failed to get ACPI:SDEI table, %s\n", msg);
	}
	if (ACPI_FAILURE(status))
		return false;

	acpi_put_table(sdei_table_header);

	return true;
}

void __init sdei_init(void)
{
	struct platform_device *pdev;
	int ret;

	ret = platform_driver_register(&sdei_driver);
	if (ret || !sdei_present_acpi())
		return;

	pdev = platform_device_register_simple(sdei_driver.driver.name,
					       0, NULL, 0);
	if (IS_ERR(pdev)) {
		ret = PTR_ERR(pdev);
		platform_driver_unregister(&sdei_driver);
		pr_info("Failed to register ACPI:SDEI platform device %d\n",
			ret);
	}
}

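/*
 * Called from the architecture's SDEI entry code to run the callback
 * registered for this event.
 */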
int sdei_event_handler(struct pt_regs *regs,
		       struct sdei_registered_event *arg)
{
	int err;
	mm_segment_t orig_addr_limit;
	u32 event_num = arg->event_num;

	/*
	 * Save and restore 'fs'.
	 * The architecture's entry code save/restores 'fs' when taking an
	 * exception from the kernel. This ensures addr_limit isn't inherited
	 * if you interrupted something that allowed the uaccess routines to
	 * access kernel memory.
	 * Do the same here because this doesn't come via the same entry code.
	 */
	orig_addr_limit = force_uaccess_begin();

	err = arg->callback(event_num, regs, arg->callback_arg);
	if (err)
		pr_err_ratelimited("event %u on CPU %u failed with error: %d\n",
				   event_num, smp_processor_id(), err);

	force_uaccess_end(orig_addr_limit);

	return err;
}
NOKPROBE_SYMBOL(sdei_event_handler);

void sdei_handler_abort(void)
{
	/*
	 * If the crash happened in an SDEI event handler then we need to
	 * finish the handler with the firmware so that we can have working
	 * interrupts in the crash kernel.
	 */
	if (__this_cpu_read(sdei_active_critical_event)) {
		pr_warn("still in SDEI critical event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_critical_event, NULL);
	}
	if (__this_cpu_read(sdei_active_normal_event)) {
		pr_warn("still in SDEI normal event context, attempting to finish handler.\n");
		__sdei_handler_abort();
		__this_cpu_write(sdei_active_normal_event, NULL);
	}
}