1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __X86_MCE_INTERNAL_H__
3#define __X86_MCE_INTERNAL_H__
4
/* Prefix every pr_*() log message emitted by the MCE code with "mce: ". */
#undef pr_fmt
#define pr_fmt(fmt) "mce: " fmt
7
8#include <linux/device.h>
9#include <asm/mce.h>
10
/*
 * Pointer to the installed machine check handler for this CPU setup.
 * NOTE(review): presumably invoked from the #MC exception entry path —
 * confirm against the arch trap handling before relying on this.
 */
extern void (*machine_check_vector)(struct pt_regs *);
13
/*
 * Classification of a machine check event's severity.  The enumerators
 * appear to be ordered from least to most severe; severity comparisons
 * elsewhere likely rely on this ordering — confirm before reordering.
 */
enum severity_level {
	MCE_NO_SEVERITY,
	MCE_DEFERRED_SEVERITY,
	MCE_UCNA_SEVERITY = MCE_DEFERRED_SEVERITY,	/* UCNA shares the DEFERRED value */
	MCE_KEEP_SEVERITY,
	MCE_SOME_SEVERITY,
	MCE_AO_SEVERITY,
	MCE_UC_SEVERITY,
	MCE_AR_SEVERITY,
	MCE_PANIC_SEVERITY,
};
25
/* Notifier chain through which decoded MCE records are published. */
extern struct blocking_notifier_head x86_mce_decoder_chain;

/*
 * Default machine check poll interval, in seconds.  The expansion is
 * parenthesized so the macro is safe inside larger expressions: without
 * the parentheses, e.g. "x / INITIAL_CHECK_INTERVAL" would expand to
 * "x / 5 * 60" and evaluate left-to-right as "(x / 5) * 60".
 */
#define INITIAL_CHECK_INTERVAL	(5 * 60) /* 5 minutes */
29
/*
 * One machine check record queued on a lock-less list, used by the
 * mce_gen_pool_*() event pool helpers below.
 */
struct mce_evt_llist {
	struct llist_node llnode;	/* linkage on the llist */
	struct mce mce;			/* the recorded event */
};
34
/* Event pool: queued MCE records (struct mce_evt_llist) awaiting processing. */
void mce_gen_pool_process(struct work_struct *__unused);
bool mce_gen_pool_empty(void);
int mce_gen_pool_add(struct mce *mce);
int mce_gen_pool_init(void);
struct llist_node *mce_gen_pool_prepare_records(void);

/*
 * Grades a machine check record.
 * NOTE(review): presumably returns an enum severity_level value and fills
 * *msg with a description — inferred from the enum above and the parameter
 * names; confirm against the implementation.
 */
extern int (*mce_severity)(struct mce *a, struct pt_regs *regs,
			   int tolerant, char **msg, bool is_excp);
struct dentry *mce_get_debugfs_dir(void);

/* Presumably a per-bank mask of banks with CE reporting disabled — confirm. */
extern mce_banks_t mce_banks_ce_disabled;
46
#ifdef CONFIG_X86_MCE_INTEL
unsigned long cmci_intel_adjust_timer(unsigned long interval);
bool mce_intel_cmci_poll(void);
void mce_intel_hcpu_update(unsigned long cpu);
void cmci_disable_bank(int bank);
void intel_init_cmci(void);
void intel_init_lmce(void);
void intel_clear_lmce(void);
bool intel_filter_mce(struct mce *m);
#else
/* No Intel CMCI/LMCE support built in: provide inert stubs for callers. */
# define cmci_intel_adjust_timer mce_adjust_timer_default
static inline bool mce_intel_cmci_poll(void) { return false; }
static inline void mce_intel_hcpu_update(unsigned long cpu) { }
static inline void cmci_disable_bank(int bank) { }
static inline void intel_init_cmci(void) { }
static inline void intel_init_lmce(void) { }
static inline void intel_clear_lmce(void) { }
/* Dropped the stray ';' after '}': it was an extra top-level semicolon. */
static inline bool intel_filter_mce(struct mce *m) { return false; }
#endif
66
/* Presumably re-arms the MCE polling timer to fire within @interval — confirm. */
void mce_timer_kick(unsigned long interval);
68
69#ifdef CONFIG_ACPI_APEI
70int apei_write_mce(struct mce *m);
71ssize_t apei_read_mce(struct mce *m, u64 *record_id);
72int apei_check_mce(void);
73int apei_clear_mce(u64 record_id);
74#else
75static inline int apei_write_mce(struct mce *m)
76{
77	return -EINVAL;
78}
79static inline ssize_t apei_read_mce(struct mce *m, u64 *record_id)
80{
81	return 0;
82}
83static inline int apei_check_mce(void)
84{
85	return 0;
86}
87static inline int apei_clear_mce(u64 record_id)
88{
89	return -EINVAL;
90}
91#endif
92
93/*
94 * We consider records to be equivalent if bank+status+addr+misc all match.
95 * This is only used when the system is going down because of a fatal error
96 * to avoid cluttering the console log with essentially repeated information.
97 * In normal processing all errors seen are logged.
98 */
99static inline bool mce_cmp(struct mce *m1, struct mce *m2)
100{
101	return m1->bank != m2->bank ||
102		m1->status != m2->status ||
103		m1->addr != m2->addr ||
104		m1->misc != m2->misc;
105}
106
/* sysfs "trigger" attribute — presumably the mcelog trigger path; confirm. */
extern struct device_attribute dev_attr_trigger;

#ifdef CONFIG_X86_MCELOG_LEGACY
void mce_work_trigger(void);
void mce_register_injector_chain(struct notifier_block *nb);
void mce_unregister_injector_chain(struct notifier_block *nb);
#else
/* Legacy /dev/mcelog support not built: trigger and injector hooks are no-ops. */
static inline void mce_work_trigger(void)	{ }
static inline void mce_register_injector_chain(struct notifier_block *nb)	{ }
static inline void mce_unregister_injector_chain(struct notifier_block *nb)	{ }
#endif
118
/*
 * Global machine-check configuration state (single instance: mca_cfg).
 * NOTE(review): field meanings are inferred from the names — confirm
 * against the option parsing and usage sites before relying on them.
 */
struct mca_config {
	bool dont_log_ce;	/* presumably: suppress logging of corrected errors */
	bool cmci_disabled;
	bool ignore_ce;
	bool print_all;

	/* Single-bit flags packed into one 64-bit word. */
	__u64 lmce_disabled		: 1,
	      disabled			: 1,
	      ser			: 1,
	      recovery			: 1,
	      bios_cmci_threshold	: 1,
	      __reserved		: 59;

	s8 bootlog;
	int tolerant;
	int monarch_timeout;
	int panic_timeout;
	u32 rip_msr;
};

extern struct mca_config mca_cfg;

/* Number of MCA banks on each CPU. */
DECLARE_PER_CPU_READ_MOSTLY(unsigned int, mce_num_banks);
141
/* Vendor-specific MCA capability flags (single instance: mce_flags). */
struct mce_vendor_flags {
	/*
	 * Indicates that overflow conditions are not fatal, when set.
	 */
	__u64 overflow_recov	: 1,

	/*
	 * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and
	 * Recovery. It indicates support for data poisoning in HW and deferred
	 * error interrupts.
	 */
	succor			: 1,

	/*
	 * (AMD) SMCA: This bit indicates support for Scalable MCA which expands
	 * the register space for each MCA bank and also increases number of
	 * banks. Also, to accommodate the new banks and registers, the MCA
	 * register space is moved to a new MSR range.
	 */
	smca			: 1,

	/* AMD-style error thresholding banks present. */
	amd_threshold		: 1,

	__reserved_0		: 60;
};

extern struct mce_vendor_flags mce_flags;
170
/*
 * Symbolic names for the per-bank MCA register types; translated to
 * concrete MSR numbers by mca_msr_reg().
 */
enum mca_msr {
	MCA_CTL,
	MCA_STATUS,
	MCA_ADDR,
	MCA_MISC,
};

/* Return the MSR address of register @reg for MCA bank @bank. */
u32 mca_msr_reg(int bank, enum mca_msr reg);
179
/* Decide whether to add MCE record to MCE event pool or filter it out. */
extern bool filter_mce(struct mce *m);

#ifdef CONFIG_X86_MCE_AMD
extern bool amd_filter_mce(struct mce *m);
#else
/*
 * Without AMD MCE support nothing is vendor-filtered.  The stray ';'
 * after '}' was dropped: it was an extra top-level semicolon.
 */
static inline bool amd_filter_mce(struct mce *m)			{ return false; }
#endif
188
/*
 * Exception-table fixup handlers for faulting rdmsr/wrmsr instructions.
 * NOTE(review): presumably used for MSR accesses in the MCE code —
 * confirm against the extable registration sites.
 */
__visible bool ex_handler_rdmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr);

__visible bool ex_handler_wrmsr_fault(const struct exception_table_entry *fixup,
				      struct pt_regs *regs, int trapnr,
				      unsigned long error_code,
				      unsigned long fault_addr);
198
199#endif /* __X86_MCE_INTERNAL_H__ */
200