/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define INTEL_PMC_MAX_GENERIC				       32
#define INTEL_PMC_MAX_FIXED					4
#define INTEL_PMC_IDX_FIXED				       32

#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_PIN_CONTROL		(1ULL << 19)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL

#define HSW_IN_TX					(1ULL << 32)
#define HSW_IN_TX_CHECKPOINTED				(1ULL << 33)
#define ICL_EVENTSEL_ADAPTIVE				(1ULL << 34)
#define ICL_FIXED_0_ADAPTIVE				(1ULL << 32)

#define AMD64_EVENTSEL_INT_CORE_ENABLE			(1ULL << 36)
#define AMD64_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD64_EVENTSEL_HOSTONLY				(1ULL << 41)

#define AMD64_EVENTSEL_INT_CORE_SEL_SHIFT		37
#define AMD64_EVENTSEL_INT_CORE_SEL_MASK		\
	(0xFULL << AMD64_EVENTSEL_INT_CORE_SEL_SHIFT)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	(0xFULL << AMD64_L3_SLICE_SHIFT)
#define AMD64_L3_SLICEID_MASK				\
	(0x7ULL << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	(0xFFULL << AMD64_L3_THREAD_SHIFT)
#define AMD64_L3_F19H_THREAD_MASK			\
	(0x3ULL << AMD64_L3_THREAD_SHIFT)

#define AMD64_L3_EN_ALL_CORES				BIT_ULL(47)
#define AMD64_L3_EN_ALL_SLICES				BIT_ULL(46)

#define AMD64_L3_COREID_SHIFT				42
#define AMD64_L3_COREID_MASK				\
	(0x7ULL << AMD64_L3_COREID_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define X86_ALL_EVENT_FLAGS			\
	(ARCH_PERFMON_EVENTSEL_EDGE |		\
	 ARCH_PERFMON_EVENTSEL_INV |		\
	 ARCH_PERFMON_EVENTSEL_CMASK |		\
	 ARCH_PERFMON_EVENTSEL_ANY |		\
	 ARCH_PERFMON_EVENTSEL_PIN_CONTROL |	\
	 HSW_IN_TX |				\
	 HSW_IN_TX_CHECKPOINTED)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |  \
	 AMD64_EVENTSEL_EVENT)
#define AMD64_RAW_EVENT_MASK_NB		\
	(AMD64_EVENTSEL_EVENT        |  \
	 ARCH_PERFMON_EVENTSEL_UMASK)
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_CORE				6
#define AMD64_NUM_COUNTERS_NB				4

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

#define PEBS_DATACFG_MEMINFO	BIT_ULL(0)
#define PEBS_DATACFG_GP		BIT_ULL(1)
#define PEBS_DATACFG_XMMS	BIT_ULL(2)
#define PEBS_DATACFG_LBRS	BIT_ULL(3)
#define PEBS_DATACFG_LBR_SHIFT	24
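
/*
 * Illustrative sketch only (hypothetical value, not used anywhere): an
 * adaptive PEBS data configuration requesting the memory info and GPR
 * groups plus an LBR group.  The field at PEBS_DATACFG_LBR_SHIFT sizes
 * the LBR group; the value 7 below (assuming an "entries - 1" encoding,
 * i.e. 8 entries) is an assumption made for this example.
 */
#define EXAMPLE_PEBS_DATACFG	(PEBS_DATACFG_MEMINFO |			\
				 PEBS_DATACFG_GP |			\
				 PEBS_DATACFG_LBRS |			\
				 (7ULL << PEBS_DATACFG_LBR_SHIFT))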

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved1:2;
		unsigned int anythread_deprecated:1;
		unsigned int reserved2:16;
	} split;
	unsigned int full;
};
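
/*
 * Illustrative sketch only (hypothetical helper, not part of this header's
 * API): enumerating the architectural PMU via CPUID leaf 0xa with the
 * unions above.  Assumes cpuid() from <asm/processor.h> is available.
 */
static inline unsigned int example_arch_perfmon_version(void)
{
	union cpuid10_eax eax;
	unsigned int ebx, ecx, edx;

	cpuid(0xa, &eax.full, &ebx, &ecx, &edx);
	return eax.split.version_id;	/* 0 means no architectural PMU */
}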

/*
 * Intel Architectural LBR CPUID detection/enumeration details:
 */
union cpuid28_eax {
	struct {
		/* Supported LBR depth values */
		unsigned int	lbr_depth_mask:8;
		unsigned int	reserved:22;
		/* Deep C-state Reset */
		unsigned int	lbr_deep_c_reset:1;
		/* IP values contain LIP */
		unsigned int	lbr_lip:1;
	} split;
	unsigned int		full;
};

union cpuid28_ebx {
	struct {
		/* CPL Filtering Supported */
		unsigned int    lbr_cpl:1;
		/* Branch Filtering Supported */
		unsigned int    lbr_filter:1;
		/* Call-stack Mode Supported */
		unsigned int    lbr_call_stack:1;
	} split;
	unsigned int            full;
};

union cpuid28_ecx {
	struct {
		/* Mispredict Bit Supported */
		unsigned int    lbr_mispred:1;
		/* Timed LBRs Supported */
		unsigned int    lbr_timed_lbr:1;
		/* Branch Type Field Supported */
		unsigned int    lbr_br_type:1;
	} split;
	unsigned int            full;
};
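
/*
 * Illustrative sketch only (hypothetical helper): querying Architectural
 * LBR support from CPUID leaf 0x1c with the unions above.  Assumes cpuid()
 * from <asm/processor.h> and that leaf 0x1c support was already confirmed.
 */
static inline bool example_arch_lbr_has_call_stack(void)
{
	union cpuid28_eax eax;
	union cpuid28_ebx ebx;
	union cpuid28_ecx ecx;
	unsigned int edx;

	cpuid(0x1c, &eax.full, &ebx.full, &ecx.full, &edx);
	return ebx.split.lbr_call_stack;
}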

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};

/*
 * Fixed-purpose performance events:
 */

/* RDPMC offset for Fixed PMCs */
#define INTEL_PMC_FIXED_RDPMC_BASE		(1 << 30)
#define INTEL_PMC_FIXED_RDPMC_METRICS		(1 << 29)

/*
 * All the fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * There is no event-code assigned to the fixed-mode PMCs.
 *
 * For a fixed-mode PMC that has an equivalent event on a general-purpose
 * PMC, the event-code of the equivalent event is used for the fixed-mode
 * PMC, e.g., Instr_Retired.Any and CPU_CLK_Unhalted.Core.
 *
 * For a fixed-mode PMC that doesn't have an equivalent event, a
 * pseudo-encoding is used, e.g., CPU_CLK_Unhalted.Ref and TOPDOWN.SLOTS.
 * The pseudo event-code for a fixed-mode PMC is always 0x00.
 * The pseudo umask-code is the index of the fixed counter + 1, e.g., fixed
 * counter 2 has the pseudo-encoding 0x0300.
 *
 * The counts are available in separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define INTEL_PMC_IDX_FIXED_INSTRUCTIONS	(INTEL_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define INTEL_PMC_IDX_FIXED_CPU_CYCLES	(INTEL_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: event=0x00,umask=0x3 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define INTEL_PMC_IDX_FIXED_REF_CYCLES	(INTEL_PMC_IDX_FIXED + 2)
#define INTEL_PMC_MSK_FIXED_REF_CYCLES	(1ULL << INTEL_PMC_IDX_FIXED_REF_CYCLES)

/* TOPDOWN.SLOTS: event=0x00,umask=0x4 (pseudo-encoding) */
#define MSR_ARCH_PERFMON_FIXED_CTR3	0x30c
#define INTEL_PMC_IDX_FIXED_SLOTS	(INTEL_PMC_IDX_FIXED + 3)
#define INTEL_PMC_MSK_FIXED_SLOTS	(1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
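
/*
 * Illustrative macro only (hypothetical, not used by the kernel proper):
 * the pseudo-encoding rule described above, event = 0x00 and
 * umask = fixed counter index + 1.  E.g. index 2 yields 0x0300
 * (CPU_CLK_Unhalted.Ref) and index 3 yields 0x0400 (TOPDOWN.SLOTS).
 */
#define EXAMPLE_INTEL_FIXED_PSEUDO_ENC(idx)	(((idx) + 1) << 8)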

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose the value 47 for the fixed index of BTS, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
 */
#define INTEL_PMC_IDX_FIXED_BTS			(INTEL_PMC_IDX_FIXED + 15)

/*
 * The PERF_METRICS MSR is modeled as several magic fixed-mode PMCs, one for
 * each TopDown metric event.
 *
 * Internally the TopDown metric events are mapped to the FxCtr 3 (SLOTS).
 */
#define INTEL_PMC_IDX_METRIC_BASE		(INTEL_PMC_IDX_FIXED + 16)
#define INTEL_PMC_IDX_TD_RETIRING		(INTEL_PMC_IDX_METRIC_BASE + 0)
#define INTEL_PMC_IDX_TD_BAD_SPEC		(INTEL_PMC_IDX_METRIC_BASE + 1)
#define INTEL_PMC_IDX_TD_FE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 2)
#define INTEL_PMC_IDX_TD_BE_BOUND		(INTEL_PMC_IDX_METRIC_BASE + 3)
#define INTEL_PMC_IDX_METRIC_END		INTEL_PMC_IDX_TD_BE_BOUND
#define INTEL_PMC_MSK_TOPDOWN			((0xfull << INTEL_PMC_IDX_METRIC_BASE) | \
						INTEL_PMC_MSK_FIXED_SLOTS)

/*
 * There is no event-code assigned to the TopDown events.
 *
 * For the slots event, use the pseudo code of the fixed counter 3.
 *
 * For the metric events, the pseudo event-code is 0x00.
 * The pseudo umask-code starts from the middle of the pseudo event
 * space, 0x80.
 */
#define INTEL_TD_SLOTS				0x0400	/* TOPDOWN.SLOTS */
/* Level 1 metrics */
#define INTEL_TD_METRIC_RETIRING		0x8000	/* Retiring metric */
#define INTEL_TD_METRIC_BAD_SPEC		0x8100	/* Bad speculation metric */
#define INTEL_TD_METRIC_FE_BOUND		0x8200	/* FE bound metric */
#define INTEL_TD_METRIC_BE_BOUND		0x8300	/* BE bound metric */
#define INTEL_TD_METRIC_MAX			INTEL_TD_METRIC_BE_BOUND
#define INTEL_TD_METRIC_NUM			4
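
/*
 * Illustrative sketch only (hypothetical helper): recover the pseudo-encoding
 * of a TopDown metric event from its metric PMC index, using the layout above
 * (the metric umask space starts at 0x80 and advances by one per metric).
 */
static inline unsigned int example_td_metric_pseudo_enc(int idx)
{
	return INTEL_TD_METRIC_RETIRING +
	       (idx - INTEL_PMC_IDX_TD_RETIRING) * 0x100;
}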

static inline bool is_metric_idx(int idx)
{
	return (unsigned)(idx - INTEL_PMC_IDX_METRIC_BASE) < INTEL_TD_METRIC_NUM;
}

static inline bool is_topdown_idx(int idx)
{
	return is_metric_idx(idx) || idx == INTEL_PMC_IDX_FIXED_SLOTS;
}

#define INTEL_PMC_OTHER_TOPDOWN_BITS(bit)	\
			(~(0x1ull << (bit)) & INTEL_PMC_MSK_TOPDOWN)

#define GLOBAL_STATUS_COND_CHG			BIT_ULL(63)
#define GLOBAL_STATUS_BUFFER_OVF_BIT		62
#define GLOBAL_STATUS_BUFFER_OVF		BIT_ULL(GLOBAL_STATUS_BUFFER_OVF_BIT)
#define GLOBAL_STATUS_UNC_OVF			BIT_ULL(61)
#define GLOBAL_STATUS_ASIF			BIT_ULL(60)
#define GLOBAL_STATUS_COUNTERS_FROZEN		BIT_ULL(59)
#define GLOBAL_STATUS_LBRS_FROZEN_BIT		58
#define GLOBAL_STATUS_LBRS_FROZEN		BIT_ULL(GLOBAL_STATUS_LBRS_FROZEN_BIT)
#define GLOBAL_STATUS_TRACE_TOPAPMI_BIT		55
#define GLOBAL_STATUS_TRACE_TOPAPMI		BIT_ULL(GLOBAL_STATUS_TRACE_TOPAPMI_BIT)
#define GLOBAL_STATUS_PERF_METRICS_OVF_BIT	48

#define GLOBAL_CTRL_EN_PERF_METRICS		48
/*
 * We model guest LBR event tracing as another fixed-mode PMC like BTS.
 *
 * We choose bit 58 because it's used to indicate the LBR stack frozen state
 * for architectural perfmon v4, and we unconditionally mask that bit in
 * handle_pmi_common(), so it will never be set in the overflow handling.
 *
 * With this fake counter assigned, the guest LBR event user (such as KVM)
 * can program the LBR registers on its own, and we don't actually do anything
 * with them in the host context.
 */
#define INTEL_PMC_IDX_FIXED_VLBR	(GLOBAL_STATUS_LBRS_FROZEN_BIT)

/*
 * Pseudo-encoding the guest LBR event as event=0x00,umask=0x1b,
 * since it would claim bit 58 which is effectively Fixed26.
 */
#define INTEL_FIXED_VLBR_EVENT	0x1b00
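
/*
 * That encoding follows the fixed-counter rule above: bit 58 is fixed
 * counter index 58 - INTEL_PMC_IDX_FIXED = 26, so the pseudo umask is
 * 26 + 1 = 27 = 0x1b, giving event=0x00, umask=0x1b.
 */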

/*
 * Adaptive PEBS v4
 */

struct pebs_basic {
	u64 format_size;
	u64 ip;
	u64 applicable_counters;
	u64 tsc;
};

struct pebs_meminfo {
	u64 address;
	u64 aux;
	u64 latency;
	u64 tsx_tuning;
};

struct pebs_gprs {
	u64 flags, ip, ax, cx, dx, bx, sp, bp, si, di;
	u64 r8, r9, r10, r11, r12, r13, r14, r15;
};

struct pebs_xmm {
	u64 xmm[16*2];	/* two entries for each register */
};

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)
#define IBS_CAPS_RIPINVALIDCHK		(1U<<7)
#define IBS_CAPS_OPBRNFUSE		(1U<<8)
#define IBS_CAPS_FETCHCTLEXTD		(1U<<9)
#define IBS_CAPS_OPDATA4		(1U<<10)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IBS fetch bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/*
 * IBS op bits/masks
 * The lower 7 bits of the current count are random bits
 * preloaded by hardware and ignored in software
 */
#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
#define IBS_OP_MAX_CNT_EXT_MASK	(0x7FULL<<20)	/* separate upper 7 bits */
#define IBS_RIP_INVALID		(1ULL<<38)
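
/*
 * Illustrative sketch only (hypothetical helper, derived from the masks
 * above): when IBS_CAPS_OPCNTEXT is advertised, a 23-bit max-count value
 * splits into the legacy 16-bit field (bits 15:0) and the extended upper
 * 7 bits at the IBS_OP_MAX_CNT_EXT_MASK position (bits 26:20).  How the
 * driver actually programs the sampling period is not shown here.
 */
static inline u64 example_ibs_op_max_cnt_bits(u64 max_cnt)
{
	max_cnt &= IBS_OP_MAX_CNT_EXT;		/* keep 23 significant bits */
	return (max_cnt & IBS_OP_MAX_CNT) |
	       ((max_cnt & ~IBS_OP_MAX_CNT) << 4);
}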

#ifdef CONFIG_X86_LOCAL_APIC
extern u32 get_ibs_caps(void);
extern int forward_event_to_ibs(struct perf_event *event);
#else
static inline u32 get_ibs_caps(void) { return 0; }
static inline int forward_event_to_ibs(struct perf_event *event) { return -ENOENT; }
#endif
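
/*
 * Illustrative usage only (hypothetical caller): sampling by dispatched
 * op count (IBS_OP_CNT_CTL) is only meaningful when the corresponding
 * capability bit is advertised by get_ibs_caps().
 */
static inline bool example_ibs_can_count_dispatched_ops(void)
{
	return get_ibs_caps() & IBS_CAPS_OPCNT;
}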

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

/*
 * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise
 * unused and ABI specified to be 0, so nobody should care what we do with
 * them.
 *
 * EXACT - the IP points to the exact instruction that triggered the
 *         event (HW bugs exempt).
 * VM    - original X86_VM_MASK; see set_linear_ip().
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
#define PERF_EFLAGS_VM		(1UL << 5)
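
/*
 * Illustrative sketch only (hypothetical helper; the real consumer is
 * perf_misc_flags() in the event core): a precise, PEBS-style sample can
 * be recognized by the EXACT flag smuggled into the saved eflags value.
 */
static inline bool example_sample_ip_is_exact(unsigned long flags)
{
	return flags & PERF_EFLAGS_EXACT;
}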

struct pt_regs;
struct x86_perf_regs {
	struct pt_regs	regs;
	u64		*xmm_regs;
};

extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->sp = (unsigned long)__builtin_frame_address(0);	\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
}

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

struct x86_pmu_lbr {
	unsigned int	nr;
	unsigned int	from;
	unsigned int	to;
	unsigned int	info;
};

extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
extern void perf_check_microcode(void);
extern int x86_perf_rdpmc_index(struct perf_event *event);
#else
static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
static inline void perf_check_microcode(void) { }
#endif
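
/*
 * Illustrative usage only (hypothetical caller, e.g. a hypervisor module):
 * query what the host PMU advertises before exposing counters elsewhere.
 */
static inline bool example_host_has_fixed_counters(void)
{
	struct x86_pmu_capability cap;

	perf_get_x86_pmu_capability(&cap);
	return cap.num_counters_fixed > 0;
}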

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
extern int x86_perf_get_lbr(struct x86_pmu_lbr *lbr);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}
static inline int x86_perf_get_lbr(struct x86_pmu_lbr *lbr)
{
	return -1;
}
#endif
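
/*
 * Illustrative usage only (hypothetical caller): discover the host LBR
 * layout, treating a non-zero return from x86_perf_get_lbr() as "no
 * usable LBR information" (consistent with the -1 stub above).
 */
static inline unsigned int example_host_lbr_depth(void)
{
	struct x86_pmu_lbr lbr;

	return x86_perf_get_lbr(&lbr) ? 0 : lbr.nr;
}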

#ifdef CONFIG_CPU_SUP_INTEL
extern void intel_pt_handle_vmx(int on);
#else
static inline void intel_pt_handle_vmx(int on)
{
}
#endif

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
#else
static inline void amd_pmu_enable_virt(void) { }
static inline void amd_pmu_disable_virt(void) { }
#endif

#define arch_perf_out_copy_user copy_from_user_nmi

#endif /* _ASM_X86_PERF_EVENT_H */