/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * lppaca.h
 * Copyright (C) 2001  Mike Corrigan IBM Corporation
 */
#ifndef _ASM_POWERPC_LPPACA_H
#define _ASM_POWERPC_LPPACA_H

/*
 * The below VPHN macros are outside the __KERNEL__ check since these are
 * used for compiling the vphn selftest in userspace
 */

/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers. */
#define VPHN_REGISTER_COUNT 6

/*
 * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
 * form the complete property we have to add the length in the first cell.
 */
#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
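
/*
 * Worked example of the sizing above: 6 registers * 8 bytes can hold at
 * most 24 packed 16-bit fields, which unpack into at most 24 be32
 * associativity values; adding the leading length cell gives a buffer of
 * 25 be32 entries.
 */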

/*
 * The H_HOME_NODE_ASSOCIATIVITY hcall takes two values for flags:
 * 1 for retrieving associativity information for a guest cpu
 * 2 for retrieving associativity information for a host/hypervisor cpu
 */
#define VPHN_FLAG_VCPU	1
#define VPHN_FLAG_PCPU	2

#ifdef __KERNEL__

/*
 * These definitions relate to hypervisors that only exist when using
 * a server type processor
 */
#ifdef CONFIG_PPC_BOOK3S

/*
 * This control block contains the data that is shared between the
 * hypervisor and the OS.
 */
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/types.h>
#include <asm/mmu.h>
#include <asm/firmware.h>
#include <asm/paca.h>

/*
 * The lppaca is the "virtual processor area" registered with the hypervisor,
 * H_REGISTER_VPA etc.
 *
 * According to PAPR, the structure is 640 bytes long, must be L1 cache line
 * aligned, and must not cross a 4kB boundary. Its size field must be at
 * least 640 bytes (but may be more).
 *
 * Pre-v4.14 KVM hypervisors reject the VPA if its size field is smaller than
 * 1kB, so we dynamically allocate 1kB and advertise size as 1kB, but keep
 * this structure as the canonical 640 byte size.
 */
struct lppaca {
	/* cacheline 1 contains read-only data */

	__be32	desc;			/* Eye catcher 0xD397D781 */
	__be16	size;			/* Size of this struct */
	u8	reserved1[3];
	u8	__old_status;		/* Old status, including shared proc */
	u8	reserved3[14];
	volatile __be32 dyn_hw_node_id;	/* Dynamic hardware node id */
	volatile __be32 dyn_hw_proc_id;	/* Dynamic hardware proc id */
	u8	reserved4[56];
	volatile u8 vphn_assoc_counts[8]; /* Virtual processor home node */
					  /* associativity change counters */
	u8	reserved5[32];

	/* cacheline 2 contains local read-write data */

	u8	reserved6[48];
	u8	cede_latency_hint;
	u8	ebb_regs_in_use;
	u8	reserved7[6];
	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
	u8	fpregs_in_use;
	u8	pmcregs_in_use;
	u8	reserved8[28];
	__be64	wait_state_cycles;	/* Wait cycles for this proc */
	u8	reserved9[28];
	__be16	slb_count;		/* # of SLBs to maintain */
	u8	idle;			/* Indicate OS is idle */
	u8	vmxregs_in_use;

	/* cacheline 3 is shared with other processors */

	/*
	 * This is the yield_count.  An "odd" value (low bit on) means that
	 * the processor is yielded (either because of an OS yield or a
	 * hypervisor preempt).  An even value implies that the processor is
	 * currently executing.
	 * NOTE: Even dedicated processor partitions can yield so this
	 * field cannot be used to determine if we are shared or dedicated.
	 */
	volatile __be32 yield_count;
	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
	volatile __be64 cmo_faults;	/* CMO page fault count */
	volatile __be64 cmo_fault_time;	/* CMO page fault time */
	u8	reserved10[104];

	/* cacheline 4-5 */

	__be32	page_ins;		/* CMO Hint - # page ins by OS */
	u8	reserved11[148];
	volatile __be64 dtl_idx;	/* Dispatch Trace Log head index */
	u8	reserved12[96];
} ____cacheline_aligned;

#define lppaca_of(cpu)	(*paca_ptrs[cpu]->lppaca_ptr)
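
/*
 * Illustrative sketch (not an existing helper in this header): the odd/even
 * yield_count convention documented above can be tested roughly like this,
 * remembering that shared fields are big-endian so reads go through
 * be32_to_cpu().  An odd count means the vcpu is yielded or preempted:
 *
 *	u32 yield_count = be32_to_cpu(lppaca_of(cpu).yield_count);
 *	bool yielded = yield_count & 1;
 *
 * "cpu" is a placeholder for whichever logical cpu the caller cares about.
 */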

/*
 * We are using a non architected field to determine if a partition is
 * shared or dedicated. This currently works on both KVM and PHYP, but
 * we will have to transition to something better.
 */
#define LPPACA_OLD_SHARED_PROC		2

#ifdef CONFIG_PPC_PSERIES
/*
 * All CPUs should have the same shared proc value, so directly access the PACA
 * to avoid false positives from DEBUG_PREEMPT.
 */
static inline bool lppaca_shared_proc(void)
{
	struct lppaca *l = local_paca->lppaca_ptr;

	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return false;
	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
}

#define get_lppaca()	(get_paca()->lppaca_ptr)
#endif
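
/*
 * Usage sketch (illustrative, not a real caller): code elsewhere reads its
 * own VPA through get_lppaca() and adapts to the partition mode, e.g.
 *
 *	if (lppaca_shared_proc())
 *		get_lppaca()->dtl_enable_mask = mask;
 *
 * where "mask" stands in for a dispatch trace log mask defined elsewhere.
 */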

/*
 * SLB shadow buffer structure as defined in the PAPR.  The save_area
 * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
 * ESID is stored in the lower 64bits, then the VSID.
 */
struct slb_shadow {
	__be32	persistent;		/* Number of persistent SLBs */
	__be32	buffer_length;		/* Total shadow buffer length */
	__be64	reserved;
	struct	{
		__be64     esid;
		__be64	vsid;
	} save_area[SLB_NUM_BOLTED];
} ____cacheline_aligned;
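
/*
 * Update-ordering sketch (illustrative; the real updater lives in the SLB
 * code, not in this header): when a shadowed entry changes, the ESID is
 * cleared first so the hypervisor never observes a stale ESID paired with a
 * new VSID, then the VSID and finally the new ESID are written:
 *
 *	shadow->save_area[index].esid = 0;
 *	shadow->save_area[index].vsid = cpu_to_be64(new_vsid);
 *	shadow->save_area[index].esid = cpu_to_be64(new_esid);
 *
 * "shadow", "index", "new_vsid" and "new_esid" are placeholders.
 */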

extern long hcall_vphn(unsigned long cpu, u64 flags, __be32 *associativity);
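
/*
 * Call sketch (illustrative): retrieving the home node associativity for a
 * virtual cpu.  H_SUCCESS is the generic hypervisor success return; what is
 * done with the returned buffer is left out here.
 *
 *	__be32 associativity[VPHN_ASSOC_BUFSIZE] = { 0 };
 *	long rc = hcall_vphn(cpu, VPHN_FLAG_VCPU, associativity);
 *
 *	if (rc == H_SUCCESS)
 *		parse_associativity(associativity);
 *
 * "cpu" and parse_associativity() are placeholders for the caller's own
 * handling of the result.
 */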

#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */