/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
#include <asm/inst.h>

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used
 * to implement software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong; try again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				int rs, unsigned int bytes,
				int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type,
				 unsigned long *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);

extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu,
					    ulong srr1_flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu,
				      ulong srr1_flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu,
					ulong srr1_flags);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu,
					  ulong srr1_flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags,
					   ulong dar,
					   ulong dsisr);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong srr1_flags);

extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern int kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
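
/*
 * Illustrative use (a sketch, not a specific caller): an H_PUT_TCE handler
 * can bounds-check a guest-supplied I/O bus address before touching the
 * table, e.g.:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */
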
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				const struct kvm_memory_slot *old,
				struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);
extern int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					  struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     const struct kvm_memory_slot *old,
				     struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	int (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			     unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
	int (*create_vm_debugfs)(struct kvm *kvm);
	int (*create_vcpu_debugfs)(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, ppc_inst_t *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if the exit path failed to do so */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret != EMULATE_DONE) {
		*inst = ppc_inst(KVM_INST_FETCH_FAILED);
		return ret;
	}

#ifdef CONFIG_PPC64
	/* Is this a prefixed instruction? */
	if ((vcpu->arch.last_inst >> 32) != 0) {
		u32 prefix = vcpu->arch.last_inst >> 32;
		u32 suffix = vcpu->arch.last_inst;
		if (kvmppc_need_byteswap(vcpu)) {
			prefix = swab32(prefix);
			suffix = swab32(suffix);
		}
		*inst = ppc_inst_prefix(prefix, suffix);
		return EMULATE_DONE;
	}
#endif

	fetched_inst = kvmppc_need_byteswap(vcpu) ?
		swab32(vcpu->arch.last_inst) :
		vcpu->arch.last_inst;
	*inst = ppc_inst(fetched_inst);
	return EMULATE_DONE;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts instruction bits, numbered according to the ISA convention:
 * the leftmost (most significant) bit is bit 0, and both msb and lsb
 * are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
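
/*
 * Example (illustrative): the helper treats inst as a 64-bit quantity, so
 * a field of a 32-bit instruction numbered msb..lsb in the ISA is fetched
 * by offsetting both bounds by 32. For the RT field (ISA bits 6-10):
 *
 *	rt = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 *
 * which is equivalent to (inst >> 21) & 0x1f.
 */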

/*
 * Replaces instruction bits, using the same ISA bit-numbering convention
 * as kvmppc_get_field().
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
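
/*
 * E.g. (illustrative) rewriting the same RT field of a 32-bit instruction
 * in place:
 *
 *	inst = kvmppc_set_field(inst, 6 + 32, 10 + 32, rt);
 */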

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
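
/*
 * Sketch of a ONE_REG accessor pair built on these helpers (the register
 * and field names here are hypothetical):
 *
 *	case KVM_REG_PPC_FOO:
 *		*val = get_reg_val(id, vcpu->arch.foo);
 *		break;
 *
 * and, on the set side:
 *
 *	case KVM_REG_PPC_FOO:
 *		vcpu->arch.foo = set_reg_val(id, *val);
 *		break;
 */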

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   ppc_msgsync()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 1);
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	WRITE_ONCE(paca_ptrs[cpu]->kvm_hstate.host_ipi, 0);
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
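
/*
 * Sketch of the intended pairing described above (see doorbell_global_ipi()
 * and doorbell_exception() for real users):
 *
 *	sender:
 *		kvmppc_set_host_ipi(cpu);
 *		ppc_msgsnd_sync();
 *		ppc_msgsnd(...);
 *
 *	receiver (IPI handler):
 *		kvmppc_clear_host_ipi(smp_processor_id());
 *		smp_ipi_demux_relaxed();
 */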

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_PPC_PSERIES
static inline bool kvmhv_on_pseries(void)
{
	return !cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool kvmhv_on_pseries(void)
{
	return false;
}
#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" stands for the "eXternal Interrupt Virtualization
 * Engine", i.e. the new P9 interrupt controller, while the second "xive" is
 * the legacy "eXternal Interrupt Vector Entry", which is the configuration
 * of an interrupt on the "xics" interrupt controller on P8 and earlier.
 * These two functions consume or produce a legacy "XIVE" state for the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 unsigned long host_irq) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { return true; }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			  struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_p9_realmode_hmi_handler(struct kvm_vcpu *vcpu);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations set up for use while running
 * in real mode in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
		struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
		struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct folio *folio;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	folio = page_folio(pfn_to_page(pfn));
	if (!test_bit(PG_dcache_clean, &folio->flags)) {
		flush_dcache_icache_folio(folio);
		set_bit(PG_dcache_clean, &folio->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so provide helpers that handle both.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
	       return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
	       vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
	       vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

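/*
 * As an example of the expansion, SHARED_WRAPPER(critical, 64) below
 * generates (roughly):
 *
 *	static inline u64 kvmppc_get_critical(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->critical);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->critical);
 *	}
 *
 * plus the matching kvmppc_set_critical() setter.
 */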
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->msr = cpu_to_be64(val);
	else
	       vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
	       return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
	       return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
	       vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
	       vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after kvmppc_prepare_to_enter(). It puts the lazy-EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
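
/*
 * Illustrative caller sequence (a sketch of the vcpu_run entry paths):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		goto out;
 *	// interrupts are now hard-disabled
 *	kvmppc_fix_ee_before_entry();
 *	... enter the guest ...
 */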

static inline void kvmppc_fix_ee_after_exit(void)
{
#ifdef CONFIG_PPC64
	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ALL_DISABLED);
#endif

	trace_hardirqs_off();
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */