/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PARAVIRT_H
#define _ASM_POWERPC_PARAVIRT_H

#include <linux/jump_label.h>
#include <asm/smp.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/hvcall.h>
#endif

#ifdef CONFIG_PPC_SPLPAR
DECLARE_STATIC_KEY_FALSE(shared_processor);

static inline bool is_shared_processor(void)
{
	return static_branch_unlikely(&shared_processor);
}

/* If bit 0 is set, the cpu has been preempted */
static inline u32 yield_count_of(int cpu)
{
	__be32 yield_count = READ_ONCE(lppaca_of(cpu).yield_count);

	return be32_to_cpu(yield_count);
}
/*
 * Spinlock code confers and prods, so don't trace the hcalls because the
 * tracing code takes spinlocks which can cause recursion deadlocks.
 *
 * These calls are made while the lock is not held: the lock slowpath yields if
 * it can not acquire the lock, and the unlock slowpath might prod if a waiter
 * has yielded. So this may not be a problem for simple spin locks because the
 * tracing does not technically recurse on the lock, but we avoid it anyway.
 *
 * However the queued spin lock contended path is more strictly ordered: the
 * H_CONFER hcall is made after the task has queued itself on the lock, so
 * recursing on that lock would cause the task to queue up again behind the
 * first instance (or worse: queued spinlocks use tricks that assume a context
 * never waits on more than one spinlock, so such recursion may cause random
 * corruption in the lock code).
 */
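/*
 * Confer the caller's remaining time slice to the preempted target vCPU.
 * yield_count is the caller's snapshot of the target's yield count; the
 * hypervisor ignores the confer if the target has been dispatched again
 * since the snapshot was taken.
 */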
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	plpar_hcall_norets_notrace(H_CONFER, get_hard_smp_processor_id(cpu), yield_count);
}

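/* Prod (wake) the target vCPU so the hypervisor dispatches it again. */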
static inline void prod_cpu(int cpu)
{
	plpar_hcall_norets_notrace(H_PROD, get_hard_smp_processor_id(cpu));
}

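/* Confer the remaining time slice without a specific target (-1 means any vCPU). */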
static inline void yield_to_any(void)
{
	plpar_hcall_norets_notrace(H_CONFER, -1, 0);
}
#else
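/*
 * Non-SPLPAR builds: there is no shared processor, so the yield/prod
 * helpers below must never be reached; calling them breaks the link.
 */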
static inline bool is_shared_processor(void)
{
	return false;
}

static inline u32 yield_count_of(int cpu)
{
	return 0;
}

extern void ___bad_yield_to_preempted(void);
static inline void yield_to_preempted(int cpu, u32 yield_count)
{
	___bad_yield_to_preempted(); /* This would be a bug */
}

extern void ___bad_yield_to_any(void);
static inline void yield_to_any(void)
{
	___bad_yield_to_any(); /* This would be a bug */
}

extern void ___bad_prod_cpu(void);
static inline void prod_cpu(int cpu)
{
	___bad_prod_cpu(); /* This would be a bug */
}

#endif

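/*
 * Report whether the hypervisor has preempted the vCPU backing @cpu: on a
 * shared processor LPAR, bit 0 of the yield count set means the vCPU is
 * not currently dispatched.
 */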
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(int cpu)
{
	if (!is_shared_processor())
		return false;
	if (yield_count_of(cpu) & 1)
		return true;
	return false;
}

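/*
 * The native spin unlock path is kept unless we are running as a shared
 * processor LPAR, where the paravirt yield/prod helpers come into play.
 */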
static inline bool pv_is_native_spin_unlock(void)
{
	return !is_shared_processor();
}

#endif /* _ASM_POWERPC_PARAVIRT_H */