/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

/*
 * Allow a contender to spin up to 512 times waiting for an in-progress
 * pending->locked hand-over before it gives up and queues itself
 * (the generic default is a single iteration).
 */
#define _Q_PENDING_LOOPS	(1 << 9)

#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y result in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}
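/*
 * For context, a simplified sketch of how the generic slowpath in
 * kernel/locking/qspinlock.c consumes this helper (not part of this
 * header, and trimmed down from the real code):
 *
 *	val = queued_fetch_set_pending_acquire(lock);
 *	if (unlikely(val & ~_Q_LOCKED_MASK)) {
 *		if (!(val & _Q_PENDING_MASK))
 *			clear_pending(lock);	// undo our pending bit
 *		goto queue;			// fall back to MCS queueing
 *	}
 *
 * The locked btsl supplies the old pending bit; the atomic_read() fills
 * in the rest of the word so the caller can tell whether any other
 * contention bits were set when we made our claim.
 */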

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
extern bool nopvspin;

#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock: Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}
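/*
 * Why a single byte store is enough: per asm-generic/qspinlock_types.h,
 * with fewer than 16K possible CPUs the lock word is laid out (roughly)
 * as:
 *
 *	bits  0- 7: locked byte
 *	bits  8-15: pending
 *	bits 16-31: tail (MCS queue node encoding)
 *
 * x86 is little endian, so &lock->locked aliases the least-significant
 * byte and zeroing it releases the lock without touching the
 * pending/tail state owned by the waiters.
 */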

static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}
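/*
 * Note that pv_queued_spin_lock_slowpath() and pv_queued_spin_unlock()
 * are paravirt ops (see asm/paravirt.h): at boot they are patched to
 * either the native implementations above or the hypervisor-aware
 * __pv_* variants, so bare metal pays essentially nothing for the
 * indirection.
 */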

#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
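/*
 * Usage sketch (simplified from the optimistic-spinning code in
 * kernel/locking/, not part of this header): spinners poll this to stop
 * burning cycles once the lock holder's vCPU has been scheduled out by
 * the hypervisor:
 *
 *	if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *		break;
 */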
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

void native_pv_lock_init(void) __init;
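/*
 * For reference, a sketch of what the implementation in
 * arch/x86/kernel/paravirt.c is expected to look like: bare metal
 * disables the key, anything running under a hypervisor leaves it on
 * until/unless PV spinlocks take over.
 *
 *	void __init native_pv_lock_init(void)
 *	{
 *		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
 *			static_branch_disable(&virt_spin_lock_key);
 *	}
 */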

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
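/*
 * Call-site sketch (simplified from kernel/locking/qspinlock.c, not
 * part of this header): the generic slowpath checks for the hijack
 * before doing any MCS queueing:
 *
 *	void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 *	{
 *		if (virt_spin_lock(lock))
 *			return;
 *		...
 *	}
 */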
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */