/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_QSPINLOCK_H
#define _ASM_QSPINLOCK_H

#include <linux/compiler.h>	/* WRITE_ONCE(), compiletime_assert_atomic_type() */
#include <asm/barrier.h>	/* c_sync() */
#include <asm/paravirt.h>
#include <asm-generic/qspinlock_types.h>

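/*
 * Bound on the number of lock-word reads a waiter performs while the lock
 * sits in the transient "pending but not yet locked" state; overrides the
 * generic default of 1.
 */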
#define _Q_PENDING_LOOPS	(1 << 9)
#define queued_spin_unlock queued_spin_unlock

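/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slowpath and unlock are routed through
 * the pv_* hooks from <asm/paravirt.h>; otherwise the plain native unlock
 * below is used.
 */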
#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

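/*
 * Native unlock: a full barrier followed by a plain store of 0 to the
 * locked byte, which gives the unlock release semantics.
 */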
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	compiletime_assert_atomic_type(lock->locked);
	c_sync();
	WRITE_ONCE(lock->locked, 0);
}

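/* Contended lock path, dispatched through the paravirt hook. */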
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

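/* Unlock via the paravirt hook so a waiting vCPU can be kicked if needed. */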
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

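/*
 * Ask the hypervisor whether the given vCPU is currently preempted, so
 * spin-wait loops can stop spinning on a preempted lock holder.
 */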
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#else
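/* Without paravirt spinlocks, unlock is the native barrier + store. */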
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	compiletime_assert_atomic_type(lock->locked);
	c_sync();
	WRITE_ONCE(lock->locked, 0);
}
#endif

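/* Pull in the generic queued spinlock implementation. */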
#include <asm-generic/qspinlock.h>

#endif /* _ASM_QSPINLOCK_H */