xref: /kernel/linux/linux-5.10/arch/loongarch/include/asm/qspinlock.h (revision 8c2ecf20)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_QSPINLOCK_H
#define _ASM_QSPINLOCK_H

#include <asm/paravirt.h>
#include <asm-generic/qspinlock_types.h>

/* Bound the pending-bit spin in the generic slowpath (generic default is 1). */
#define _Q_PENDING_LOOPS	(1 << 9)
/* Tell asm-generic/qspinlock.h that this arch supplies queued_spin_unlock(). */
#define queued_spin_unlock queued_spin_unlock

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_queued_spin_unlock(struct qspinlock *lock);

/* Native unlock: barrier, then clear the locked byte. */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	compiletime_assert_atomic_type(lock->locked);
	c_sync();
	WRITE_ONCE(lock->locked, 0);
}

/* Route the slowpath and unlock through paravirt ops when running virtualized. */
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

/* Report whether a vCPU is preempted so lock waiters can avoid spinning on it. */
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#else
/* Non-paravirt build: the native unlock, defined directly. */
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	compiletime_assert_atomic_type(lock->locked);
	c_sync();
	WRITE_ONCE(lock->locked, 0);
}
#endif

/* Pull in the generic queued-spinlock implementation with the overrides above. */
#include <asm-generic/qspinlock.h>

#endif /* _ASM_QSPINLOCK_H */
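The unlock in this header boils down to a barrier followed by a plain store of zero to the locked byte. As a rough userspace analogue (a toy sketch using C11 atomics; toy_qspinlock, toy_spin_lock and toy_spin_unlock are made-up names, and a release store stands in for the kernel's c_sync()/WRITE_ONCE() pair; this is not kernel code):

#include <stdatomic.h>
#include <stdio.h>

struct toy_qspinlock {
	atomic_uchar locked;	/* only the low "locked" byte of the real lock matters here */
};

static void toy_spin_lock(struct toy_qspinlock *lock)
{
	unsigned char expected = 0;

	/* Spin until we flip locked from 0 to 1 with acquire semantics. */
	while (!atomic_compare_exchange_weak_explicit(&lock->locked, &expected, 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
}

static void toy_spin_unlock(struct toy_qspinlock *lock)
{
	/*
	 * Analogue of the header's unlock path: the release store orders the
	 * critical section before the byte is cleared, much as the barrier
	 * plus WRITE_ONCE() pair does in the kernel version.
	 */
	atomic_store_explicit(&lock->locked, 0, memory_order_release);
}

int main(void)
{
	struct toy_qspinlock lock = { .locked = 0 };

	toy_spin_lock(&lock);
	printf("inside the critical section\n");
	toy_spin_unlock(&lock);
	return 0;
}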
