/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_QSPINLOCK_H
#define _ASM_X86_QSPINLOCK_H

#include <linux/jump_label.h>
#include <asm/cpufeatures.h>
#include <asm-generic/qspinlock_types.h>
#include <asm/paravirt.h>
#include <asm/rmwcc.h>

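/*
 * How long the generic slowpath may spin waiting for a pending->locked
 * transition before falling back to queueing; x86 affords the larger
 * bound of 512 iterations since that handoff is expected to be short.
 */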
#define _Q_PENDING_LOOPS	(1 << 9)

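/*
 * Set the pending bit with a single "lock btsl" and return the lock word
 * in qspinlock layout: the old pending bit comes from the atomic RMW, the
 * tail/locked fields from a subsequent plain read.
 */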
#define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire
static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
{
	u32 val;

	/*
	 * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto
	 * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a
	 * statement expression, which GCC doesn't like.
	 */
	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
			       "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL;
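	/*
	 * GEN_BINARY_RMWcc() evaluates to the old value of the pending bit
	 * (the CF output of "lock btsl"); multiplying that bool by
	 * _Q_PENDING_VAL moves it back into bit position before the other
	 * fields are merged in below.
	 */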
	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;

	return val;
}

#ifdef CONFIG_PARAVIRT_SPINLOCKS
extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __pv_init_lock_hash(void);
extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
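/* Set when "nopvspin" is passed on the kernel command line. */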
extern bool nopvspin;

#define	queued_spin_unlock queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 *
 * A smp_store_release() on the least-significant byte.
 */
static inline void native_queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release(&lock->locked, 0);
}

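/*
 * With CONFIG_PARAVIRT_SPINLOCKS the slowpath and unlock are indirected
 * through pv_ops, which boot code points at either the native or the
 * paravirt implementation.
 */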
static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
{
	pv_queued_spin_lock_slowpath(lock, val);
}

static inline void queued_spin_unlock(struct qspinlock *lock)
{
	pv_queued_spin_unlock(lock);
}

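/*
 * Ask the hypervisor whether the vCPU backing @cpu has been preempted,
 * so lock waiters and the scheduler can avoid spinning on its behalf.
 */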
#define vcpu_is_preempted vcpu_is_preempted
static inline bool vcpu_is_preempted(long cpu)
{
	return pv_vcpu_is_preempted(cpu);
}
#endif

#ifdef CONFIG_PARAVIRT
/*
 * virt_spin_lock_key - enables (by default) the virt_spin_lock() hijack.
 *
 * Native (and PV wanting native due to vCPU pinning) should disable this key.
 * It is done in this backwards fashion to only have a single direction change,
 * which removes ordering between native_pv_lock_init() and HV setup.
 */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);
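/* Defaults to true; native_pv_lock_init() disables it on bare metal. */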

void native_pv_lock_init(void) __init;

/*
 * Shortcut for the queued_spin_lock_slowpath() function that allows
 * virt to hijack it.
 *
 * Returns:
 *   true - lock has been negotiated, all done;
 *   false - queued_spin_lock_slowpath() will do its thing.
 */
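/*
 * The generic queued_spin_lock_slowpath() calls this hook before any
 * pending/queueing logic and returns immediately when it reports true.
 */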
#define virt_spin_lock virt_spin_lock
static inline bool virt_spin_lock(struct qspinlock *lock)
{
	if (!static_branch_likely(&virt_spin_lock_key))
		return false;

	/*
	 * On hypervisors without PARAVIRT_SPINLOCKS support we fall
	 * back to a Test-and-Set spinlock, because fair locks have
	 * horrible lock 'holder' preemption issues.
	 */

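	/*
	 * Test-and-test-and-set: spin with plain reads so the cacheline
	 * stays shared, and only attempt the atomic cmpxchg() once the
	 * lock word reads zero.
	 */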
	do {
		while (atomic_read(&lock->val) != 0)
			cpu_relax();
	} while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0);

	return true;
}
#else
static inline void native_pv_lock_init(void)
{
}
#endif /* CONFIG_PARAVIRT */

#include <asm-generic/qspinlock.h>

#endif /* _ASM_X86_QSPINLOCK_H */