/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Queued spinlock
 *
 * A 'generic' spinlock implementation based on MCS locks. For an
 * architecture that's looking for a 'generic' spinlock, please first consider
 * ticket-lock.h and only come looking here when you've considered all the
 * constraints below and can show your hardware does actually perform better
 * with qspinlock.
 *
 * qspinlock relies on atomic_*_release()/atomic_*_acquire() to be RCsc (or no
 * weaker than RCtso if you're PowerPC), where regular code only expects
 * atomic_t to be RCpc.
 *
 * qspinlock relies on a far greater (compared to asm-generic/spinlock.h) set
 * of atomic operations to behave well together; please audit them carefully
 * to ensure they all have forward progress. Many atomic operations may
 * default to cmpxchg() loops, which do not have good forward progress
 * properties on LL/SC architectures.
 *
 * One notable example is atomic_fetch_or_acquire(), which x86 cannot
 * implement cheaply. Carefully read the patches that introduced
 * queued_fetch_set_pending_acquire().
 *
 * qspinlock also relies heavily on mixed-size atomic operations; in
 * particular, it requires architectures to have xchg16, something many LL/SC
 * architectures need to implement as a 32-bit and+or in order to satisfy the
 * forward progress guarantees mentioned above.
 *
 * Further reading on mixed-size atomics that might be relevant:
 *
 *   http://www.cl.cam.ac.uk/~pes20/popl17/mixed-size.pdf
 *
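 * As a rough illustration of that constraint (a sketch only, not any
 * particular architecture's actual code), emulating a 16-bit exchange
 * of the tail bytes with a 32-bit cmpxchg() loop looks like:
 *
 *	u32 old, new, val = atomic_read(&lock->val);
 *
 *	for (;;) {
 *		new = (val & ~_Q_TAIL_MASK) | tail;
 *		old = atomic_cmpxchg_relaxed(&lock->val, val, new);
 *		if (old == val)
 *			break;
 *		val = old;
 *	}
 *
 * On LL/SC architectures every failed cmpxchg() is itself a full LL/SC
 * loop, so a construct like this offers no forward progress guarantee
 * of its own.
 *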
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>
#include <linux/atomic.h>

#ifndef queued_spin_is_locked
/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
	/*
	 * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
	 * isn't immediately observable.
	 */
	return atomic_read(&lock->val);
}
#endif

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked with respect to the lockref code, so that lockref cannot
 *      steal the lock and change things underneath it. This also allows
 *      some optimizations to be applied without conflicting with lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
	return !lock.val.counter;
}

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
	return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
	int val = atomic_read(&lock->val);

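	/*
	 * A non-zero value means the lock is held or has queued waiters;
	 * bail out early rather than issue a doomed cmpxchg().
	 */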
	if (unlikely(val))
		return 0;

	return likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL));
}

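/* Out-of-line contended path, implemented in kernel/locking/qspinlock.c. */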
extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

#ifndef queued_spin_lock
/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
	int val = 0;

	if (likely(atomic_try_cmpxchg_acquire(&lock->val, &val, _Q_LOCKED_VAL)))
		return;

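	/*
	 * The fast-path cmpxchg failed; on failure atomic_try_cmpxchg()
	 * updated @val with the lock word it observed, which seeds the
	 * slow path below.
	 */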
	queued_spin_lock_slowpath(lock, val);
}
#endif

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
	/*
	 * unlock() needs release semantics: stores inside the critical
	 * section must be visible before the lock is seen as free. Only
	 * the locked byte is cleared; any pending/tail state is left for
	 * the waiters.
	 */
	smp_store_release(&lock->locked, 0);
}
#endif

#ifndef virt_spin_lock
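/*
 * Hook for virtualized environments: an architecture may override this
 * to back off to a simpler locking scheme when running as a guest.
 * Returning false keeps the native qspinlock path.
 */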
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
	return false;
}
#endif

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)		queued_spin_is_locked(l)
#define arch_spin_is_contended(l)	queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)	queued_spin_value_unlocked(l)
#define arch_spin_lock(l)		queued_spin_lock(l)
#define arch_spin_trylock(l)		queued_spin_trylock(l)
#define arch_spin_unlock(l)		queued_spin_unlock(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */