
Searched refs: __pv_queued_spin_unlock (results 1 - 15 of 15, sorted by relevance)

/kernel/linux/linux-5.10/arch/x86/include/asm/
qspinlock_paravirt.h
     8   * and restored. So an optimized version of __pv_queued_spin_unlock() is
    14  #define __pv_queued_spin_unlock __pv_queued_spin_unlock
    23   * void __pv_queued_spin_unlock(struct qspinlock *lock)
    65  extern void __pv_queued_spin_unlock(struct qspinlock *lock);
    66  PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
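The matches at lines 23 and 65-66 are in the x86 header that replaces the generic unlock with hand-written assembly; the in-file comment around line 23 gives its C equivalent. A minimal sketch of that fast path, paraphrased from that comment rather than copied verbatim (pv_queued_spin_unlock_slowpath() handles the hashed-waiter case):

/*
 * Sketch of the unlock fast path the x86 asm implements (paraphrased
 * from the C-equivalent comment in qspinlock_paravirt.h).
 */
void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        /* Uncontended release: flip LOCKED -> 0 in one atomic op. */
        u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);

        if (likely(lockval == _Q_LOCKED_VAL))
                return;         /* no sleeping waiter to wake */

        /* Saw _Q_SLOW_VAL: a waiter hashed itself; take the slow path. */
        pv_queued_spin_unlock_slowpath(lock, lockval);
}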
/kernel/linux/linux-6.6/arch/x86/include/asm/
qspinlock_paravirt.h
    12   * and restored. So an optimized version of __pv_queued_spin_unlock() is
    18  #define __pv_queued_spin_unlock __pv_queued_spin_unlock
    26   * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
    64  extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
    65  __PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock, ".spinlock.text");
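Relative to the 5.10 header above, the 6.6 version tags the unlock with __lockfunc (which places it in the .spinlock.text section) and passes that same section to the two-argument __PV_CALLEE_SAVE_REGS_THUNK(), so the callee-save thunk and the real unlock land together, as described by the comment matched at lines 490-491 of the generic 6.6 file below.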
/kernel/linux/linux-5.10/arch/loongarch/include/asm/
qspinlock_paravirt.h
     7  #define __pv_queued_spin_unlock __pv_queued_spin_unlock
     8  void __pv_queued_spin_unlock(struct qspinlock *lock)
qspinlock.h
    15  extern void __pv_queued_spin_unlock(struct qspinlock *lock);
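These arch headers show the override hook the generic code exposes: defining __pv_queued_spin_unlock as a macro naming itself makes the #ifndef fallback in kernel/locking/qspinlock_paravirt.h (the matches at lines 546 and 562 below) compile out. A minimal sketch of the pattern, bodies elided:

/* Arch header: claim the symbol so the generic fallback is skipped. */
#define __pv_queued_spin_unlock __pv_queued_spin_unlock
void __pv_queued_spin_unlock(struct qspinlock *lock);  /* arch version */

/* Generic kernel/locking/qspinlock_paravirt.h: built only when no
 * arch header defined the macro above. */
#ifndef __pv_queued_spin_unlock
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        /* generic cmpxchg_release() fast path + slowpath call */
}
#endif /* __pv_queued_spin_unlock */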
/kernel/linux/linux-5.10/arch/powerpc/include/asm/
qspinlock.h
    13  extern void __pv_queued_spin_unlock(struct qspinlock *lock);
    29  __pv_queued_spin_unlock(lock);    [in queued_spin_unlock()]
qspinlock_paravirt.h
     5  EXPORT_SYMBOL(__pv_queued_spin_unlock);
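The match at line 29 sits inside powerpc's queued_spin_unlock(), which chooses between the native and paravirt release at run time. A sketch of that dispatch, assuming the 5.10 shared-processor (pseries) layout; treat the exact shape as approximate:

/* Sketch of the powerpc dispatch (assuming the 5.10 layout). */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        if (!is_shared_processor())
                smp_store_release(&lock->locked, 0);  /* dedicated CPUs */
        else
                __pv_queued_spin_unlock(lock);        /* PV guest: may kick a vCPU */
}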
/kernel/linux/linux-5.10/kernel/locking/
qspinlock_paravirt.h
    20   * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
   398   * __pv_queued_spin_unlock() will wake us.
   447   * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()    [in pv_wait_head_or_lock()]
   454   * Matches the smp_rmb() in __pv_queued_spin_unlock().    [in pv_wait_head_or_lock()]
   538   * __pv_queued_spin_unlock(). This thunk is put together with
   539   * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
   541   * Alternatively, architecture specific version of __pv_queued_spin_unlock()
   546  #ifndef __pv_queued_spin_unlock
   547  __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
   562  #endif /* __pv_queued_spin_unlock */
[all...]
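Lines 546-562 are the generic fallback: a cmpxchg_release() fast path that calls a slow path whenever it reads back something other than _Q_LOCKED_VAL. A sketch of both halves, paraphrased from memory of the mainline code (pv_unhash() and pv_kick() are the real helpers; the corruption WARN is elided):

/* Slow path: the queue head stored _Q_SLOW_VAL and went to sleep in
 * the hypervisor; unhash it and kick its vCPU. */
__visible void
__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
{
        struct pv_node *node;

        /* Order the hash lookup after reading _Q_SLOW_VAL; this is the
         * smp_rmb() the pv_wait_head_or_lock() matches above refer to. */
        smp_rmb();

        node = pv_unhash(lock);                 /* find the sleeping waiter */
        smp_store_release(&lock->locked, 0);    /* release the lock ... */
        pv_kick(node->cpu);                     /* ... then wake its vCPU */
}

/* Fast path: one atomic release; divert to the slow path only when the
 * lock byte no longer holds plain _Q_LOCKED_VAL. */
__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
{
        u8 locked = cmpxchg_release(&lock->locked, _Q_LOCKED_VAL, 0);

        if (likely(locked == _Q_LOCKED_VAL))
                return;

        __pv_queued_spin_unlock_slowpath(lock, locked);
}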
/kernel/linux/linux-6.6/kernel/locking/
qspinlock_paravirt.h
    20   * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
   398   * __pv_queued_spin_unlock() will wake us.
   447   * when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()    [in pv_wait_head_or_lock()]
   454   * Matches the smp_rmb() in __pv_queued_spin_unlock().    [in pv_wait_head_or_lock()]
   490   * __pv_queued_spin_unlock(). This thunk is put together with
   491   * __pv_queued_spin_unlock() to make the callee-save thunk and the real unlock
   493   * Alternatively, architecture specific version of __pv_queued_spin_unlock()
   546  #ifndef __pv_queued_spin_unlock
   547  __visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
   562  #endif /* __pv_queued_spin_unlock */
[all...]
/kernel/linux/linux-5.10/arch/x86/hyperv/
hv_spinlock.c
    77  pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in hv_init_spinlocks()]
/kernel/linux/linux-6.6/arch/x86/hyperv/
hv_spinlock.c
    81  pv_ops.lock.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in hv_init_spinlocks()]
/kernel/linux/linux-5.10/arch/x86/xen/
spinlock.c
   141  PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in xen_init_spinlocks()]
/kernel/linux/linux-6.6/arch/x86/xen/
spinlock.c
   141  PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in xen_init_spinlocks()]
/kernel/linux/linux-5.10/arch/loongarch/kernel/
paravirt.c
   231  pv_lock_ops.queued_spin_unlock = __pv_queued_spin_unlock;    [in kvm_spinlock_init()]
/kernel/linux/linux-5.10/arch/x86/kernel/
kvm.c
  1001  PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in kvm_spinlock_init()]
/kernel/linux/linux-6.6/arch/x86/kernel/
kvm.c
  1103  PV_CALLEE_SAVE(__pv_queued_spin_unlock);    [in kvm_spinlock_init()]
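The Hyper-V, Xen, and KVM matches above are all the same boot-time registration: wrap the unlock in its callee-save thunk and plug it into the paravirt ops table. A minimal sketch of the pattern, loosely modeled on the x86 KVM site (the feature checks, and the exact wait/kick hooks, are elided or approximate):

/* Sketch of the registration shared by the KVM/Xen/Hyper-V matches
 * above (loosely modeled on x86 kvm_spinlock_init(); approximate). */
static void __init kvm_spinlock_init(void)
{
        /* ... bail out unless the hypervisor advertises PV unhalt ... */

        __pv_init_lock_hash();
        pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
        pv_ops.lock.queued_spin_unlock =
                PV_CALLEE_SAVE(__pv_queued_spin_unlock);
        pv_ops.lock.wait = kvm_wait;
        pv_ops.lock.kick = kvm_kick_cpu;
}

PV_CALLEE_SAVE() hands out the thunk emitted by PV_CALLEE_SAVE_REGS_THUNK() (the x86 header matches above); the thunk saves and restores the caller-clobbered registers itself, so the patched call site can treat the unlock as nearly register-transparent.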

Completed in 8 milliseconds