/kernel/linux/linux-6.6/kernel/locking/
rwbase_rt.c
     19:  * 2) Take rtmutex::wait_lock, which protects the writelocked flag
     74:  raw_spin_lock_irq(&rtm->wait_lock);   in __rwbase_read_lock()
     77:  * Call into the slow lock path with the rtmutex->wait_lock   in __rwbase_read_lock()
     86:  * unlock(m->wait_lock)   in __rwbase_read_lock()
     89:  * lock(m->wait_lock)   in __rwbase_read_lock()
     91:  * unlock(m->wait_lock)   in __rwbase_read_lock()
    119:  * rtmutex->wait_lock has to be unlocked in any case of course.   in __rwbase_read_lock()
    123:  raw_spin_unlock_irq(&rtm->wait_lock);   in __rwbase_read_lock()
    147:  raw_spin_lock_irq(&rtm->wait_lock);   in __rwbase_read_unlock()
    151:  * clean up rwb->readers it needs to acquire rtm->wait_lock   in __rwbase_read_unlock()
    (more matches not shown)
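The slow path quoted above pairs a lock-free reader count with contention handling done entirely under the rtmutex's wait_lock. Below is a minimal userspace sketch of the reader fast path only, assuming C11 atomics and a pthread spinlock in place of the kernel's raw spinlock; every identifier is hypothetical, not a kernel name.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct tiny_rwbase {
        atomic_int         readers;    /* >= 0: reader count; < 0: writer holds it */
        pthread_spinlock_t wait_lock;  /* serializes the reader/writer slow paths */
};

/* Reader fast path: a single acquire cmpxchg, wait_lock never touched. */
static bool tiny_read_trylock(struct tiny_rwbase *rwb)
{
        int c = atomic_load_explicit(&rwb->readers, memory_order_relaxed);

        while (c >= 0) {
                if (atomic_compare_exchange_weak_explicit(&rwb->readers, &c,
                                c + 1, memory_order_acquire,
                                memory_order_relaxed))
                        return true;
                /* c was reloaded by the failed cmpxchg; retry unless a
                 * writer has since driven the count negative. */
        }
        return false;   /* writer active: the real code now takes wait_lock */
}

static void tiny_read_unlock(struct tiny_rwbase *rwb)
{
        /* Pairs with a writer's acquire read of the counter. */
        atomic_fetch_sub_explicit(&rwb->readers, 1, memory_order_release);
}

A failed trylock is the point where __rwbase_read_lock() above takes rtm->wait_lock with interrupts disabled and enters the slow path.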
rtmutex.c
     82:  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
    107:  * lock->wait_lock is held but explicit acquire semantics are needed   in rt_mutex_set_owner()
    115:  /* lock->wait_lock is held so the unlock provides release semantics. */   in rt_mutex_clear_owner()
    229:  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
    253:  * 2) Drop lock->wait_lock
    258:  __releases(lock->wait_lock)
    263:  raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    268:  * unlock(wait_lock);
    269:  * lock(wait_lock);
    275:  * unlock(wait_lock);
    (more matches not shown)
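The matches at lines 82-115 describe the split between the cmpxchg fast path and the wait_lock-protected slow path, and the memory-ordering care the owner word needs even while wait_lock is held. A hedged userspace sketch of that split, with pthreads standing in for kernel primitives and all names hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct tiny_rtmutex {
        atomic_uintptr_t   owner;      /* 0 == unlocked, else owner's id */
        pthread_spinlock_t wait_lock;  /* serializes all slow-path state */
};

/* Fast path: one cmpxchg on the owner word, wait_lock never touched. */
static int tiny_rt_trylock(struct tiny_rtmutex *m, uintptr_t self)
{
        uintptr_t unowned = 0;

        return atomic_compare_exchange_strong_explicit(&m->owner, &unowned,
                        self, memory_order_acquire, memory_order_relaxed);
}

/*
 * Slow-path owner update, to be called with wait_lock held. As the quoted
 * comments at lines 107/115 note, holding wait_lock does not by itself
 * order the owner word against the critical section, so this store
 * carries explicit release semantics (the kernel instead leans on the
 * release implied by the subsequent wait_lock unlock).
 */
static void tiny_rt_clear_owner(struct tiny_rtmutex *m)
{
        atomic_store_explicit(&m->owner, 0, memory_order_release);
}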
rtmutex_api.c
    168:  lockdep_assert_held(&lock->wait_lock);   in __rt_mutex_futex_unlock()
    179:  * retain preempt_disabled when we drop the wait_lock, to   in __rt_mutex_futex_unlock()
    194:  raw_spin_lock_irqsave(&lock->wait_lock, flags);   in rt_mutex_futex_unlock()
    196:  raw_spin_unlock_irqrestore(&lock->wait_lock, flags);   in rt_mutex_futex_unlock()
    246:  * the wait_lock of the rtmutex associated to the pi_futex held.   in rt_mutex_init_proxy_locked()
    247:  * spin_unlock() in turn takes wait_lock of the rtmutex on which   in rt_mutex_init_proxy_locked()
    249:  * recursion. Give the futex/rtmutex wait_lock a separate key.   in rt_mutex_init_proxy_locked()
    251:  lockdep_set_class(&lock->wait_lock, &pi_futex_key);   in rt_mutex_init_proxy_locked()
    298:  lockdep_assert_held(&lock->wait_lock);   in __rt_mutex_start_proxy_lock()
    345:  raw_spin_lock_irq(&lock->wait_lock);   in rt_mutex_start_proxy_lock()
    (more matches not shown)
rwsem.c
    113:  * For all the above cases, wait_lock will be held. A writer must also
    324:  raw_spin_lock_init(&sem->wait_lock);   in __init_rwsem()
    372:  lockdep_assert_held(&sem->wait_lock);   in rwsem_add_waiter()
    388:  lockdep_assert_held(&sem->wait_lock);   in rwsem_del_waiter()
    402:  * - the wait_lock must be held by the caller
    405:  * preferably when the wait_lock is released
    419:  lockdep_assert_held(&sem->wait_lock);   in rwsem_mark_wake()
    573:  * writer slowpaths with wait_lock held. It releases the wait_lock and
    579:  __releases(&sem->wait_lock)
    (more matches not shown)
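These matches show the wait_lock discipline for the rwsem waiter list: list surgery happens with the lock held and asserted, while the actual wakeups are issued "preferably when the wait_lock is released" (line 405). A small sketch of that split, assuming a per-waiter flag stands in for the kernel's wake queue; all names are hypothetical:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct tiny_waiter {
        struct tiny_waiter *next;
        atomic_bool         woken;      /* the waiter sleeps/spins on this */
};

struct tiny_rwsem {
        pthread_spinlock_t  wait_lock;  /* protects wait_list and nothing else */
        struct tiny_waiter *wait_list;
};

/* Analogue of rwsem_mark_wake(): caller must hold wait_lock (the
 * lockdep_assert_held() is only a comment here). Detaches the waiters
 * but does not wake anybody yet. */
static struct tiny_waiter *tiny_mark_wake(struct tiny_rwsem *sem)
{
        struct tiny_waiter *batch = sem->wait_list;

        sem->wait_list = NULL;
        return batch;
}

static void tiny_wake_all(struct tiny_rwsem *sem)
{
        struct tiny_waiter *batch;

        pthread_spin_lock(&sem->wait_lock);
        batch = tiny_mark_wake(sem);            /* list surgery under wait_lock */
        pthread_spin_unlock(&sem->wait_lock);

        /* Wakeups happen after wait_lock is dropped, keeping hold times
         * short. Fetch ->next before waking: a woken waiter may free or
         * reuse its entry immediately. */
        while (batch) {
                struct tiny_waiter *next = batch->next;

                atomic_store_explicit(&batch->woken, true,
                                      memory_order_release);
                batch = next;
        }
}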
qrwlock.c
     43:  arch_spin_lock(&lock->wait_lock);   in queued_read_lock_slowpath()
     56:  arch_spin_unlock(&lock->wait_lock);   in queued_read_lock_slowpath()
     73:  arch_spin_lock(&lock->wait_lock);   in queued_write_lock_slowpath()
     88:  arch_spin_unlock(&lock->wait_lock);   in queued_write_lock_slowpath()
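The qrwlock slowpaths serialize contended lockers on wait_lock, an arch_spinlock_t, so at most one contender at a time spins on the reader/writer state. A rough userspace analogue that funnels every locker through wait_lock; the real code's uncontended fast paths are deliberately omitted and all names are hypothetical:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct tiny_qrwlock {
        atomic_int         readers;    /* active readers */
        atomic_bool        writer;     /* a writer owns the lock */
        pthread_spinlock_t wait_lock;  /* queue of contended lockers */
};

static void tiny_read_lock(struct tiny_qrwlock *l)
{
        pthread_spin_lock(&l->wait_lock);       /* wait our turn in the queue */
        while (atomic_load_explicit(&l->writer, memory_order_acquire))
                sched_yield();                  /* current writer still inside */
        atomic_fetch_add_explicit(&l->readers, 1, memory_order_relaxed);
        pthread_spin_unlock(&l->wait_lock);
}

static void tiny_read_unlock(struct tiny_qrwlock *l)
{
        atomic_fetch_sub_explicit(&l->readers, 1, memory_order_release);
}

static void tiny_write_lock(struct tiny_qrwlock *l)
{
        pthread_spin_lock(&l->wait_lock);
        while (atomic_load_explicit(&l->writer, memory_order_acquire))
                sched_yield();                  /* previous writer drains */
        atomic_store_explicit(&l->writer, true, memory_order_relaxed);
        while (atomic_load_explicit(&l->readers, memory_order_acquire))
                sched_yield();                  /* in-flight readers drain */
        pthread_spin_unlock(&l->wait_lock);
}

static void tiny_write_unlock(struct tiny_qrwlock *l)
{
        atomic_store_explicit(&l->writer, false, memory_order_release);
}

Because every acquisition holds wait_lock while it checks and updates the shared state, readers cannot slip in under a draining writer, which is the fairness property the queued slowpath exists to provide.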
ww_mutex.h
     75:  raw_spin_lock(&lock->wait_lock);   in lock_wait_lock()
     80:  raw_spin_unlock(&lock->wait_lock);   in unlock_wait_lock()
     85:  lockdep_assert_held(&lock->wait_lock);   in lockdep_assert_wait_lock_held()
    149:  raw_spin_lock(&lock->rtmutex.wait_lock);   in lock_wait_lock()
    154:  raw_spin_unlock(&lock->rtmutex.wait_lock);   in unlock_wait_lock()
    159:  lockdep_assert_held(&lock->rtmutex.wait_lock);   in lockdep_assert_wait_lock_held()
    319:  * wait_lock.   in __ww_mutex_wound()
    374:  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
    386:  * and keep spinning, or it will acquire wait_lock, add itself   in ww_mutex_set_context_fastpath()
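Note that lock_wait_lock(), unlock_wait_lock(), and lockdep_assert_wait_lock_held() each appear twice above: once for the mutex-based and once for the rtmutex-based implementation, so the shared ww_mutex code is written only once against these helpers. A hypothetical compile-time sketch of the same trick, with invented names throughout:

#include <pthread.h>

#ifdef TINY_WW_RT
struct tiny_ww_base {
        struct { pthread_spinlock_t wait_lock; } rtmutex;  /* rtmutex-based */
};
#define TINY_WAIT_LOCK(b) (&(b)->rtmutex.wait_lock)
#else
struct tiny_ww_base {
        pthread_spinlock_t wait_lock;                      /* mutex-based */
};
#define TINY_WAIT_LOCK(b) (&(b)->wait_lock)
#endif

/* The rest of the code calls only these helpers and never cares which
 * base lock was selected at build time. */
static inline void lock_wait_lock(struct tiny_ww_base *b)
{
        pthread_spin_lock(TINY_WAIT_LOCK(b));
}

static inline void unlock_wait_lock(struct tiny_ww_base *b)
{
        pthread_spin_unlock(TINY_WAIT_LOCK(b));
}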
mutex.c
     49:  raw_spin_lock_init(&lock->wait_lock);   in __mutex_init()
    313:  * by acquiring wait_lock there is a guarantee that   in ww_mutex_spin_on_owner()
    617:  raw_spin_lock(&lock->wait_lock);   in __mutex_lock_common()
    619:  * After waiting to acquire the wait_lock, try again.   in __mutex_lock_common()
    654:  * Once we hold wait_lock, we're serialized against   in __mutex_lock_common()
    664:  * wait_lock. This ensures the lock cancellation is ordered   in __mutex_lock_common()
    678:  raw_spin_unlock(&lock->wait_lock);   in __mutex_lock_common()
    699:  raw_spin_lock(&lock->wait_lock);   in __mutex_lock_common()
    701:  raw_spin_lock(&lock->wait_lock);   in __mutex_lock_common()
    727:  raw_spin_unlock(&lock->wait_lock);   in __mutex_lock_common()
    (more matches not shown)
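The __mutex_lock_common() matches show the characteristic shape of a sleeping lock: retry under wait_lock, then drop and retake the lock around the actual sleep (the unlock at 678 and the relocks at 699/701). A minimal sketch of that shape, assuming sched_yield() as a stand-in for schedule() and with the waiter list omitted; all names are hypothetical:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>

struct tiny_mutex {
        atomic_bool        locked;
        pthread_spinlock_t wait_lock;   /* protects the (omitted) waiter list */
};

static bool tiny_trylock(struct tiny_mutex *m)
{
        bool unlocked = false;

        return atomic_compare_exchange_strong_explicit(&m->locked, &unlocked,
                        true, memory_order_acquire, memory_order_relaxed);
}

static void tiny_lock(struct tiny_mutex *m)
{
        if (tiny_trylock(m))                    /* fast path */
                return;

        pthread_spin_lock(&m->wait_lock);
        /* "After waiting to acquire the wait_lock, try again." */
        while (!tiny_trylock(m)) {
                /* Enqueue-self would happen here; then wait_lock is
                 * dropped around the sleep and retaken afterwards --
                 * the unlock/lock pairing quoted above. */
                pthread_spin_unlock(&m->wait_lock);
                sched_yield();                  /* stand-in for schedule() */
                pthread_spin_lock(&m->wait_lock);
        }
        pthread_spin_unlock(&m->wait_lock);
}

static void tiny_unlock(struct tiny_mutex *m)
{
        atomic_store_explicit(&m->locked, false, memory_order_release);
}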
/kernel/linux/linux-5.10/drivers/gpu/drm/tidss/
tidss_irq.c
     15:  /* call with wait_lock and dispc runtime held */
     18:  assert_spin_locked(&tidss->wait_lock);   in tidss_irq_update()
     31:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_enable_vblank()
     35:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_enable_vblank()
     46:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_disable_vblank()
     50:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_disable_vblank()
     91:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_resume()
     93:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_resume()
    100:  spin_lock_init(&tidss->wait_lock);   in tidss_irq_preinstall()
    118:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_postinstall()
    (more matches not shown)
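The driver takes tidss->wait_lock with spin_lock_irqsave() everywhere because the interrupt handler takes the same lock: the cached irq state must never be updated with interrupts enabled on the holding CPU. A loose userspace analogue in which a blocked signal plays the role of the disabled interrupt; everything here is hypothetical, and pthread_spin_init() on wait_lock is assumed to have run:

#include <pthread.h>
#include <signal.h>

static pthread_spinlock_t wait_lock;    /* pthread_spin_init() assumed done */
static unsigned int irq_mask;           /* cached enable mask */

/* Analogue of spin_lock_irqsave(): block the "interrupt" (a signal)
 * before taking the lock, so the handler cannot preempt the holder on
 * this thread and deadlock on wait_lock. */
static void lock_irqsave(sigset_t *saved)
{
        sigset_t block;

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &block, saved);
        pthread_spin_lock(&wait_lock);
}

static void unlock_irqrestore(const sigset_t *saved)
{
        pthread_spin_unlock(&wait_lock);
        pthread_sigmask(SIG_SETMASK, saved, NULL);
}

/* Analogue of tidss_irq_enable_vblank(): update the mask under the lock;
 * the real driver then pushes the mask to hardware in tidss_irq_update(). */
static void enable_vblank(unsigned int bit)
{
        sigset_t saved;

        lock_irqsave(&saved);
        irq_mask |= bit;
        unlock_irqrestore(&saved);
}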
/kernel/linux/linux-5.10/kernel/locking/
rtmutex.c
     42:  * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
    148:  * Callers must hold the ->wait_lock -- which is the whole purpose as we force
    165:  * 2) Drop lock->wait_lock
    170:  __releases(lock->wait_lock)
    175:  raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    180:  * unlock(wait_lock);
    181:  * lock(wait_lock);
    187:  * unlock(wait_lock);
    188:  * lock(wait_lock);
    193:  * unlock(wait_lock);
    (more matches not shown)
rwsem.c
    150:  * For all the above cases, wait_lock will be held. A writer must also
    337:  raw_spin_lock_init(&sem->wait_lock);   in __init_rwsem()
    393:  * - the wait_lock must be held by the caller
    396:  * preferably when the wait_lock is released
    408:  lockdep_assert_held(&sem->wait_lock);   in rwsem_mark_wake()
    555:  * This function must be called with the sem->wait_lock held to prevent
    567:  lockdep_assert_held(&sem->wait_lock);   in rwsem_try_write_lock()
    796:  /* sem->wait_lock should not be held when doing optimistic spinning */   in rwsem_optimistic_spin()
   1022:  raw_spin_lock_irq(&sem->wait_lock);   in rwsem_down_read_slowpath()
   1026:  raw_spin_unlock_irq(&sem->wait_lock);   in rwsem_down_read_slowpath()
    (more matches not shown)
qrwlock.c
     41:  arch_spin_lock(&lock->wait_lock);   in queued_read_lock_slowpath()
     54:  arch_spin_unlock(&lock->wait_lock);   in queued_read_lock_slowpath()
     67:  arch_spin_lock(&lock->wait_lock);   in queued_write_lock_slowpath()
     82:  arch_spin_unlock(&lock->wait_lock);   in queued_write_lock_slowpath()
mutex.c
     43:  spin_lock_init(&lock->wait_lock);   in __mutex_init()
    405:  lockdep_assert_held(&lock->wait_lock);   in __ww_mutex_wound()
    418:  * wait_lock.   in __ww_mutex_wound()
    458:  lockdep_assert_held(&lock->wait_lock);   in __ww_mutex_check_waiters()
    471:  * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
    483:  * and keep spinning, or it will acquire wait_lock, add itself   in ww_mutex_set_context_fastpath()
    504:  spin_lock(&lock->base.wait_lock);   in ww_mutex_set_context_fastpath()
    506:  spin_unlock(&lock->base.wait_lock);   in ww_mutex_set_context_fastpath()
    521:  * by acquiring wait_lock there is a guarantee that   in ww_mutex_spin_on_owner()
    980:  spin_lock(&lock->wait_lock);   in __mutex_lock_common()
    (more matches not shown)
/kernel/linux/linux-5.10/drivers/gpu/drm/omapdrm/
omap_irq.c
     18:  /* call with wait_lock and dispc runtime held */
     25:  assert_spin_locked(&priv->wait_lock);   in omap_irq_update()
     52:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_wait_init()
     55:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_wait_init()
     69:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_wait()
     72:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_wait()
     90:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_enable_framedone()
     96:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_enable_framedone()
    123:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_enable_vblank()
    127:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_enable_vblank()
    (more matches not shown)
/kernel/linux/linux-6.6/drivers/gpu/drm/omapdrm/
omap_irq.c
     18:  /* call with wait_lock and dispc runtime held */
     25:  assert_spin_locked(&priv->wait_lock);   in omap_irq_update()
     52:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_wait_init()
     55:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_wait_init()
     69:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_wait()
     72:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_wait()
     90:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_enable_framedone()
     96:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_enable_framedone()
    122:  spin_lock_irqsave(&priv->wait_lock, flags);   in omap_irq_enable_vblank()
    126:  spin_unlock_irqrestore(&priv->wait_lock, flags);   in omap_irq_enable_vblank()
    (more matches not shown)
/kernel/linux/linux-6.6/drivers/gpu/drm/tidss/
tidss_irq.c
     18:  /* call with wait_lock and dispc runtime held */
     21:  assert_spin_locked(&tidss->wait_lock);   in tidss_irq_update()
     34:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_enable_vblank()
     38:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_enable_vblank()
     49:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_disable_vblank()
     53:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_disable_vblank()
     91:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_resume()
     93:  spin_unlock_irqrestore(&tidss->wait_lock, flags);   in tidss_irq_resume()
    100:  spin_lock_init(&tidss->wait_lock);   in tidss_irq_preinstall()
    118:  spin_lock_irqsave(&tidss->wait_lock, flags);   in tidss_irq_postinstall()
    (more matches not shown)
/kernel/linux/linux-5.10/drivers/tty/
tty_ldsem.c
     69:  raw_spin_lock_init(&sem->wait_lock);   in __init_ldsem()
    147:  raw_spin_lock_irqsave(&sem->wait_lock, flags);   in ldsem_wake()
    149:  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);   in ldsem_wake()
    162:  raw_spin_lock_irq(&sem->wait_lock);   in down_read_failed()
    175:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    190:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    211:  raw_spin_lock_irq(&sem->wait_lock);   in down_read_failed()
    216:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    220:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    237:  raw_spin_lock_irq(&sem->wait_lock);   in down_write_failed()
    (more matches not shown)
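What distinguishes the ldsem slowpaths from the other sleeping locks above is the timeout: down_read_failed() re-checks the count under wait_lock, drops the lock around the wait, and gives up at a deadline. A hedged sketch of the timed-retry shape only, with yielding in place of the real wait-queue sleep; all names are hypothetical and wait_lock is assumed initialized:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <time.h>

static pthread_spinlock_t wait_lock;    /* pthread_spin_init() assumed done */
static atomic_int sem_count;            /* > 0: read locks available */

static bool deadline_passed(const struct timespec *end)
{
        struct timespec now;

        clock_gettime(CLOCK_MONOTONIC, &now);
        return now.tv_sec > end->tv_sec ||
               (now.tv_sec == end->tv_sec && now.tv_nsec >= end->tv_nsec);
}

/* Shape of down_read_failed(): re-check under wait_lock, drop the lock
 * around the wait, give up at the deadline. */
static bool tiny_down_read_timeout(long timeout_ns)
{
        struct timespec end;

        clock_gettime(CLOCK_MONOTONIC, &end);
        end.tv_sec  += timeout_ns / 1000000000L;
        end.tv_nsec += timeout_ns % 1000000000L;
        if (end.tv_nsec >= 1000000000L) {
                end.tv_sec++;
                end.tv_nsec -= 1000000000L;
        }

        for (;;) {
                pthread_spin_lock(&wait_lock);
                int c = atomic_load_explicit(&sem_count, memory_order_relaxed);

                if (c > 0 && atomic_compare_exchange_strong_explicit(&sem_count,
                                &c, c - 1, memory_order_acquire,
                                memory_order_relaxed)) {
                        pthread_spin_unlock(&wait_lock);
                        return true;
                }
                pthread_spin_unlock(&wait_lock);  /* dropped around the wait */

                if (deadline_passed(&end))
                        return false;             /* timed out, as in ldsem */
                sched_yield();                    /* stand-in for the sleep */
        }
}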
/kernel/linux/linux-6.6/drivers/tty/
tty_ldsem.c
     69:  raw_spin_lock_init(&sem->wait_lock);   in __init_ldsem()
    147:  raw_spin_lock_irqsave(&sem->wait_lock, flags);   in ldsem_wake()
    149:  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);   in ldsem_wake()
    162:  raw_spin_lock_irq(&sem->wait_lock);   in down_read_failed()
    175:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    190:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    211:  raw_spin_lock_irq(&sem->wait_lock);   in down_read_failed()
    216:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    220:  raw_spin_unlock_irq(&sem->wait_lock);   in down_read_failed()
    237:  raw_spin_lock_irq(&sem->wait_lock);   in down_write_failed()
    (more matches not shown)
/kernel/linux/linux-6.6/kernel/futex/
pi.c
     50:  lockdep_assert_held(&pi_state->pi_mutex.wait_lock);   in pi_state_update_owner()
     92:  raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);   in put_pi_state()
     95:  raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);   in put_pi_state()
    173:  * pi_mutex->wait_lock:
    192:  * pi_mutex->wait_lock
    231:  * Now that we have a pi_state, we can acquire wait_lock   in attach_to_pi_state()
    234:  raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);   in attach_to_pi_state()
    237:  * Since {uval, pi_state} is serialized by wait_lock, and our current   in attach_to_pi_state()
    299:  raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);   in attach_to_pi_state()
    316:  raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);   in attach_to_pi_state()
    (more matches not shown)
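The matches at lines 173-192 come from the file's lock-ordering documentation, and attach_to_pi_state() takes pi_mutex.wait_lock while already holding the hash-bucket lock. A toy illustration of why every path must take the two locks in one fixed order; this is a sketch of the ordering rule with hypothetical names, not the futex code:

#include <pthread.h>

/* Outer and inner locks, in the documented order. */
static pthread_spinlock_t hb_lock;        /* analogue of hb->lock */
static pthread_spinlock_t pi_wait_lock;   /* analogue of pi_mutex.wait_lock */

/*
 * Every path that needs both locks takes them in the same order. If one
 * path took pi_wait_lock first while another took hb_lock first, two
 * threads could each hold one lock and spin on the other forever.
 */
static void attach_state(void)
{
        pthread_spin_lock(&hb_lock);        /* outer */
        pthread_spin_lock(&pi_wait_lock);   /* inner */

        /* ... state is consistent only while both are held ... */

        pthread_spin_unlock(&pi_wait_lock);
        pthread_spin_unlock(&hb_lock);
}

The separate lockdep key given to the futex rtmutex's wait_lock in rt_mutex_init_proxy_locked() (quoted earlier) exists precisely so lockdep can check this ordering without reporting false recursion.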
/kernel/linux/linux-5.10/include/linux/
rtmutex.h
     25:  * @wait_lock: spinlock to protect the structure
     31:  raw_spinlock_t wait_lock;   member
     86:  { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
rwsem.h
     46:  raw_spinlock_t wait_lock;   member
     93:  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
mutex.h
     55:  spinlock_t wait_lock;   member
    134:  , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
/kernel/linux/linux-6.6/include/linux/
rtmutex.h
     24:  raw_spinlock_t wait_lock;   member
     31:  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
     52:  * @wait_lock: spinlock to protect the structure
mutex.h
     66:  raw_spinlock_t wait_lock;   member
    111:  , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
/kernel/linux/linux-5.10/include/asm-generic/
qrwlock_types.h
     26:  arch_spinlock_t wait_lock;   member
     31:  .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
/kernel/linux/linux-6.6/include/asm-generic/
qrwlock_types.h
     26:  arch_spinlock_t wait_lock;   member
     31:  .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
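Every header entry above follows the same pattern: a wait_lock member plus a designated-initializer macro (__RAW_SPIN_LOCK_UNLOCKED, __SPIN_LOCK_UNLOCKED, __ARCH_SPIN_LOCK_UNLOCKED) so lock-containing objects can be defined fully initialized at file scope, with no runtime init call. A hypothetical userspace equivalent of that trick:

#include <pthread.h>

struct tiny_lock {
        pthread_mutex_t wait_lock;   /* protects everything below */
        const char     *name;
};

/* Analogue of the __*_INITIALIZER macros: a compound initializer that
 * stringizes the variable name, usable in a static definition. */
#define TINY_LOCK_INITIALIZER(lockname) \
        { .wait_lock = PTHREAD_MUTEX_INITIALIZER, .name = #lockname }

static struct tiny_lock demo_lock = TINY_LOCK_INITIALIZER(demo_lock);

The kernel versions take the name argument for the same reason this sketch does: it feeds lockdep (or here, a debug string) a human-readable identity for the lock.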