
Searched refs: lock_ptr (Results 1 - 9 of 9), sorted by relevance

/kernel/linux/linux-6.6/kernel/futex/
core.c
507 * The q->lock_ptr must not be NULL and must be held by the caller.
513 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) in __futex_unqueue()
515 lockdep_assert_held(q->lock_ptr); in __futex_unqueue()
517 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __futex_unqueue()
540 q->lock_ptr = &hb->lock;
576 * The q->lock_ptr must not be held by the caller. A call to futex_unqueue() must
585 spinlock_t *lock_ptr; in futex_unqueue() local
591 * q->lock_ptr can change between this read and the following spin_lock. in futex_unqueue()
592 * Use READ_ONCE to forbid the compiler from reloading q->lock_ptr and in futex_unqueue()
593 * optimizing lock_ptr out of the loop assuming it's not changed. in futex_unqueue()
[all...]
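
The comment above explains why futex_unqueue() must re-check q->lock_ptr after taking the lock: a concurrent requeue can move the waiter to another bucket, and a concurrent wake can clear the pointer entirely. Below is a minimal userspace sketch of that read/lock/recheck loop, assuming C11 atomics and a pthread mutex in place of READ_ONCE and the kernel spinlock; struct waiter and struct bucket are illustrative names, not kernel types.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct bucket { pthread_mutex_t lock; };

struct waiter {
    /* Written under the bucket lock by wakers/requeuers, read locklessly below. */
    _Atomic(struct bucket *) bucket_ptr;
};

/* Remove a waiter while tolerating concurrent requeue (bucket_ptr changes)
 * and concurrent wake (bucket_ptr becomes NULL). */
static int waiter_unqueue(struct waiter *w)
{
    struct bucket *b;

retry:
    /* One explicit load; the compiler may not re-read or hoist it, which is
     * the job READ_ONCE does in the kernel version. */
    b = atomic_load_explicit(&w->bucket_ptr, memory_order_acquire);
    if (b == NULL)
        return 0;                       /* already woken, nothing to unlink */

    pthread_mutex_lock(&b->lock);
    /* The pointer may have changed between the load and the lock. */
    if (atomic_load_explicit(&w->bucket_ptr, memory_order_relaxed) != b) {
        pthread_mutex_unlock(&b->lock);
        goto retry;                     /* requeued or woken meanwhile */
    }
    /* ... unlink w from b's wait list here, under b->lock ... */
    pthread_mutex_unlock(&b->lock);
    return 1;
}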
requeue.c
87 q->lock_ptr = &hb2->lock; in requeue_futex()
211 * 4) Set the q->lock_ptr to the requeue target hb->lock for the case that
220 * Must be called with both q->lock_ptr and hb->lock held.
233 q->lock_ptr = &hb->lock; in requeue_pi_wake_futex()
704 WARN_ON_ONCE(&hb->lock != q->lock_ptr); in handle_early_requeue_pi_wakeup()
832 spin_lock(q.lock_ptr); in futex_wait_requeue_pi()
839 spin_unlock(q.lock_ptr); in futex_wait_requeue_pi()
854 spin_lock(q.lock_ptr); in futex_wait_requeue_pi()
872 spin_unlock(q.lock_ptr); in futex_wait_requeue_pi()
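
requeue_futex() and requeue_pi_wake_futex() both repoint q->lock_ptr at the destination bucket's lock while the relevant locks are held, so that a later unqueue takes the right lock. Continuing the sketch above (same illustrative struct waiter and struct bucket), the move is roughly:

/* Move one waiter from bucket hb1 to bucket hb2. The caller holds both
 * bucket locks, as the requeue.c comment at line 220 requires, so no
 * unqueue can observe a half-moved waiter. */
static void waiter_requeue(struct waiter *w, struct bucket *hb1, struct bucket *hb2)
{
    (void)hb1;  /* lock held by caller; the list move itself is elided */
    /* ... delete w from hb1's wait list, add it to hb2's ... */
    atomic_store_explicit(&w->bucket_ptr, hb2, memory_order_release);
}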
pi.c
804 spin_unlock(q->lock_ptr); in __fixup_pi_state_owner()
821 spin_lock(q->lock_ptr); in __fixup_pi_state_owner()
861 lockdep_assert_held(q->lock_ptr); in fixup_pi_state_owner()
1021 spin_unlock(q.lock_ptr); in futex_lock_pi()
1042 spin_lock(q.lock_ptr); in futex_lock_pi()
1069 spin_unlock(q.lock_ptr); in futex_lock_pi()
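
The pi.c hits share one shape: futex_lock_pi() and __fixup_pi_state_owner() drop q->lock_ptr around a step that can block or take other locks, then retake it. A sketch of that discipline, reusing the types above; the key point is that anything checked before the unlock must be revalidated after the relock:

/* Drop the bucket lock around a potentially blocking step, then retake it.
 * Other threads run in the window, so the caller must re-check the waiter's
 * state (woken? requeued?) once the lock is held again. */
static void with_lock_dropped(struct waiter *w, struct bucket *b,
                              void (*blocking_step)(void))
{
    pthread_mutex_unlock(&b->lock);
    blocking_step();                    /* e.g. block on an rt-mutex */
    pthread_mutex_lock(&b->lock);
    /* ... revalidate w here before continuing ... */
    (void)w;
}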
waitwake.c
125 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL in futex_wake_mark()
128 * to prevent the following store to lock_ptr from getting ahead of the in futex_wake_mark()
131 smp_store_release(&q->lock_ptr, NULL); in futex_wake_mark()
503 if (!READ_ONCE(vs->q.lock_ptr)) in futex_sleep_multiple()
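
The futex_wake_mark() comment is the other half of the unqueue protocol: the waiter may free its futex_q the instant it observes lock_ptr == NULL, so the waker must finish the unlink before the NULL store becomes visible. smp_store_release provides exactly that ordering; in the C11 sketch it corresponds to a release store:

/* Wake side, mirroring futex_wake_mark(): fully unlink first, then publish
 * NULL with release ordering. Once a waiter sees NULL it may free itself,
 * so nothing may touch *w after this store. */
static void waiter_wake_mark(struct waiter *w)
{
    /* caller holds the bucket lock */
    /* ... delete w from the bucket's wait list ... */
    atomic_store_explicit(&w->bucket_ptr, NULL, memory_order_release);
    /* w is no longer ours */
}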
futex.h
76 * @lock_ptr: the hash bucket lock
89 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
100 spinlock_t *lock_ptr; member
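
futex.h states the wake condition: a waiter counts as woken when it is off the bucket list or lock_ptr has been cleared. On the waiter side that pairs an acquire load with the release store above; a sketch of the simplest possible wait loop (a real waiter sleeps rather than spins):

/* Waiter side: the acquire load pairs with the release store in
 * waiter_wake_mark(), so once NULL is observed the unlink is complete and
 * the waiter may safely reclaim its own structure. */
static void waiter_wait(struct waiter *w)
{
    while (atomic_load_explicit(&w->bucket_ptr, memory_order_acquire) != NULL)
        ;   /* spin for illustration only; the kernel sleeps here */
}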
/kernel/linux/linux-5.10/kernel/futex/
core.c
194 * @lock_ptr: the hash bucket lock
205 * It is considered woken when plist_node_empty(&q->list) || q->lock_ptr == 0.
216 spinlock_t *lock_ptr; member
1456 * The q->lock_ptr must not be NULL and must be held by the caller.
1462 if (WARN_ON_SMP(!q->lock_ptr) || WARN_ON(plist_node_empty(&q->list))) in __unqueue_futex()
1464 lockdep_assert_held(q->lock_ptr); in __unqueue_futex()
1466 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __unqueue_futex()
1487 * The waiting task can free the futex_q as soon as q->lock_ptr = NULL in mark_wake_futex()
1490 * to prevent the following store to lock_ptr from getting ahead of the in mark_wake_futex()
1493 smp_store_release(&q->lock_ptr, NULL); in mark_wake_futex()
2286 spinlock_t *lock_ptr; in unqueue_me() local
[all...]
/kernel/liteos_m/utils/
los_compiler.h
482 BOOL *lock_ptr = __libatomic_flag_for_address (addr); in get_lock() local
485 while (__atomic_test_and_set (lock_ptr, __ATOMIC_ACQUIRE) == 1) { in get_lock()
492 BOOL *lock_ptr = __libatomic_flag_for_address (addr); in free_lock() local
494 __atomic_clear (lock_ptr, __ATOMIC_RELEASE); in free_lock()
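
The LiteOS snippet builds a spinlock from two compiler builtins: __atomic_test_and_set with acquire ordering to take the lock and __atomic_clear with release ordering to drop it. A self-contained sketch of the same idiom; LiteOS maps each guarded address to one of several flags via __libatomic_flag_for_address, which a single static flag stands in for here:

#include <stdbool.h>

static bool lock_flag;   /* stand-in for __libatomic_flag_for_address(addr) */

static void get_lock(void)
{
    /* __atomic_test_and_set returns the previous value: true means the
     * lock was already held, so keep spinning until the holder clears it. */
    while (__atomic_test_and_set(&lock_flag, __ATOMIC_ACQUIRE))
        ;
}

static void free_lock(void)
{
    /* Release ordering makes everything done under the lock visible to
     * the next acquirer. */
    __atomic_clear(&lock_flag, __ATOMIC_RELEASE);
}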
/kernel/linux/linux-5.10/net/rds/
ib_cm.c
1286 spinlock_t *lock_ptr; in rds_ib_conn_free() local
1295 lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; in rds_ib_conn_free()
1297 spin_lock_irq(lock_ptr); in rds_ib_conn_free()
1299 spin_unlock_irq(lock_ptr); in rds_ib_conn_free()
/kernel/linux/linux-6.6/net/rds/
ib_cm.c
1253 spinlock_t *lock_ptr; in rds_ib_conn_free() local
1262 lock_ptr = ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &ib_nodev_conns_lock; in rds_ib_conn_free()
1264 spin_lock_irq(lock_ptr); in rds_ib_conn_free()
1266 spin_unlock_irq(lock_ptr); in rds_ib_conn_free()
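
Both RDS hits show the other common use of a lock pointer: rds_ib_conn_free() picks which lock guards the list the connection currently sits on (the device's spinlock if a device is attached, the global ib_nodev_conns_lock otherwise) and then takes exactly that one. A minimal sketch of the pattern, with hypothetical struct names standing in for the RDS types and a pthread mutex for the kernel spinlock:

#include <pthread.h>
#include <stddef.h>

struct ib_device { pthread_mutex_t spinlock; };       /* hypothetical */
struct ib_conn   { struct ib_device *rds_ibdev; };    /* hypothetical */

static pthread_mutex_t nodev_conns_lock = PTHREAD_MUTEX_INITIALIZER;

static void conn_free(struct ib_conn *ic)
{
    /* Resolve the lock first, then lock/unlock through the pointer, so the
     * two branches share one critical section. */
    pthread_mutex_t *lock_ptr =
        ic->rds_ibdev ? &ic->rds_ibdev->spinlock : &nodev_conns_lock;

    pthread_mutex_lock(lock_ptr);
    /* ... unlink the connection from whichever list it is on ... */
    pthread_mutex_unlock(lock_ptr);
}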
