
Searched refs:waiter (Results 1 - 25 of 71) sorted by relevance


/kernel/linux/linux-6.6/kernel/locking/
mutex-debug.c
28 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) in debug_mutex_lock_common() argument
30 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); in debug_mutex_lock_common()
31 waiter->magic = waiter; in debug_mutex_lock_common()
32 INIT_LIST_HEAD(&waiter->list); in debug_mutex_lock_common()
33 waiter->ww_ctx = MUTEX_POISON_WW_CTX; in debug_mutex_lock_common()
36 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) in debug_mutex_wake_waiter() argument
40 DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); in debug_mutex_wake_waiter()
44 debug_mutex_free_waiter(struct mutex_waiter *waiter) debug_mutex_free_waiter() argument
50 debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct task_struct *task) debug_mutex_add_waiter() argument
59 debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct task_struct *task) debug_mutex_remove_waiter() argument
[all...]
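The mutex-debug.c snippets show the debug-mutex pattern of poisoning a freshly declared waiter and storing a self-referential magic value, so later checks (DEBUG_LOCKS_WARN_ON(waiter->magic != waiter)) can detect a corrupted or uninitialised waiter. A minimal userspace sketch of that idea, assuming an illustrative poison byte and struct layout rather than the kernel's:

#include <assert.h>
#include <string.h>

#define WAITER_POISON 0x11            /* illustrative poison byte */

struct waiter {
    struct waiter *magic;             /* points at itself while valid */
    struct waiter *next, *prev;       /* stand-in for the wait-list links */
};

static void waiter_init(struct waiter *w)
{
    memset(w, WAITER_POISON, sizeof(*w));  /* poison everything first */
    w->magic = w;                          /* then mark it as live */
    w->next = w->prev = w;
}

static void waiter_check(const struct waiter *w)
{
    /* a waiter whose magic no longer points at itself was corrupted */
    assert(w->magic == w);
}

int main(void)
{
    struct waiter w;

    waiter_init(&w);
    waiter_check(&w);                 /* passes for a properly initialised waiter */
    return 0;
}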
rtmutex.c
35 static inline int __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter, in __ww_mutex_add_waiter() argument
53 struct rt_mutex_waiter *waiter, in __ww_mutex_check_kill()
73 * NULL 1 lock is free and has waiters and the top waiter
183 * With the check for the waiter bit in place T3 on CPU2 will not in fixup_rt_mutex_waiters()
265 * If a new waiter comes in between the unlock and the cmpxchg
283 * wake waiter();
337 * Update the waiter->tree copy of the sort keys.
340 waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) in waiter_update_prio() argument
342 lockdep_assert_held(&waiter->lock->wait_lock); in waiter_update_prio()
343 lockdep_assert(RB_EMPTY_NODE(&waiter in waiter_update_prio()
52 __ww_mutex_check_kill(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct ww_acquire_ctx *ww_ctx) __ww_mutex_check_kill() argument
353 waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) waiter_clone_prio() argument
407 rt_mutex_steal(struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *top_waiter) rt_mutex_steal() argument
457 rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) rt_mutex_enqueue() argument
465 rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) rt_mutex_dequeue() argument
485 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_enqueue_pi() argument
493 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_dequeue_pi() argument
569 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk chwalk) rt_mutex_cond_detect_deadlock() argument
662 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; rt_mutex_adjust_prio_chain() local
1064 try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) try_to_take_rt_mutex() argument
1180 task_blocks_on_rt_mutex(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, struct ww_acquire_ctx *ww_ctx, enum rtmutex_chainwalk chwalk) task_blocks_on_rt_mutex() argument
1291 struct rt_mutex_waiter *waiter; mark_wakeup_next_waiter() local
1457 rtmutex_spin_on_owner(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *owner) rtmutex_spin_on_owner() argument
1494 rtmutex_spin_on_owner(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *owner) rtmutex_spin_on_owner() argument
1515 remove_waiter(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) remove_waiter() argument
1579 rt_mutex_slowlock_block(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) rt_mutex_slowlock_block() argument
1657 __rt_mutex_slowlock(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state, enum rtmutex_chainwalk chwalk, struct rt_mutex_waiter *waiter) __rt_mutex_slowlock() argument
1714 struct rt_mutex_waiter waiter; __rt_mutex_slowlock_locked() local
1776 struct rt_mutex_waiter waiter; rtlock_slowlock_locked() local
[all...]
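The rtmutex.c matches revolve around a priority-ordered set of blocked tasks, where the highest-priority entry is the "top waiter" that will take the lock next (rt_mutex_enqueue(), top_waiter in rt_mutex_adjust_prio_chain()). The kernel keeps this in an rb-tree; the sketch below models only the ordering idea with a sorted singly linked list, lower number meaning higher priority, and all names are illustrative:

#include <stdio.h>
#include <stddef.h>

struct waiter {
    int prio;                 /* lower value = higher priority */
    struct waiter *next;
};

struct lock {
    struct waiter *waiters;   /* kept sorted: highest priority first */
};

/* enqueue keeps the list ordered so the head is always the top waiter */
static void enqueue_waiter(struct lock *l, struct waiter *w)
{
    struct waiter **pos = &l->waiters;

    while (*pos && (*pos)->prio <= w->prio)
        pos = &(*pos)->next;
    w->next = *pos;
    *pos = w;
}

static struct waiter *top_waiter(struct lock *l)
{
    return l->waiters;        /* NULL when nobody is blocked */
}

int main(void)
{
    struct lock l = { 0 };
    struct waiter a = { .prio = 20 }, b = { .prio = 5 }, c = { .prio = 10 };

    enqueue_waiter(&l, &a);
    enqueue_waiter(&l, &b);
    enqueue_waiter(&l, &c);
    printf("top waiter prio: %d\n", top_waiter(&l)->prio);   /* prints 5 */
    return 0;
}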
rtmutex_common.h
22 * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two
27 * @prio: Priority of the waiter
28 * @deadline: Deadline of the waiter if applicable
45 * @lock: Pointer to the rt_mutex on which the waiter blocks
85 struct rt_mutex_waiter *waiter,
88 struct rt_mutex_waiter *waiter,
92 struct rt_mutex_waiter *waiter);
94 struct rt_mutex_waiter *waiter);
116 * Lockless speculative check whether @waiter is still the top waiter o
120 rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) rt_mutex_waiter_is_top_waiter() argument
199 debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) debug_rt_mutex_init_waiter() argument
205 debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) debug_rt_mutex_free_waiter() argument
211 rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) rt_mutex_init_waiter() argument
220 rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter) rt_mutex_init_rtlock_waiter() argument
[all...]
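The rtmutex_common.h comment appears to describe a waiter that is queued in two places at once: the lock's wait tree (who gets the lock next) and the owning task's priority-inheritance tree (which blocked waiter should boost the owner), each with its own copy of the sort keys (@prio, @deadline). A rough compilable sketch of that shape, with illustrative field names and stand-in types rather than the kernel's exact layout:

#include <stdint.h>
#include <stdio.h>

struct rb_node { struct rb_node *left, *right, *parent; }; /* stand-in */
struct task;                                                /* opaque */
struct rt_lock;                                             /* opaque */

/* one tree position plus its own copy of the sort keys; each tree is
 * updated under a different lock, so the keys are duplicated */
struct waiter_node {
    struct rb_node entry;
    int            prio;      /* priority of the waiter */
    uint64_t       deadline;  /* deadline of the waiter, if applicable */
};

struct waiter {
    struct waiter_node tree;     /* position in the lock's wait tree        */
    struct waiter_node pi_tree;  /* position in the owner's pi-waiters tree */
    struct task    *task;        /* the blocked task                        */
    struct rt_lock *lock;        /* the lock the waiter blocks on           */
};

int main(void)
{
    printf("sketch waiter is %zu bytes\n", sizeof(struct waiter));
    return 0;
}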
mutex.h
25 struct mutex_waiter *waiter);
27 struct mutex_waiter *waiter);
28 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
30 struct mutex_waiter *waiter,
32 extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
38 # define debug_mutex_lock_common(lock, waiter) do { } while (0)
39 # define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
40 # define debug_mutex_free_waiter(waiter) do { } while (0)
41 # define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
42 # define debug_mutex_remove_waiter(lock, waiter, t
[all...]
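mutex.h shows the usual kernel trick for debug hooks: with debugging enabled they are real functions, otherwise they become empty do { } while (0) macros so the call sites compile away to nothing. A minimal sketch of the same pattern; the MUTEX_DEBUG switch and hook name are made up for illustration:

#include <stdio.h>

#ifdef MUTEX_DEBUG
static void debug_add_waiter(void *lock, void *waiter)
{
    fprintf(stderr, "add waiter %p to lock %p\n", waiter, lock);
}
#else
/* do { } while (0) keeps the macro safe to use as a single statement,
 * e.g. as the body of an un-braced if/else */
# define debug_add_waiter(lock, waiter) do { } while (0)
#endif

int main(void)
{
    int lock = 0, waiter = 0;

    debug_add_waiter(&lock, &waiter);   /* a no-op unless built with -DMUTEX_DEBUG */
    return 0;
}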
mutex.c
64 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
65 * Bit1 indicates unlock needs to hand the lock to the top-waiter
195 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_waiter_is_first() argument
197 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; in __mutex_waiter_is_first()
201 * Add @waiter to a given location in the lock wait_list and set the
202 * FLAG_WAITERS flag if it's the first waiter.
205 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, in __mutex_add_waiter() argument
208 debug_mutex_add_waiter(lock, waiter, current); in __mutex_add_waiter()
210 list_add_tail(&waiter->list, list); in __mutex_add_waiter()
211 if (__mutex_waiter_is_first(lock, waiter)) in __mutex_add_waiter()
216 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) __mutex_remove_waiter() argument
304 ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) ww_mutex_spin_on_owner() argument
352 mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_spin_on_owner() argument
441 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_optimistic_spin() argument
517 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_optimistic_spin() argument
573 struct mutex_waiter waiter; __mutex_lock_common() local
936 struct mutex_waiter *waiter = __mutex_unlock_slowpath() local
[all...]
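The mutex.c snippets pair a wait list with flag bits kept alongside the owner: when the first waiter is queued, a "has waiters" bit is set so the unlock path knows it must issue a wakeup. A simplified userspace sketch of that bookkeeping, using a separate flags field instead of packing bits into the owner word; names are illustrative:

#include <stdbool.h>
#include <stdio.h>

#define FLAG_WAITERS 0x1UL

struct waiter {
    struct waiter *next, *prev;
};

struct mutex_sketch {
    unsigned long flags;
    struct waiter wait_list;      /* circular list head */
};

static void mutex_sketch_init(struct mutex_sketch *m)
{
    m->flags = 0;
    m->wait_list.next = m->wait_list.prev = &m->wait_list;
}

static bool waiter_is_first(struct mutex_sketch *m, struct waiter *w)
{
    return m->wait_list.next == w;
}

/* add at the tail; flag the lock when this becomes the first waiter */
static void add_waiter(struct mutex_sketch *m, struct waiter *w)
{
    w->prev = m->wait_list.prev;
    w->next = &m->wait_list;
    m->wait_list.prev->next = w;
    m->wait_list.prev = w;

    if (waiter_is_first(m, w))
        m->flags |= FLAG_WAITERS;
}

int main(void)
{
    struct mutex_sketch m;
    struct waiter a, b;

    mutex_sketch_init(&m);
    add_waiter(&m, &a);
    add_waiter(&m, &b);
    printf("flags: %#lx\n", m.flags);   /* 0x1: set by the first add */
    return 0;
}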
rwsem.c
370 rwsem_add_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) in rwsem_add_waiter() argument
373 list_add_tail(&waiter->list, &sem->wait_list); in rwsem_add_waiter()
378 * Remove a waiter from the wait_list and clear flags.
386 rwsem_del_waiter(struct rw_semaphore *sem, struct rwsem_waiter *waiter) in rwsem_del_waiter() argument
389 list_del(&waiter->list); in rwsem_del_waiter()
415 struct rwsem_waiter *waiter, *tmp; in rwsem_mark_wake() local
422 * Take a peek at the queue head waiter such that we can determine in rwsem_mark_wake()
425 waiter = rwsem_first_waiter(sem); in rwsem_mark_wake()
427 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { in rwsem_mark_wake()
436 wake_q_add(wake_q, waiter in rwsem_mark_wake()
604 rwsem_try_write_lock(struct rw_semaphore *sem, struct rwsem_waiter *waiter) rwsem_try_write_lock() argument
1000 struct rwsem_waiter waiter; rwsem_down_read_slowpath() local
1109 struct rwsem_waiter waiter; rwsem_down_write_slowpath() local
[all...]
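rwsem_mark_wake() peeks at the waiter at the head of the queue: if it is waiting for write, only that task is woken; if it is a reader, a batch of queued readers can be woken together. A condensed sketch of that decision, using a fixed array instead of the kernel's list and wake queue; names are illustrative:

#include <stdio.h>

enum waiter_type { WAITING_FOR_READ, WAITING_FOR_WRITE };

struct waiter {
    enum waiter_type type;
    int task_id;
};

/* wake either the single writer at the head, or every reader queued
 * up to (but not including) the next writer; returns how many woke */
static int mark_wake(const struct waiter *queue, int n)
{
    int woken = 0;

    if (n == 0)
        return 0;

    if (queue[0].type == WAITING_FOR_WRITE) {
        printf("wake writer %d\n", queue[0].task_id);
        return 1;
    }

    for (int i = 0; i < n && queue[i].type == WAITING_FOR_READ; i++) {
        printf("wake reader %d\n", queue[i].task_id);
        woken++;
    }
    return woken;
}

int main(void)
{
    struct waiter q[] = {
        { WAITING_FOR_READ,  1 },
        { WAITING_FOR_READ,  2 },
        { WAITING_FOR_WRITE, 3 },
        { WAITING_FOR_READ,  4 },
    };

    printf("woken: %d\n", mark_wake(q, 4));   /* wakes readers 1 and 2 only */
    return 0;
}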
semaphore.c
212 struct semaphore_waiter waiter; in ___down_common() local
214 list_add_tail(&waiter.list, &sem->wait_list); in ___down_common()
215 waiter.task = current; in ___down_common()
216 waiter.up = false; in ___down_common()
227 if (waiter.up) in ___down_common()
232 list_del(&waiter.list); in ___down_common()
236 list_del(&waiter.list); in ___down_common()
274 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, in __up() local
276 list_del(&waiter->list); in __up()
277 waiter in __up()
[all...]
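semaphore.c keeps the waiter on the caller's stack: the task links it into the wait list, sets waiter.up = false, and sleeps until __up() sets waiter.up = true and wakes it. A userspace model of that handshake, using a pthread mutex and condition variable in place of the scheduler; all names are illustrative:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct waiter {
    bool up;                     /* set by the "up" side when we may proceed */
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static struct waiter  *pending;  /* stand-in for the semaphore's wait list */

static void *down_slowpath(void *arg)
{
    struct waiter w = { .up = false };   /* lives on this thread's stack */

    (void)arg;
    pthread_mutex_lock(&lock);
    pending = &w;                        /* "add to wait list" equivalent */
    while (!w.up)                        /* sleep until up() marks us */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    printf("waiter released\n");
    return NULL;
}

static void up(void)
{
    pthread_mutex_lock(&lock);
    if (pending) {
        pending->up = true;              /* hand over, then wake */
        pending = NULL;
        pthread_cond_broadcast(&cond);
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, down_slowpath, NULL);
    sleep(1);                            /* let the waiter block first */
    up();
    pthread_join(t, NULL);
    return 0;
}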
rtmutex_api.c
163 * @wqh: The wake queue head from which to get the next lock waiter
276 * @waiter: the pre-initialized rt_mutex_waiter
279 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
282 * NOTE: does _NOT_ remove the @waiter on failure; must either call
293 struct rt_mutex_waiter *waiter, in __rt_mutex_start_proxy_lock()
304 ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL, in __rt_mutex_start_proxy_lock()
312 * pi chain. Let the waiter sort it out. in __rt_mutex_start_proxy_lock()
323 * @waiter: the pre-initialized rt_mutex_waiter
326 * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
329 * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
292 __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) __rt_mutex_start_proxy_lock() argument
339 rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) rt_mutex_start_proxy_lock() argument
371 rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter) rt_mutex_wait_proxy_lock() argument
411 rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) rt_mutex_cleanup_proxy_lock() argument
455 struct rt_mutex_waiter *waiter; rt_mutex_adjust_pi() local
[all...]
ww_mutex.h
53 __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos) in __ww_waiter_add() argument
58 __mutex_add_waiter(lock, waiter, p); in __ww_waiter_add()
130 __ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos) in __ww_waiter_add() argument
132 /* RT unconditionally adds the waiter first and then removes it on error */ in __ww_waiter_add()
232 * so the wait_list ordering will go wobbly. rt_mutex re-queues the waiter and in __ww_ctx_less()
269 * Wait-Die; wake a lesser waiter context (when locks held) such that it can
277 __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter, in __ww_mutex_die() argument
283 if (waiter->ww_ctx->acquired > 0 && __ww_ctx_less(waiter->ww_ctx, ww_ctx)) { in __ww_mutex_die()
285 debug_mutex_wake_waiter(lock, waiter); in __ww_mutex_die()
441 __ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter, struct ww_acquire_ctx *ctx) __ww_mutex_check_kill() argument
489 __ww_mutex_add_waiter(struct MUTEX_WAITER *waiter, struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx) __ww_mutex_add_waiter() argument
[all...]
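The ww_mutex.h matches implement Wait-Die ordering between acquire contexts: contexts are stamped when the acquire context is initialised, an older (lower stamp) context is allowed to wait, and a younger context that already holds locks backs off ("dies") so the older one can make progress. A minimal sketch of just the stamp comparison; the kernel additionally handles wound-wait and rt priorities, and the names here are illustrative:

#include <stdbool.h>
#include <stdio.h>

struct acquire_ctx {
    unsigned long stamp;     /* lower stamp = older transaction        */
    int           acquired;  /* number of ww locks this context holds  */
};

/* Wait-Die: the requester may wait only if it is older than the holder;
 * a younger requester that already holds locks must release them and
 * retry ("die") to avoid deadlock. */
static bool must_die(const struct acquire_ctx *requester,
                     const struct acquire_ctx *holder)
{
    return requester->acquired > 0 && requester->stamp > holder->stamp;
}

int main(void)
{
    struct acquire_ctx old_ctx   = { .stamp = 1, .acquired = 2 };
    struct acquire_ctx young_ctx = { .stamp = 7, .acquired = 1 };

    printf("old waits on young: die=%d\n", must_die(&old_ctx, &young_ctx));   /* 0: may wait */
    printf("young waits on old: die=%d\n", must_die(&young_ctx, &old_ctx));   /* 1: backs off */
    return 0;
}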
/kernel/linux/linux-5.10/drivers/gpu/host1x/
intr.c
33 * add a waiter to a waiter queue, sorted by threshold
36 static bool add_waiter_to_queue(struct host1x_waitlist *waiter, in add_waiter_to_queue() argument
40 u32 thresh = waiter->thresh; in add_waiter_to_queue()
44 list_add(&waiter->list, &pos->list); in add_waiter_to_queue()
48 list_add(&waiter->list, queue); in add_waiter_to_queue()
53 * run through a waiter queue for a single sync point ID
60 struct host1x_waitlist *waiter, *next, *prev; in remove_completed_waiters() local
62 list_for_each_entry_safe(waiter, next, head, list) { in remove_completed_waiters()
63 if ((s32)(waiter in remove_completed_waiters()
99 action_submit_complete(struct host1x_waitlist *waiter) action_submit_complete() argument
110 action_wakeup(struct host1x_waitlist *waiter) action_wakeup() argument
117 action_wakeup_interruptible(struct host1x_waitlist *waiter) action_wakeup_interruptible() argument
139 struct host1x_waitlist *waiter, *next; run_handlers() local
202 host1x_intr_add_action(struct host1x *host, struct host1x_syncpt *syncpt, u32 thresh, enum host1x_intr_action action, void *data, struct host1x_waitlist *waiter, void **ref) host1x_intr_add_action() argument
247 struct host1x_waitlist *waiter = ref; host1x_intr_put_ref() local
315 struct host1x_waitlist *waiter, *next; host1x_intr_stop() local
[all...]
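The host1x interrupt code keeps, per sync point, a waiter queue sorted by the threshold each waiter is waiting for; when the sync point advances, every waiter whose threshold has been reached is completed, and the comparison uses a signed 32-bit difference so it tolerates counter wrap-around. A compact sketch of that insert-sorted / sweep-completed logic with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct waiter {
    uint32_t thresh;          /* sync point value this waiter needs */
    struct waiter *next;
};

/* insert keeping the list ordered by threshold (wrap-safe compare) */
static void add_waiter(struct waiter **queue, struct waiter *w)
{
    while (*queue && (int32_t)((*queue)->thresh - w->thresh) <= 0)
        queue = &(*queue)->next;
    w->next = *queue;
    *queue = w;
}

/* complete every waiter whose threshold the sync point has reached */
static void sweep_completed(struct waiter **queue, uint32_t syncpt_val)
{
    while (*queue && (int32_t)((*queue)->thresh - syncpt_val) <= 0) {
        struct waiter *done = *queue;

        *queue = done->next;
        printf("completed waiter at threshold %u\n", done->thresh);
    }
}

int main(void)
{
    struct waiter a = { .thresh = 10 }, b = { .thresh = 30 }, c = { .thresh = 20 };
    struct waiter *queue = NULL;

    add_waiter(&queue, &a);
    add_waiter(&queue, &b);
    add_waiter(&queue, &c);
    sweep_completed(&queue, 25);   /* completes thresholds 10 and 20, keeps 30 */
    return 0;
}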
/kernel/linux/linux-5.10/kernel/locking/
mutex-debug.c
30 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) in debug_mutex_lock_common() argument
32 memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter)); in debug_mutex_lock_common()
33 waiter->magic = waiter; in debug_mutex_lock_common()
34 INIT_LIST_HEAD(&waiter->list); in debug_mutex_lock_common()
37 void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter) in debug_mutex_wake_waiter() argument
41 DEBUG_LOCKS_WARN_ON(waiter->magic != waiter); in debug_mutex_wake_waiter()
42 DEBUG_LOCKS_WARN_ON(list_empty(&waiter in debug_mutex_wake_waiter()
45 debug_mutex_free_waiter(struct mutex_waiter *waiter) debug_mutex_free_waiter() argument
51 debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct task_struct *task) debug_mutex_add_waiter() argument
60 debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct task_struct *task) debug_mutex_remove_waiter() argument
[all...]
rtmutex.c
33 * NULL 1 lock is free and has waiters and the top waiter
126 * With the check for the waiter bit in place T3 on CPU2 will not in fixup_rt_mutex_waiters()
177 * If a new waiter comes in between the unlock and the cmpxchg
195 * wake waiter();
242 * If left waiter has a dl_prio(), and we didn't return 1 above, in rt_mutex_waiter_less()
243 * then right waiter has a dl_prio() too. in rt_mutex_waiter_less()
261 * If left waiter has a dl_prio(), and we didn't return 0 above, in rt_mutex_waiter_equal()
262 * then right waiter has a dl_prio() too. in rt_mutex_waiter_equal()
271 rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) in rt_mutex_enqueue() argument
281 if (rt_mutex_waiter_less(waiter, entr in rt_mutex_enqueue()
294 rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_dequeue() argument
304 rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_enqueue_pi() argument
327 rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) rt_mutex_dequeue_pi() argument
361 rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, enum rtmutex_chainwalk chwalk) rt_mutex_cond_detect_deadlock() argument
454 struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter; rt_mutex_adjust_prio_chain() local
805 try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, struct rt_mutex_waiter *waiter) try_to_take_rt_mutex() argument
926 task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, enum rtmutex_chainwalk chwalk) task_blocks_on_rt_mutex() argument
1018 struct rt_mutex_waiter *waiter; mark_wakeup_next_waiter() local
1065 remove_waiter(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) remove_waiter() argument
1125 struct rt_mutex_waiter *waiter; rt_mutex_adjust_pi() local
1146 rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) rt_mutex_init_waiter() argument
1165 __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter) __rt_mutex_slowlock() argument
1232 struct rt_mutex_waiter waiter; rt_mutex_slowlock() local
1744 __rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) __rt_mutex_start_proxy_lock() argument
1793 rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) rt_mutex_start_proxy_lock() argument
1845 rt_mutex_wait_proxy_lock(struct rt_mutex *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter) rt_mutex_wait_proxy_lock() argument
1885 rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) rt_mutex_cleanup_proxy_lock() argument
[all...]
rtmutex-debug.c
67 * We fill out the fields in the waiter to store the information about
69 * case of a remove waiter operation.
87 void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) in debug_rt_mutex_print_deadlock() argument
91 if (!waiter->deadlock_lock || !debug_locks) in debug_rt_mutex_print_deadlock()
95 task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); in debug_rt_mutex_print_deadlock()
117 printk_lock(waiter->lock, 1); in debug_rt_mutex_print_deadlock()
121 printk_lock(waiter->deadlock_lock, 1); in debug_rt_mutex_print_deadlock()
158 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) in debug_rt_mutex_init_waiter() argument
160 memset(waiter, 0x11, sizeof(*waiter)); in debug_rt_mutex_init_waiter()
164 debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) debug_rt_mutex_free_waiter() argument
[all...]
mutex.c
58 * Bit0 indicates a non-empty waiter list; unlock must issue a wakeup.
59 * Bit1 indicates unlock needs to hand the lock to the top-waiter
198 static inline bool __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter) in __mutex_waiter_is_first() argument
200 return list_first_entry(&lock->wait_list, struct mutex_waiter, list) == waiter; in __mutex_waiter_is_first()
204 * Add @waiter to a given location in the lock wait_list and set the
205 * FLAG_WAITERS flag if it's the first waiter.
208 __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, in __mutex_add_waiter() argument
211 debug_mutex_add_waiter(lock, waiter, current); in __mutex_add_waiter()
213 list_add_tail(&waiter->list, list); in __mutex_add_waiter()
214 if (__mutex_waiter_is_first(lock, waiter)) in __mutex_add_waiter()
219 __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter) __mutex_remove_waiter() argument
377 __ww_mutex_die(struct mutex *lock, struct mutex_waiter *waiter, struct ww_acquire_ctx *ww_ctx) __ww_mutex_die() argument
512 ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) ww_mutex_spin_on_owner() argument
560 mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_spin_on_owner() argument
648 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_optimistic_spin() argument
724 mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter) mutex_optimistic_spin() argument
815 __ww_mutex_check_kill(struct mutex *lock, struct mutex_waiter *waiter, struct ww_acquire_ctx *ctx) __ww_mutex_check_kill() argument
862 __ww_mutex_add_waiter(struct mutex_waiter *waiter, struct mutex *lock, struct ww_acquire_ctx *ww_ctx) __ww_mutex_add_waiter() argument
940 struct mutex_waiter waiter; __mutex_lock_common() local
1271 struct mutex_waiter *waiter = __mutex_unlock_slowpath() local
[all...]
rtmutex-debug.h
13 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
14 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
22 struct rt_mutex_waiter *waiter,
24 extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
28 static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter, in debug_rt_mutex_detect_deadlock() argument
31 return (waiter != NULL); in debug_rt_mutex_detect_deadlock()
semaphore.c
207 struct semaphore_waiter waiter; in __down_common() local
209 list_add_tail(&waiter.list, &sem->wait_list); in __down_common()
210 waiter.task = current; in __down_common()
211 waiter.up = false; in __down_common()
222 if (waiter.up) in __down_common()
227 list_del(&waiter.list); in __down_common()
231 list_del(&waiter.list); in __down_common()
257 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, in __up() local
259 list_del(&waiter->list); in __up()
260 waiter in __up()
[all...]
mutex.h
13 #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
14 #define debug_mutex_free_waiter(waiter) do { } while (0)
15 #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
16 #define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
21 debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter) in debug_mutex_lock_common() argument
rwsem.c
404 struct rwsem_waiter *waiter, *tmp; in rwsem_mark_wake() local
411 * Take a peek at the queue head waiter such that we can determine in rwsem_mark_wake()
414 waiter = rwsem_first_waiter(sem); in rwsem_mark_wake()
416 if (waiter->type == RWSEM_WAITING_FOR_WRITE) { in rwsem_mark_wake()
425 wake_q_add(wake_q, waiter->task); in rwsem_mark_wake()
455 time_after(jiffies, waiter->timeout)) { in rwsem_mark_wake()
469 owner = waiter->task; in rwsem_mark_wake()
470 if (waiter->last_rowner & RWSEM_RD_NONSPINNABLE) { in rwsem_mark_wake()
484 * reader phase (first waiter is a reader), all readers are eligible in rwsem_mark_wake()
491 * is because the to-be-woken waiter ma in rwsem_mark_wake()
995 struct rwsem_waiter waiter; rwsem_down_read_slowpath() local
1141 struct rwsem_waiter waiter; rwsem_down_write_slowpath() local
[all...]
/kernel/linux/linux-5.10/drivers/tty/
tty_ldsem.c
8 * 2) Write waiter has priority
76 struct ldsem_waiter *waiter, *next; in __ldsem_wake_readers() local
94 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { in __ldsem_wake_readers()
95 tsk = waiter->task; in __ldsem_wake_readers()
96 smp_store_release(&waiter->task, NULL); in __ldsem_wake_readers()
121 struct ldsem_waiter *waiter; in __ldsem_wake_writer() local
123 waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list); in __ldsem_wake_writer()
124 wake_up_process(waiter->task); in __ldsem_wake_writer()
158 struct ldsem_waiter waiter; in down_read_failed() local
180 list_add_tail(&waiter in down_read_failed()
232 struct ldsem_waiter waiter; down_write_failed() local
[all...]
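tty_ldsem gives write waiters priority: on release, a queued writer is woken before any readers, and a woken reader's task pointer is cleared with a release store (smp_store_release(&waiter->task, NULL)) so it can tell the lock was handed to it. A minimal sketch of just the writer-first wakeup decision, with illustrative names:

#include <stdio.h>

struct waiter {
    int task_id;
    struct waiter *next;
};

struct ldsem_sketch {
    struct waiter *read_wait;    /* queued readers */
    struct waiter *write_wait;   /* queued writers */
};

/* writers have priority: wake the first writer if there is one,
 * otherwise wake every queued reader */
static void wake_next(struct ldsem_sketch *sem)
{
    if (sem->write_wait) {
        printf("wake writer %d\n", sem->write_wait->task_id);
        return;
    }
    for (struct waiter *w = sem->read_wait; w; w = w->next)
        printf("wake reader %d\n", w->task_id);
}

int main(void)
{
    struct waiter r1 = { 1, NULL }, r2 = { 2, &r1 }, w1 = { 3, NULL };
    struct ldsem_sketch sem = { .read_wait = &r2, .write_wait = &w1 };

    wake_next(&sem);             /* wakes writer 3, not the readers */
    sem.write_wait = NULL;
    wake_next(&sem);             /* now wakes readers 2 and 1 */
    return 0;
}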
/kernel/linux/linux-6.6/drivers/tty/
tty_ldsem.c
8 * 2) Write waiter has priority
76 struct ldsem_waiter *waiter, *next; in __ldsem_wake_readers() local
94 list_for_each_entry_safe(waiter, next, &sem->read_wait, list) { in __ldsem_wake_readers()
95 tsk = waiter->task; in __ldsem_wake_readers()
96 smp_store_release(&waiter->task, NULL); in __ldsem_wake_readers()
121 struct ldsem_waiter *waiter; in __ldsem_wake_writer() local
123 waiter = list_entry(sem->write_wait.next, struct ldsem_waiter, list); in __ldsem_wake_writer()
124 wake_up_process(waiter->task); in __ldsem_wake_writer()
158 struct ldsem_waiter waiter; in down_read_failed() local
180 list_add_tail(&waiter in down_read_failed()
232 struct ldsem_waiter waiter; down_write_failed() local
[all...]
/kernel/linux/linux-5.10/lib/
klist.c
185 struct klist_waiter *waiter, *tmp; in klist_release() local
191 list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { in klist_release()
192 if (waiter->node != n) in klist_release()
195 list_del(&waiter->list); in klist_release()
196 waiter->woken = 1; in klist_release()
198 wake_up_process(waiter->process); in klist_release()
240 struct klist_waiter waiter; in klist_remove() local
242 waiter.node = n; in klist_remove()
243 waiter.process = current; in klist_remove()
244 waiter in klist_remove()
[all...]
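klist_remove() cannot tear a node down while other users still hold references, so the remover registers an on-stack waiter on a global list and sleeps; when the last reference goes away, klist_release() marks the matching waiters as woken and wakes their processes. A userspace model of that woken-flag handshake, with pthreads standing in for wake_up_process() and illustrative names throughout:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct node { int id; };

struct waiter {
    struct node   *node;     /* node whose release we are waiting for */
    bool           woken;
    struct waiter *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static struct waiter  *remove_waiters;   /* global waiter list, as in klist.c */

/* runs when the last reference to a node is dropped */
static void node_release(struct node *n)
{
    pthread_mutex_lock(&lock);
    for (struct waiter **p = &remove_waiters; *p; ) {
        if ((*p)->node == n) {
            struct waiter *w = *p;

            *p = w->next;    /* unlink, then mark the remover as woken */
            w->woken = true;
        } else {
            p = &(*p)->next;
        }
    }
    pthread_cond_broadcast(&cond);       /* wake_up_process() stand-in */
    pthread_mutex_unlock(&lock);
}

/* block until node_release() has run for this node */
static void node_remove_sync(struct node *n)
{
    struct waiter w = { .node = n, .woken = false };

    pthread_mutex_lock(&lock);
    w.next = remove_waiters;
    remove_waiters = &w;
    while (!w.woken)
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);
    printf("node %d removed\n", n->id);
}

static void *releaser(void *arg)
{
    /* wait until the remover has queued its waiter, then drop the node */
    for (;;) {
        pthread_mutex_lock(&lock);
        bool queued = remove_waiters != NULL;
        pthread_mutex_unlock(&lock);
        if (queued)
            break;
        usleep(1000);
    }
    node_release(arg);
    return NULL;
}

int main(void)
{
    struct node n = { .id = 42 };
    pthread_t t;

    pthread_create(&t, NULL, releaser, &n);
    node_remove_sync(&n);                /* blocks until releaser runs */
    pthread_join(t, NULL);
    return 0;
}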
/kernel/linux/linux-6.6/lib/
klist.c
185 struct klist_waiter *waiter, *tmp; in klist_release() local
191 list_for_each_entry_safe(waiter, tmp, &klist_remove_waiters, list) { in klist_release()
192 if (waiter->node != n) in klist_release()
195 list_del(&waiter->list); in klist_release()
196 waiter->woken = 1; in klist_release()
198 wake_up_process(waiter->process); in klist_release()
240 struct klist_waiter waiter; in klist_remove() local
242 waiter.node = n; in klist_remove()
243 waiter.process = current; in klist_remove()
244 waiter in klist_remove()
[all...]
/kernel/linux/linux-6.6/tools/testing/selftests/filesystems/epoll/
epoll_wakeup_test.c
23 pthread_t waiter; member
47 pthread_kill(ctx->waiter, SIGUSR1); in kill_timeout()
489 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
495 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
532 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
538 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
579 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry2a, &ctx), 0); in TEST()
585 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
628 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
634 ASSERT_EQ(pthread_join(ctx.waiter, NUL in TEST()
3461 pthread_t waiter[2]; TEST() local
[all...]
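The epoll selftests keep the waiter thread's pthread_t in the test context so a timeout handler can pthread_kill() it, and the main thread later joins it. A stripped-down, runnable version of that pattern: one thread blocks in epoll_wait() on a pipe, the main thread writes a byte to wake it, then joins; the names and the 2-second timeout are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

struct ctx {
    pthread_t waiter;     /* kept so the main thread can join (or signal) it */
    int       efd;
    int       pipefd[2];
};

static void *waiter_entry(void *arg)
{
    struct ctx *ctx = arg;
    struct epoll_event ev;

    /* block until the pipe becomes readable or 2000 ms pass */
    if (epoll_wait(ctx->efd, &ev, 1, 2000) == 1)
        printf("waiter woken by event on fd %d\n", ev.data.fd);
    return NULL;
}

int main(void)
{
    struct ctx ctx;
    struct epoll_event ev = { .events = EPOLLIN };

    if (pipe(ctx.pipefd) < 0)
        return 1;
    ctx.efd = epoll_create1(0);
    ev.data.fd = ctx.pipefd[0];
    epoll_ctl(ctx.efd, EPOLL_CTL_ADD, ctx.pipefd[0], &ev);

    pthread_create(&ctx.waiter, NULL, waiter_entry, &ctx);
    if (write(ctx.pipefd[1], "x", 1) != 1)   /* wake the waiter thread */
        return 1;
    pthread_join(ctx.waiter, NULL);

    close(ctx.efd);
    close(ctx.pipefd[0]);
    close(ctx.pipefd[1]);
    return 0;
}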
/kernel/linux/linux-5.10/tools/testing/selftests/filesystems/epoll/
epoll_wakeup_test.c
21 pthread_t waiter; member
32 pthread_kill(ctx->waiter, SIGUSR1); in kill_timeout()
474 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
480 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
517 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
523 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
564 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry2a, &ctx), 0); in TEST()
570 ASSERT_EQ(pthread_join(ctx.waiter, NULL), 0); in TEST()
613 ASSERT_EQ(pthread_create(&ctx.waiter, NULL, waiter_entry1a, &ctx), 0); in TEST()
619 ASSERT_EQ(pthread_join(ctx.waiter, NUL in TEST()
[all...]
/kernel/linux/linux-5.10/drivers/staging/vc04_services/interface/vchiq_arm/
vchiq_arm.c
224 struct bulk_waiter_node *waiter, *next; in vchiq_shutdown() local
226 list_for_each_entry_safe(waiter, next, in vchiq_shutdown()
228 list_del(&waiter->list); in vchiq_shutdown()
231 waiter, waiter->pid); in vchiq_shutdown()
232 kfree(waiter); in vchiq_shutdown()
435 struct bulk_waiter_node *waiter = NULL; in vchiq_blocking_bulk_transfer() local
447 list_for_each_entry(waiter, &instance->bulk_waiter_list, list) { in vchiq_blocking_bulk_transfer()
448 if (waiter->pid == current->pid) { in vchiq_blocking_bulk_transfer()
449 list_del(&waiter in vchiq_blocking_bulk_transfer()
955 struct bulk_waiter_node *waiter = NULL; vchiq_irq_queue_bulk_tx_rx() local
1985 struct bulk_waiter_node *waiter, *next; vchiq_release() local
[all...]
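vchiq keeps a per-instance list of bulk_waiter nodes keyed by the calling process's pid, which appears to let a process pick up its pending blocking bulk transfer on a later call; vchiq_shutdown() then frees whatever is left on the list. A small sketch of the look-up-by-pid-or-allocate step, with illustrative types and names:

#include <stdio.h>
#include <stdlib.h>

struct bulk_waiter_node {
    int pid;                        /* owner process */
    struct bulk_waiter_node *next;
};

struct instance {
    struct bulk_waiter_node *bulk_waiter_list;
};

/* reuse a waiter previously parked by this pid, or allocate a fresh one */
static struct bulk_waiter_node *get_waiter(struct instance *inst, int pid)
{
    for (struct bulk_waiter_node **p = &inst->bulk_waiter_list; *p; p = &(*p)->next) {
        if ((*p)->pid == pid) {
            struct bulk_waiter_node *w = *p;

            *p = w->next;           /* detach while it is in use */
            return w;
        }
    }

    struct bulk_waiter_node *w = calloc(1, sizeof(*w));
    if (w)
        w->pid = pid;
    return w;
}

int main(void)
{
    struct instance inst = { 0 };
    struct bulk_waiter_node *w = get_waiter(&inst, 1234);

    if (!w)
        return 1;
    printf("waiter for pid %d: %p\n", w->pid, (void *)w);
    free(w);
    return 0;
}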
