Home
last modified time | relevance | path

Searched refs:waiters (Results 26 - 50 of 54) sorted by relevance

123

/kernel/linux/linux-6.6/kernel/locking/
H A Dww_mutex.h96 struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root); in __ww_waiter_first()
123 struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root); in __ww_waiter_last()
272 * Among waiters with context, only the first one can have other locks acquired
296 * Wound the lock holder if there are waiters with more important transactions
297 * than the lock holders. Even if multiple waiters may wound the lock holder,
375 * and wake up any waiters so they can recheck.
384 * the WAITERS check is done, otherwise contended waiters might be in ww_mutex_set_context_fastpath()
385 * missed. The contended waiters will either see ww_ctx == NULL in ww_mutex_set_context_fastpath()
404 * Uh oh, we raced in fastpath, check if any of the waiters need to in ww_mutex_set_context_fastpath()
438 * look at waiters before in ww_mutex_set_context_fastpath()
[all...]
H A Drtmutex.c69 * is used to keep track of the "lock has waiters" state.
73 * NULL 1 lock is free and has waiters and the top waiter
76 * taskpointer 1 lock is held and has waiters**
87 * waiters. This can happen when grabbing the lock in the slow path.
134 * The rbtree has no waiters enqueued, now make sure that the in fixup_rt_mutex_waiters()
135 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
184 * overwrite. All tasks fiddling with the waiters bit are in fixup_rt_mutex_waiters()
185 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
188 * happens in the middle of the RMW because the waiters bit is in fixup_rt_mutex_waiters()
252 * 1) Clear the waiters bit
[all...]
/kernel/linux/linux-5.10/kernel/locking/
H A Drtmutex.c29 * is used to keep track of the "lock has waiters" state.
33 * NULL 1 lock is free and has waiters and the top waiter
36 * taskpointer 1 lock is held and has waiters**
47 * waiters. This can happen when grabbing the lock in the slow path.
77 * The rbtree has no waiters enqueued, now make sure that the in fixup_rt_mutex_waiters()
78 * lock->owner still has the waiters bit set, otherwise the in fixup_rt_mutex_waiters()
127 * overwrite. All tasks fiddling with the waiters bit are in fixup_rt_mutex_waiters()
128 * serialized by l->lock, so nothing else can modify the waiters in fixup_rt_mutex_waiters()
131 * happens in the middle of the RMW because the waiters bit is in fixup_rt_mutex_waiters()
164 * 1) Clear the waiters bit
[all...]
/kernel/linux/linux-5.10/drivers/tee/optee/
H A Dcall.c43 list_add_tail(&w->list_node, &cq->waiters); in optee_cq_wait_init()
55 /* Move to end of list to get out of the way for other waiters */ in optee_cq_wait_for_completion()
58 list_add_tail(&w->list_node, &cq->waiters); in optee_cq_wait_for_completion()
67 list_for_each_entry(w, &cq->waiters, list_node) { in optee_cq_complete_one()
168 * thread waiters wake up one. in optee_do_call_with_arg()
H A Doptee_private.h35 struct list_head waiters; member
H A Dcore.c719 INIT_LIST_HEAD(&optee->call_queue.waiters); in optee_probe()
/kernel/linux/linux-5.10/include/linux/greybus/
H A Doperation.h108 atomic_t waiters; member
/kernel/linux/linux-6.6/include/linux/greybus/
H A Doperation.h108 atomic_t waiters; member
/kernel/linux/linux-5.10/tools/testing/selftests/filesystems/epoll/
H A Depoll_wakeup_test.c3147 int waiters; member
3177 __atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE); in epoll60_wait_thread()
3191 __atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE); in epoll60_wait_thread()
3211 return __atomic_load_n(&ctx->waiters, __ATOMIC_ACQUIRE); in count_waiters()
3217 pthread_t waiters[ARRAY_SIZE(ctx.evfd)]; in TEST() local
3237 for (i = 0; i < ARRAY_SIZE(waiters); i++) in TEST()
3238 ASSERT_EQ(pthread_create(&waiters[i], NULL, in TEST()
3266 /* Busy loop for 1s and wait for all waiters to wake up */ in TEST()
3274 /* Stop waiters */ in TEST()
3275 for (i = 0; i < ARRAY_SIZE(waiters); i++) in TEST()
[all...]
/kernel/linux/linux-6.6/tools/testing/selftests/filesystems/epoll/
H A Depoll_wakeup_test.c3162 int waiters; member
3192 __atomic_fetch_add(&ctx->waiters, 1, __ATOMIC_ACQUIRE); in epoll60_wait_thread()
3206 __atomic_fetch_sub(&ctx->waiters, 1, __ATOMIC_RELEASE); in epoll60_wait_thread()
3226 return __atomic_load_n(&ctx->waiters, __ATOMIC_ACQUIRE); in count_waiters()
3232 pthread_t waiters[ARRAY_SIZE(ctx.evfd)]; in TEST() local
3252 for (i = 0; i < ARRAY_SIZE(waiters); i++) in TEST()
3253 ASSERT_EQ(pthread_create(&waiters[i], NULL, in TEST()
3281 /* Busy loop for 1s and wait for all waiters to wake up */ in TEST()
3289 /* Stop waiters */ in TEST()
3290 for (i = 0; i < ARRAY_SIZE(waiters); i++) in TEST()
[all...]
/kernel/linux/linux-5.10/include/linux/
H A Dpage-flags.h112 PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
341 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
/kernel/linux/linux-5.10/fs/xfs/
H A Dxfs_log.c154 INIT_LIST_HEAD(&head->waiters); in xlog_grant_head_init()
165 list_for_each_entry(tic, &head->waiters, t_queue) in xlog_grant_head_wake_all()
197 list_for_each_entry(tic, &head->waiters, t_queue) { in xlog_grant_head_wake()
244 list_add_tail(&tic->t_queue, &head->waiters);
275 * Once a ticket gets put onto head->waiters, it will only return after the
283 * As tickets are only ever moved on and off head->waiters under head->lock, we
286 * head->waiters because the t_queue list head will be empty and we hold the
302 * If there are other waiters on the queue then give them a chance at in xlog_grant_head_check()
303 * logspace before us. Wake up the first waiters, if we do not wake in xlog_grant_head_check()
304 * up all the waiters then in xlog_grant_head_check()
[all...]
H A Dxfs_log_priv.h374 struct list_head waiters; member
/kernel/linux/linux-6.6/fs/xfs/
H A Dxfs_log.c204 INIT_LIST_HEAD(&head->waiters); in xlog_grant_head_init()
215 list_for_each_entry(tic, &head->waiters, t_queue) in xlog_grant_head_wake_all()
247 list_for_each_entry(tic, &head->waiters, t_queue) { in xlog_grant_head_wake()
294 list_add_tail(&tic->t_queue, &head->waiters);
325 * Once a ticket gets put onto head->waiters, it will only return after the
333 * As tickets are only ever moved on and off head->waiters under head->lock, we
336 * head->waiters because the t_queue list head will be empty and we hold the
352 * If there are other waiters on the queue then give them a chance at in xlog_grant_head_check()
353 * logspace before us. Wake up the first waiters, if we do not wake in xlog_grant_head_check()
354 * up all the waiters then in xlog_grant_head_check()
[all...]
H A Dxfs_log_priv.h389 struct list_head waiters; member
/kernel/linux/linux-5.10/kernel/futex/
H A Dcore.c62 * hash bucket lock. Then it looks for waiters on that futex in the hash
100 * waiters++; (a)
114 * schedule(); if (waiters)
117 * waiters--; (b) unlock(hash_bucket(futex));
119 * Where (A) orders the waiters increment and the futex value read through
121 * to futex and the waiters read (see hb_waiters_pending()).
123 * This yields the following case (where X:=waiters, Y:=futex):
139 * Similarly, in order to account for waiters being requeued on another
140 * address we always increment the waiters for the destination bucket before
236 atomic_t waiters; member
[all...]
/kernel/linux/linux-6.6/drivers/tee/optee/
H A Doptee_private.h51 struct list_head waiters; member
H A Dffa_abi.c595 * thread waiters wake up one. in optee_ffa_yielding_call()
854 INIT_LIST_HEAD(&optee->call_queue.waiters); in optee_ffa_probe()
H A Dsmc_abi.c961 * thread waiters wake up one. in optee_smc_do_call_with_arg()
1729 INIT_LIST_HEAD(&optee->call_queue.waiters); in optee_probe()
/kernel/linux/linux-6.6/include/trace/events/
H A Dmmflags.h114 DEF_PAGEFLAG_NAME(waiters), \
/kernel/linux/linux-5.10/kernel/trace/
H A Dring_buffer.c416 wait_queue_head_t waiters; member
827 * ring buffer waiters queue.
833 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
842 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
843 * @buffer: The ring buffer to wake waiters on
846 * it is prudent to wake up any waiters that are on this.
866 /* make sure the waiters see the new index */ in ring_buffer_wake_waiters()
913 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
980 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
1024 poll_wait(filp, &work->waiters, poll_table) in ring_buffer_poll_wait()
[all...]
/kernel/linux/linux-6.6/kernel/trace/
H A Dring_buffer.c413 wait_queue_head_t waiters; member
898 * ring buffer waiters queue.
904 wake_up_all(&rbwork->waiters); in rb_wake_up_waiters()
913 * ring_buffer_wake_waiters - wake up any waiters on this ring buffer
914 * @buffer: The ring buffer to wake waiters on
915 * @cpu: The CPU buffer to wake waiters on
918 * it is prudent to wake up any waiters that are on this.
949 /* make sure the waiters see the new index */ in ring_buffer_wake_waiters()
996 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); in ring_buffer_wait()
1063 finish_wait(&work->waiters, &wait); in ring_buffer_wait()
[all...]
/kernel/linux/linux-6.6/kernel/futex/
H A Dcore.c668 * waiters it is killed. in handle_futex_death()
678 * potential waiters which can cause these waiters to block in handle_futex_death()
779 * and mark any locks found there dead, and notify any waiters.
873 * and mark any locks found there dead, and notify any waiters.
963 * versus waiters unqueueing themselves: in exit_pi_state_list()
1102 * Drop the exit protection. This unblocks waiters which observed in futex_cleanup_end()
1151 atomic_set(&futex_queues[i].waiters, 0); in futex_init()
/kernel/linux/linux-6.6/include/linux/
H A Dpage-flags.h108 PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
475 PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
H A Dsyscalls.h550 asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,

Completed in 42 milliseconds

123