Lines Matching defs:wait

287 	struct wait_queue_head	wait;
468 struct wait_queue_head wait;
490 struct wait_queue_entry wait;
918 /* set if opcode supports polled "wait" */
1196 /* already at zero, wait for ->release() */
1650 if (ctx->sq_data && waitqueue_active(&ctx->sq_data->wait))
1651 wake_up(&ctx->sq_data->wait);
1745 wake_up(&tctx->wait);
2593 * We can't just wait for polled events to come to us, we have to actively
2899 wq_has_sleeper(&ctx->sq_data->wait))
2900 wake_up(&ctx->sq_data->wait);
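
The hits at 1650-1651 and 2899-2900 above (and 10661-10662 further down) are all the same producer-side idiom: peek at the wait queue and only call wake_up() when a sleeper may actually be queued. A minimal sketch of that idiom follows; struct my_ctx, its submitter_wq field, and notify_submitter() are invented for illustration, only the waitqueue helpers are real kernel API.

#include <linux/wait.h>

struct my_ctx {
	struct wait_queue_head	submitter_wq;	/* illustrative stand-in for sq_data->wait */
};

static void notify_submitter(struct my_ctx *ctx)
{
	/*
	 * wq_has_sleeper() (as at 2899) issues the full barrier that pairs
	 * with the sleeper's prepare_to_wait()/set_current_state();
	 * waitqueue_active() (as at 1650) skips the barrier and relies on
	 * the caller to provide its own ordering.
	 */
	if (wq_has_sleeper(&ctx->submitter_wq))
		wake_up(&ctx->submitter_wq);
}
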
3520 static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode,
3524 struct io_kiocb *req = wait->private;
3527 wpq = container_of(wait, struct wait_page_queue, wait);
3533 list_del_init(&wait->entry);
3553 struct wait_page_queue *wait = &rw->wpq;
3571 wait->wait.func = io_async_buf_func;
3572 wait->wait.private = req;
3573 wait->wait.flags = 0;
3574 INIT_LIST_HEAD(&wait->wait.entry);
3577 kiocb->ki_waitq = wait;
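
The cluster at 3520-3577 is the custom wake-callback idiom: a wait_queue_entry embedded in a larger object, a callback installed in ->func, and container_of() used to get back to the container before the entry unhooks itself. The sketch below follows the same shape under invented names (my_waiter, my_wake_func, my_waiter_init); the real io_async_buf_func() requeues the request for retry, whereas the sketch simply wakes the owning task.

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct my_waiter {
	struct wait_queue_entry	wait;	/* embedded entry, like rw->wpq.wait */
	struct task_struct	*task;	/* whom to kick once the wake-up fires */
};

/* Runs under the waitqueue lock when someone calls wake_up() on the head. */
static int my_wake_func(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	struct my_waiter *w = container_of(wait, struct my_waiter, wait);

	list_del_init(&wait->entry);	/* one-shot: drop off the queue */
	wake_up_process(w->task);	/* the real code requeues the request instead */
	return 1;
}

static void my_waiter_init(struct my_waiter *w)
{
	w->task		= current;
	w->wait.func	= my_wake_func;
	w->wait.private	= w;
	w->wait.flags	= 0;
	INIT_LIST_HEAD(&w->wait.entry);
}
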
5427 INIT_LIST_HEAD(&poll->wait.entry);
5428 init_waitqueue_func_entry(&poll->wait, wake_func);
5437 list_del_init(&poll->wait.entry);
5450 * wake_up_pollfree() will wait for us. However, taking the waitqueue
5619 static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
5622 struct io_kiocb *req = wait->private;
5623 struct io_poll_iocb *poll = container_of(wait, struct io_poll_iocb,
5624 wait);
5639 list_del_init(&poll->wait.entry);
5699 io_init_poll_iocb(poll, first->events, first->wait.func);
5705 poll->wait.private = req;
5708 add_wait_queue_exclusive(head, &poll->wait);
5710 add_wait_queue(head, &poll->wait);
5731 poll->wait.private = req;
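
The poll matches at 5427-5731 use the same callback idea but go through the init_waitqueue_func_entry() helper and then choose, at 5708/5710, between exclusive and shared queueing. A hedged sketch of just that registration step (my_wake and register_wake are placeholders, not io_uring functions):

#include <linux/list.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Placeholder callback: take ourselves off the queue and report the wake-up. */
static int my_wake(struct wait_queue_entry *entry, unsigned mode,
		   int sync, void *key)
{
	list_del_init(&entry->entry);
	return 1;
}

/*
 * wake_up() on a head wakes every non-exclusive entry but only one
 * exclusive entry; that is the difference the two add_* calls encode.
 */
static void register_wake(struct wait_queue_head *head,
			  struct wait_queue_entry *entry,
			  void *private, bool exclusive)
{
	init_waitqueue_func_entry(entry, my_wake);
	entry->private = private;

	if (exclusive)
		add_wait_queue_exclusive(head, entry);
	else
		add_wait_queue(head, entry);
}
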
6904 * wait for request slots on the block side.
6911 * If REQ_F_NOWAIT is set, then don't wait or retry with
7519 DEFINE_WAIT(wait);
7557 prepare_to_wait(&sqd->wait, &wait, TASK_INTERRUPTIBLE);
7584 finish_wait(&sqd->wait, &wait);
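
The DEFINE_WAIT()/prepare_to_wait()/finish_wait() triples here (7519-7584) and later at 9873-9918 and 10010-10022 are the open-coded sleep loop. A generic sketch of the bare idiom, with the condition passed in as a callback (wait_for_work() and its arguments are illustrative, not io_uring code):

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/wait.h>

/* Sleep on 'wq' until cond(arg) holds or a signal arrives. */
static int wait_for_work(struct wait_queue_head *wq,
			 bool (*cond)(void *), void *arg)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		/* Queue up and set the task state *before* re-checking the condition. */
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (cond(arg))
			break;
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		schedule();
	}
	finish_wait(wq, &wait);
	return ret;
}
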
7735 /* if we can't even flush overflow, don't wait for more */
7907 /* wait for all works potentially completing data->done */
8156 init_waitqueue_head(&sqd->wait);
8651 init_waitqueue_head(&hash->wait);
8692 init_waitqueue_head(&tctx->wait);
9370 /* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
9423 static __poll_t io_uring_poll(struct file *file, poll_table *wait)
9428 poll_wait(file, &ctx->poll_wait, wait);
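
io_uring_poll() at 9423-9428 is the standard file ->poll shape: hand the caller's poll_table to poll_wait() so it gets queued on our wait queue, then report whatever is ready right now. A minimal sketch of that shape for a made-up device (struct mydev, mydev_poll() and mydev_readable() are all illustrative):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct mydev {
	struct wait_queue_head	poll_wq;	/* woken when new data arrives */
	bool			have_data;
};

static bool mydev_readable(struct mydev *dev)
{
	return READ_ONCE(dev->have_data);	/* placeholder readiness check */
}

static __poll_t mydev_poll(struct file *file, poll_table *wait)
{
	struct mydev *dev = file->private_data;
	__poll_t mask = 0;

	/* Register on our queue; a later wake_up(&dev->poll_wq) re-polls us. */
	poll_wait(file, &dev->poll_wq, wait);

	if (mydev_readable(dev))
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
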
9533 * This is really an uninterruptible wait, as it has to be
9536 * avoids scenarios where we knowingly can wait much longer
9571 * wait is marked as interruptible.
9873 DEFINE_WAIT(wait);
9907 prepare_to_wait(&tctx->wait, &wait, TASK_INTERRUPTIBLE);
9918 finish_wait(&tctx->wait, &wait);
10010 DEFINE_WAIT(wait);
10015 prepare_to_wait(&ctx->sqo_sq_wait, &wait, TASK_INTERRUPTIBLE);
10022 finish_wait(&ctx->sqo_sq_wait, &wait);
10104 wake_up(&ctx->sq_data->wait);
10661 if (ctx->sq_data && wq_has_sleeper(&ctx->sq_data->wait))
10662 wake_up(&ctx->sq_data->wait);
10910 * wait, then we can deadlock. It's safe to drop the mutex here, since