Lines Matching defs:iowq
1914 /* either cancelled or io-wq is dying, so don't touch tctx->iowq */
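(Note: the "io-wq" in this first match is the per-task async worker pool the comment refers to; it is unrelated to the local `struct io_wait_queue iowq` that every other match below concerns, which is the on-stack CQ-wait state a task uses while sleeping in io_cqring_wait().)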
2450 static inline bool io_should_wake(struct io_wait_queue *iowq)
2452 struct io_ring_ctx *ctx = iowq->ctx;
2453 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
2460 return dist >= 0 || atomic_read(&ctx->cq_timeouts) != iowq->nr_timeouts;
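The wake condition in lines 2450-2460 combines two tests: the ring's CQ tail has reached the target `iowq->cq_tail` snapshotted as "CQ head at wait start + min_events" (line 2573 below), or the `cq_timeouts` counter has moved since the `nr_timeouts` snapshot, meaning a CQ timeout fired while waiting. The tail test is done as a signed distance so it stays correct when the 32-bit tail wraps. A small, self-contained userspace sketch of that idiom (all names here are invented for the demo; only the arithmetic mirrors line 2453):

```c
/* Wrap-safe "have we reached the target tail?" check: subtract as unsigned,
 * then interpret the difference as a signed int, mirroring
 *   int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail;
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool reached_target(unsigned int cur_tail, unsigned int target_tail)
{
	/* Conversion to int of a large unsigned value is the usual
	 * two's-complement reinterpretation on real hardware. */
	int dist = (int)(cur_tail - target_tail);
	return dist >= 0;
}

int main(void)
{
	/* Plain case: the tail has advanced past the target. */
	assert(reached_target(105, 100));
	/* Not enough completions yet: distance is negative. */
	assert(!reached_target(99, 100));
	/* Wrap-around case: the target sat just below UINT_MAX and the tail
	 * has since wrapped past zero; the signed distance is still a small
	 * positive number, so the check still says "reached". */
	assert(reached_target(5, 0xfffffffbu));
	printf("wrap-safe tail comparison behaves as expected\n");
	return 0;
}
```

The inverse of this distance shows up again at line 2586 below (`nr_wait`), roughly "how many completions are still needed" for the DEFER_TASKRUN wakeup bookkeeping.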
2466 struct io_wait_queue *iowq = container_of(curr, struct io_wait_queue, wq);
2472 if (io_should_wake(iowq) || io_has_work(iowq->ctx))
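The io_wake_function() matches (2466, 2472) are the callback installed on the CQ wait queue entry. It is handed only the embedded wait_queue_entry, recovers the surrounding io_wait_queue with container_of(), and only falls through to actually waking the sleeping task when io_should_wake() or io_has_work() says there is something to report; otherwise the waiter stays parked. The container_of() recovery step is the pattern worth noting; a self-contained userspace illustration (struct and function names invented for the demo):

```c
/* Minimal userspace sketch of the container_of pattern used at line 2466:
 * a callback receives a pointer to a member embedded in a larger struct and
 * recovers the enclosing object from it.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct wait_entry {
	int (*func)(struct wait_entry *entry);
};

struct my_waiter {
	struct wait_entry wq;	/* embedded entry, like iowq->wq */
	unsigned int cq_tail;	/* extra per-waiter state */
};

static int my_wake_function(struct wait_entry *curr)
{
	/* Same recovery step as:
	 *   iowq = container_of(curr, struct io_wait_queue, wq); */
	struct my_waiter *waiter = container_of(curr, struct my_waiter, wq);

	printf("woken waiter with cq_tail=%u\n", waiter->cq_tail);
	return 1;
}

int main(void)
{
	struct my_waiter waiter = {
		.wq = { .func = my_wake_function },
		.cq_tail = 42,
	};

	/* A wait queue would only ever hand the callback &waiter.wq. */
	waiter.wq.func(&waiter.wq);
	return 0;
}
```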
2502 struct io_wait_queue *iowq)
2514 if (unlikely(io_should_wake(iowq)))
2525 if (iowq->timeout == KTIME_MAX)
2527 else if (!schedule_hrtimeout(&iowq->timeout, HRTIMER_MODE_ABS))
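The io_cqring_wait_schedule() matches (2502-2527) show the sleep step re-checking io_should_wake() before blocking, then either sleeping with no timer when `iowq->timeout` is KTIME_MAX (no user timeout was supplied) or sleeping until an absolute hrtimer deadline (HRTIMER_MODE_ABS). The deadline is absolute because it is computed once, up front, at line 2581 as "now + user timeout", so re-entering the schedule step after a spurious wakeup does not stretch the total wait. A userspace analog of that fixed-absolute-deadline-across-retries technique (names and the retry loop are invented for the demo):

```c
/* Compute "now + timeout" once, then always sleep toward that same absolute
 * deadline, so retries of the wait loop cannot extend the overall budget.
 * This mirrors ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns()) plus
 * HRTIMER_MODE_ABS in the listing above.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec deadline;
	long timeout_ns = 200 * 1000 * 1000;	/* 200ms budget */

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_nsec += timeout_ns;
	deadline.tv_sec += deadline.tv_nsec / 1000000000L;
	deadline.tv_nsec %= 1000000000L;

	for (int attempt = 0; attempt < 3; attempt++) {
		/* Each retry still targets the same absolute deadline. */
		int rc = clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
					 &deadline, NULL);
		if (rc == 0) {
			printf("deadline reached on attempt %d\n", attempt);
			break;
		}
		if (rc != EINTR) {
			fprintf(stderr, "clock_nanosleep: %d\n", rc);
			return 1;
		}
		/* EINTR: a signal woke us early; loop and retry. */
	}
	return 0;
}
```

clock_nanosleep() returns the error number directly rather than setting errno, which is why the loop compares `rc` against EINTR.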
2541 struct io_wait_queue iowq;
2568 init_waitqueue_func_entry(&iowq.wq, io_wake_function);
2569 iowq.wq.private = current;
2570 INIT_LIST_HEAD(&iowq.wq.entry);
2571 iowq.ctx = ctx;
2572 iowq.nr_timeouts = atomic_read(&ctx->cq_timeouts);
2573 iowq.cq_tail = READ_ONCE(ctx->rings->cq.head) + min_events;
2574 iowq.timeout = KTIME_MAX;
2581 iowq.timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
2586 int nr_wait = (int) iowq.cq_tail - READ_ONCE(ctx->rings->cq.tail);
2593 prepare_to_wait_exclusive(&ctx->cq_wait, &iowq.wq,
2597 ret = io_cqring_wait_schedule(ctx, &iowq);
2633 if (io_should_wake(&iowq)) {
2641 finish_wait(&ctx->cq_wait, &iowq.wq);
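The remaining matches (2541-2641) are io_cqring_wait() itself: the on-stack `iowq` gets the wake callback and the current task, the target tail and timeout are snapshotted, and the task then loops through prepare_to_wait_exclusive() on ctx->cq_wait, io_cqring_wait_schedule(), a final io_should_wake() re-check, and finish_wait(). From userspace this is the path reached by io_uring_enter() with IORING_ENTER_GETEVENTS and a non-zero min_complete. A minimal liburing program that exercises it is sketched below; error handling is abbreviated, the NOP request exists only so the wait has something to find, and whether the 1-second timeout travels via the EXT_ARG mechanism or an internal timeout request depends on the liburing and kernel versions in use.

```c
/* User-side counterpart of the kernel wait shown above: block until at least
 * one CQE is available or a timeout expires. */
#include <liburing.h>
#include <stdio.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
	int ret;

	ret = io_uring_queue_init(8, &ring, 0);
	if (ret < 0) {
		fprintf(stderr, "queue_init: %d\n", ret);
		return 1;
	}

	/* Submit a NOP so the wait below has a completion to find. */
	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_nop(sqe);
	io_uring_submit(&ring);

	/* Wait up to 1s for at least one CQE; in the kernel this lands in the
	 * io_cqring_wait() path the matches above come from. */
	ret = io_uring_wait_cqes(&ring, &cqe, 1, &ts, NULL);
	if (ret == 0) {
		printf("got cqe, res=%d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	} else {
		fprintf(stderr, "wait_cqes: %d\n", ret);
	}

	io_uring_queue_exit(&ring);
	return ret ? 1 : 0;
}
```

Link against liburing (for example, `-luring` with gcc) to build the sketch.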