Lines matching refs: wqh (each match below is prefixed with its line number in the source file)
32 wait_queue_head_t wqh;
36 * wakeup is performed on "wqh". If EFD_SEMAPHORE flag was not
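
The field at line 32 is a plain wait_queue_head_t embedded in the per-file context, and the comment at line 36 describes when it is woken. As rough context for the matches below, here is a minimal sketch of such a structure; only the wqh field is taken from the listing, while the other fields and the name example_ctx are assumptions reused by the later sketches.

#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical context structure; only .wqh is confirmed by the listing. */
struct example_ctx {
	wait_queue_head_t wqh;	/* readers, writers and pollers sleep here */
	__u64 count;		/* counter that wakeups are reported for (assumed) */
	unsigned int flags;	/* creation flags, e.g. EFD_SEMAPHORE (assumed) */
};
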
61 spin_lock_irqsave(&ctx->wqh.lock, flags);
66 if (waitqueue_active(&ctx->wqh))
67 wake_up_locked_poll(&ctx->wqh, EPOLLIN | mask);
69 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
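
The matches at lines 61-69 are the wake-up side of the pattern: the wait-queue head's own spinlock (wqh.lock) doubles as the lock protecting the counter, waitqueue_active() skips the wake-up walk when nobody is sleeping, and wake_up_locked_poll() is the variant that expects that lock to be held already. A minimal sketch of the same shape, assuming the hypothetical example_ctx above and an invented helper name:

#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Add @n to the counter and wake anyone waiting for readability. */
static void example_signal(struct example_ctx *ctx, __u64 n)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);	/* also guards ctx->count */
	ctx->count += n;
	if (waitqueue_active(&ctx->wqh))		/* any sleepers at all? */
		wake_up_locked_poll(&ctx->wqh, EPOLLIN);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);
}
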
125 wake_up_poll(&ctx->wqh, EPOLLHUP);
136 poll_wait(file, &ctx->wqh, wait);
139 * All writes to ctx->count occur within ctx->wqh.lock. This read
140 * can be done outside ctx->wqh.lock because we know that poll_wait
150 * lock ctx->wqh.lock (in poll_wait)
153 * unlock ctx->wqh.lock
171 * lock ctx->wqh.lock (in poll_wait)
173 * unlock ctx->wqh.lock
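
Lines 136-173 belong to the poll callback and its locking commentary: poll_wait() queues the caller on wqh and takes and drops wqh.lock while doing so, and that lock round-trip is what makes the later lockless read of ctx->count safe against writers updating it under the same lock, which is what the interleavings quoted at lines 150-173 illustrate. A sketch of that shape, with the event computation kept deliberately simple; the exact thresholds here are illustrative, not taken from the listing.

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/poll.h>

static __poll_t example_poll(struct file *file, poll_table *wait)
{
	struct example_ctx *ctx = file->private_data;
	__poll_t events = 0;
	__u64 count;

	/* Registers the waiter; internally acquires and releases ctx->wqh.lock. */
	poll_wait(file, &ctx->wqh, wait);
	count = READ_ONCE(ctx->count);

	if (count > 0)
		events |= EPOLLIN;		/* something can be read */
	if (count < ULLONG_MAX - 1)
		events |= EPOLLOUT;		/* illustrative writability check */

	return events;
}
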
190 lockdep_assert_held(&ctx->wqh.lock);
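
Line 190 documents a locking contract rather than taking any lock itself: the helper it sits in mutates state guarded by wqh.lock and uses lockdep_assert_held() so lockdep can verify that every caller already holds it. A small sketch of such a helper; the name, and the EFD_SEMAPHORE handling inferred from the comment at line 36, are assumptions.

#include <linux/eventfd.h>	/* EFD_SEMAPHORE */
#include <linux/lockdep.h>
#include <linux/wait.h>

/* Consume from the counter; the caller must hold ctx->wqh.lock. */
static void example_do_read(struct example_ctx *ctx, __u64 *cnt)
{
	lockdep_assert_held(&ctx->wqh.lock);

	/* Semaphore mode hands out one unit per read, otherwise everything. */
	*cnt = ((ctx->flags & EFD_SEMAPHORE) && ctx->count) ? 1 : ctx->count;
	ctx->count -= *cnt;
}
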
215 spin_lock_irqsave(&ctx->wqh.lock, flags);
217 __remove_wait_queue(&ctx->wqh, wait);
218 if (*cnt != 0 && waitqueue_active(&ctx->wqh))
219 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
220 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
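
Lines 215-220 show the path for a caller that had parked its own wait-queue entry on wqh: with wqh.lock held it consumes the counter, unhooks its entry with __remove_wait_queue() (the lock-already-held variant), and, if anything was consumed, wakes writers that may now have room. A sketch along those lines; the function name and the return convention are assumptions.

#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Detach a previously queued waiter; uses example_do_read() sketched above. */
static int example_remove_wait_queue(struct example_ctx *ctx,
				     wait_queue_entry_t *wait, __u64 *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->wqh.lock, flags);
	example_do_read(ctx, cnt);
	__remove_wait_queue(&ctx->wqh, wait);	/* lock held, so the __ variant */
	if (*cnt != 0 && waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
	spin_unlock_irqrestore(&ctx->wqh.lock, flags);

	return *cnt != 0 ? 0 : -EAGAIN;		/* assumed convention */
}
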
234 spin_lock_irq(&ctx->wqh.lock);
238 spin_unlock_irq(&ctx->wqh.lock);
242 if (wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
243 spin_unlock_irq(&ctx->wqh.lock);
249 if (waitqueue_active(&ctx->wqh))
250 wake_up_locked_poll(&ctx->wqh, EPOLLOUT);
252 spin_unlock_irq(&ctx->wqh.lock);
272 spin_lock_irq(&ctx->wqh.lock);
277 res = wait_event_interruptible_locked_irq(ctx->wqh,
285 if (waitqueue_active(&ctx->wqh))
286 wake_up_locked_poll(&ctx->wqh, EPOLLIN);
289 spin_unlock_irq(&ctx->wqh.lock);
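
The two clusters at lines 234-252 and 272-289 are mirror images of one blocking pattern: with wqh.lock taken via spin_lock_irq(), wait_event_interruptible_locked_irq() sleeps on wqh (dropping and retaking the lock around the sleep) until its condition holds or a signal arrives, and on the way out the opposite direction is woken, so a consumer wakes EPOLLOUT waiters and a producer wakes EPOLLIN waiters. A sketch of the consumer direction, reusing the helpers above; the producer side follows the same shape with the wait condition and wake-up mask swapped, and nonblocking handling is omitted here.

#include <linux/errno.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Block until the counter is non-zero, then consume it. */
static int example_read_value(struct example_ctx *ctx, __u64 *val)
{
	spin_lock_irq(&ctx->wqh.lock);
	if (!ctx->count &&
	    wait_event_interruptible_locked_irq(ctx->wqh, ctx->count)) {
		/* A signal arrived before the counter became non-zero. */
		spin_unlock_irq(&ctx->wqh.lock);
		return -ERESTARTSYS;
	}
	example_do_read(ctx, val);
	if (waitqueue_active(&ctx->wqh))
		wake_up_locked_poll(&ctx->wqh, EPOLLOUT);	/* writers may fit now */
	spin_unlock_irq(&ctx->wqh.lock);

	return 0;
}
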
299 spin_lock_irq(&ctx->wqh.lock);
302 spin_unlock_irq(&ctx->wqh.lock);
407 init_waitqueue_head(&ctx->wqh);
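
Finally, line 407 is where the wait-queue head is initialised, in the setup path that allocates the context before the file descriptor is exposed. A minimal constructor sketch; the allocation details and the function name are assumptions.

#include <linux/slab.h>
#include <linux/wait.h>

static struct example_ctx *example_ctx_alloc(unsigned int flags)
{
	struct example_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	ctx->flags = flags;
	init_waitqueue_head(&ctx->wqh);	/* before anyone can sleep on it */
	return ctx;
}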