Lines Matching refs:poll (io_uring/poll.c)

8 #include <linux/poll.h>
20 #include "poll.h"
38 /* output value, set only if arm poll returns >0 */
90 * arming poll and wakeups.
106 /* pure poll stashes this in ->async_data, poll-driven retry elsewhere */
116 return &req->apoll->poll;
170 static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
172 poll->head = NULL;
175 poll->events = events | IO_POLL_UNMASK;
176 INIT_LIST_HEAD(&poll->wait.entry);
177 init_waitqueue_func_entry(&poll->wait, io_poll_wake);
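
Assembled from the fragments at 170-177, the poll entry initializer looks roughly like the sketch below; IO_POLL_UNMASK is the set of always-reported error/hangup bits, whose exact value is not shown in this listing:

static void io_init_poll_iocb(struct io_poll *poll, __poll_t events)
{
	poll->head = NULL;
	/* error/hangup style bits are always reported on top of the request */
	poll->events = events | IO_POLL_UNMASK;
	INIT_LIST_HEAD(&poll->wait.entry);
	/* the waitqueue calls io_poll_wake() when the file signals readiness */
	init_waitqueue_func_entry(&poll->wait, io_poll_wake);
}
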
180 static inline void io_poll_remove_entry(struct io_poll *poll)
182 struct wait_queue_head *head = smp_load_acquire(&poll->head);
186 list_del_init(&poll->wait.entry);
187 poll->head = NULL;
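
The removal helper at 180-187 reads poll->head with an acquire load, pairing with the release store at line 419, so it never touches a waitqueue that POLLFREE already tore down. A sketch of the likely shape; taking the waitqueue lock around the unlink is an assumption here:

static inline void io_poll_remove_entry(struct io_poll *poll)
{
	struct wait_queue_head *head = smp_load_acquire(&poll->head);

	if (head) {
		/* serialise against io_poll_wake() running off this queue */
		spin_lock_irq(&head->lock);
		list_del_init(&poll->wait.entry);
		poll->head = NULL;
		spin_unlock_irq(&head->lock);
	}
}
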
196 * into the poll/apoll/double cachelines if we can.
248 * All poll tw should go through this. Checks for poll events, manages
255 * poll and that the result is stored in req->cqe.
363 struct io_poll *poll;
365 poll = io_kiocb_to_cmd(req, struct io_poll);
366 req->cqe.res = mangle_poll(req->cqe.res & poll->events);
398 static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
411 list_del_init(&poll->wait.entry);
419 smp_store_release(&poll->head, NULL);
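
Lines 398-419 are the POLLFREE path: the waitqueue itself is being freed, so the request is marked cancelled, task work is kicked to finish it, the entry is unlinked, and only then is NULL published into poll->head with a release store, which is what the acquire loads elsewhere pair with. Sketch (the two helper names are assumptions):

static __cold int io_pollfree_wake(struct io_kiocb *req, struct io_poll *poll)
{
	io_poll_mark_cancelled(req);	/* assumed helper: flag the req cancelled */
	io_poll_execute(req, 0);	/* assumed helper: kick task work */

	/* detach from the dying waitqueue */
	list_del_init(&poll->wait.entry);

	/*
	 * Must be last: once head is NULL, io_poll_remove_entry() no longer
	 * takes the (soon to be freed) waitqueue lock.
	 */
	smp_store_release(&poll->head, NULL);
	return 1;
}
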
427 struct io_poll *poll = container_of(wait, struct io_poll, wait);
431 return io_pollfree_wake(req, poll);
434 if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
439 * If we trigger a multishot poll off our own wakeup path,
444 poll->events |= EPOLLONESHOT;
447 if (mask && poll->events & EPOLLONESHOT) {
448 list_del_init(&poll->wait.entry);
449 poll->head = NULL;
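
Lines 427-449 are the waitqueue callback. Putting the fragments together: POLLFREE is handed to io_pollfree_wake(), wakeups carrying none of the requested events are ignored, a wakeup that originates from io_uring's own CQ posting downgrades multishot to oneshot (the circular-dependency case in the comment at 439), and oneshot entries are detached immediately so the task-work handler can skip the locking. A hedged sketch; the EPOLL_URING_WAKE test and the ownership/task-work helpers are assumptions about the surrounding code:

static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode,
			int sync, void *key)
{
	struct io_kiocb *req = wqe_to_req(wait);	/* assumed helper */
	struct io_poll *poll = container_of(wait, struct io_poll, wait);
	__poll_t mask = key_to_poll(key);

	if (unlikely(mask & POLLFREE))
		return io_pollfree_wake(req, poll);

	/* ignore wakeups that carry none of the events we asked for */
	if (mask && !(mask & (poll->events & ~IO_ASYNC_POLL_COMMON)))
		return 0;

	if (io_poll_get_ownership(req)) {		/* assumed helper */
		/* wakeup caused by our own CQ posting: force oneshot */
		if (mask & EPOLL_URING_WAKE)
			poll->events |= EPOLLONESHOT;

		if (mask && poll->events & EPOLLONESHOT) {
			/* detach now, saves locking in the tw handler */
			list_del_init(&poll->wait.entry);
			poll->head = NULL;
		}
		__io_poll_execute(req, mask);		/* assumed helper */
	}
	return 1;
}
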
464 struct io_poll *poll = io_poll_get_single(req);
468 head = smp_load_acquire(&poll->head);
470 * poll arm might not hold ownership and so race for req->flags with
471 * io_poll_wake(). There is only one poll entry queued, serialise with
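
Lines 464-471 belong to the double-poll preparation step: poll arming may not hold ownership of the request, so req->flags can race with io_poll_wake(); since exactly one poll entry is queued at this point, its waitqueue lock is borrowed to serialise the flag update. Minimal sketch of that idea (the flag name is an assumption):

	struct io_poll *poll = io_poll_get_single(req);
	struct wait_queue_head *head;

	head = smp_load_acquire(&poll->head);
	if (head) {
		/* borrow the one queued entry's lock to update req->flags safely */
		spin_lock_irq(&head->lock);
		req->flags |= REQ_F_DOUBLE_POLL;	/* assumed flag */
		spin_unlock_irq(&head->lock);
	}
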
486 static void __io_queue_proc(struct io_poll *poll, struct io_poll_table *pt,
494 * The file being polled uses multiple waitqueues for poll handling
499 struct io_poll *first = poll;
512 poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
513 if (!poll) {
520 io_init_poll_iocb(poll, first->events);
523 kfree(poll);
526 *poll_ptr = poll;
528 /* fine to modify, there is no poll queued to race with us */
533 poll->head = head;
534 poll->wait.private = (void *) wqe_private;
536 if (poll->events & EPOLLEXCLUSIVE)
537 add_wait_queue_exclusive(head, &poll->wait);
539 add_wait_queue(head, &poll->wait);
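
Lines 486-539 are the poll_table callback that attaches the request to the file's waitqueue(s). The interesting case (comment at 494) is a file with more than one waitqueue: a second io_poll is allocated atomically, inherits the first entry's event mask, and is stored through poll_ptr, which is the double_poll slot seen at line 659. The tail then registers the wait entry, exclusively when EPOLLEXCLUSIVE was asked for. Condensed sketch of that body, with error bookkeeping elided and the pt->error/nr_entries plumbing assumed:

	if (unlikely(pt->nr_entries)) {
		struct io_poll *first = poll;

		/* second waitqueue for this file: need another poll entry */
		poll = kmalloc(sizeof(*poll), GFP_ATOMIC);
		if (!poll) {
			pt->error = -ENOMEM;
			return;
		}
		io_init_poll_iocb(poll, first->events);
		*poll_ptr = poll;
	}

	pt->nr_entries++;
	poll->head = head;
	poll->wait.private = (void *) wqe_private;

	if (poll->events & EPOLLEXCLUSIVE)
		add_wait_queue_exclusive(head, &poll->wait);
	else
		add_wait_queue(head, &poll->wait);
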
546 struct io_poll *poll = io_kiocb_to_cmd(pt->req, struct io_poll);
548 __io_queue_proc(poll, pt, head,
573 struct io_poll *poll,
581 io_init_poll_iocb(poll, mask);
582 poll->file = req->file;
583 req->apoll_events = poll->events;
607 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
615 } else if (mask && (poll->events & EPOLLET)) {
623 ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
636 if (mask && (poll->events & EPOLLET) &&
645 * poll was woken up, queue up a tw, it'll deal with it.
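
Lines 573-645 form the core arming helper. Heavily simplified: the entry is initialised with the requested mask, vfs_poll() both reports current readiness and registers the wait entry through ipt->pt, and the result is filtered against poll->events. If the file is already ready and the poll is both edge-triggered and oneshot, the request can finish inline (returning >0 with the mask in the output value mentioned at line 38); if it is edge-triggered with an event pending, task work is kicked; otherwise ownership is dropped and, if a wakeup raced in meanwhile, task work deals with it (comment at 645). A hedged sketch, with ownership refcounting, hashing and error paths omitted and helper names assumed:

	io_init_poll_iocb(poll, mask);
	poll->file = req->file;
	req->apoll_events = poll->events;

	/* report readiness and, via ipt->pt, hook up io_poll_wake() */
	mask = vfs_poll(req->file, &ipt->pt) & poll->events;

	if (mask &&
	    ((poll->events & (EPOLLET|EPOLLONESHOT)) == (EPOLLET|EPOLLONESHOT))) {
		/* already ready, only one edge wanted: complete inline */
		io_poll_remove_entries(req);	/* assumed helper */
		ipt->result_mask = mask;
		return 1;
	}

	if (mask && (poll->events & EPOLLET)) {
		/* edge-triggered with an event pending: make sure tw runs */
		__io_poll_execute(req, mask);	/* assumed helper */
	}
	return 0;
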
659 __io_queue_proc(&apoll->poll, pt, head, &apoll->double_poll);
663 * We can't reliably detect loops in repeated poll triggers and issue
685 apoll->poll.retries = APOLL_MAX_RETRY;
691 apoll->poll.retries = APOLL_MAX_RETRY;
695 if (unlikely(!--apoll->poll.retries))
742 ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
745 trace_io_uring_poll_arm(req, mask, apoll->poll.events);
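
Lines 663-695 bound how often an async poll may retrigger: loops of repeated wakeups cannot be detected reliably (comment at 663), so each apoll carries a retries budget initialised to APOLL_MAX_RETRY and decremented on every arm; once it hits zero the apoll path gives up and the request is issued the slow way. Sketch of the allocation/reuse side of that (the REQ_F_POLLED reuse check and surrounding shape are assumptions):

	struct async_poll *apoll;

	if (req->flags & REQ_F_POLLED) {
		/* re-arming: reuse the previous apoll, keep its remaining budget */
		apoll = req->apoll;
		kfree(apoll->double_poll);
	} else {
		apoll = kmalloc(sizeof(*apoll), GFP_ATOMIC);
		if (unlikely(!apoll))
			return NULL;
		apoll->poll.retries = APOLL_MAX_RETRY;
	}
	apoll->double_poll = NULL;
	req->apoll = apoll;

	/* cap retriggers instead of trying to detect wakeup loops */
	if (unlikely(!--apoll->poll.retries))
		return NULL;
	return apoll;
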
776 * Returns true if we found and killed one or more poll requests
938 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
949 poll->events = io_poll_parse_events(sqe, flags);
955 struct io_poll *poll = io_kiocb_to_cmd(req, struct io_poll);
968 ret = __io_arm_poll_handler(req, poll, &ipt, poll->events, issue_flags);
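
Lines 938-968 implement IORING_OP_POLL_ADD: prep parses the event mask out of the SQE, issue arms the handler with it, and the CQE result ends up being the mangled readiness mask from line 366. From userspace the same path is driven with liburing roughly as follows; this is a usage sketch, not code from this file, and assumes liburing's io_uring_prep_poll_add()/io_uring_prep_poll_multishot() helpers:

#include <liburing.h>
#include <poll.h>
#include <stdio.h>

static int wait_readable(int fd)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int ret;

	if (io_uring_queue_init(8, &ring, 0) < 0)
		return -1;

	sqe = io_uring_get_sqe(&ring);
	/* single-shot poll: one CQE once fd is readable (or errors/hangs up) */
	io_uring_prep_poll_add(sqe, fd, POLLIN);
	/* io_uring_prep_poll_multishot(sqe, fd, POLLIN) keeps posting CQEs instead */
	io_uring_submit(&ring);

	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		/* cqe->res carries the revents-style mask for the completion */
		printf("poll result: 0x%x\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}
	io_uring_queue_exit(&ring);
	return ret;
}
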
1015 struct io_poll *poll = io_kiocb_to_cmd(preq, struct io_poll);
1017 poll->events &= ~0xffff;
1018 poll->events |= poll_update->events & 0xffff;
1019 poll->events |= IO_POLL_UNMASK;
1025 /* successfully updated, don't complete poll request */
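
Lines 1015-1019 show what an update actually touches: only the low 16 bits of poll->events are replaced with the caller's new mask and IO_POLL_UNMASK is OR'ed back in, so the request keeps its internal bits. From userspace this is IORING_OP_POLL_REMOVE with the update flags; a hedged liburing sketch, assuming a recent io_uring_prep_poll_update() signature and a previously armed poll tagged with user_data 0x1234:

#include <liburing.h>
#include <poll.h>

/* switch an armed poll request (user_data 0x1234) from POLLIN to POLLOUT */
static int switch_to_pollout(struct io_uring *ring)
{
	struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

	if (!sqe)
		return -1;
	/*
	 * IORING_POLL_UPDATE_EVENTS replaces only the low 16 event bits of
	 * the target request, matching the "&= ~0xffff" logic at line 1017.
	 */
	io_uring_prep_poll_update(sqe, 0x1234, 0, POLLOUT,
				  IORING_POLL_UPDATE_EVENTS);
	return io_uring_submit(ring);
}
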