Lines matching defs:events (each match is prefixed with its line number in the source file)

84 #include <trace/events/io_uring.h>
148 * application (i.e. get number of "new events" by comparing to
174 * Number of completion events lost because the queue was full;
180 * application (i.e. get number of "new events" by comparing to
183 * As completion events come in out of order this counter is not
188 * Ring buffer of completion events.
190 * The kernel writes completion events fresh every time they are
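The fragments at 148-190 are from the struct io_rings bookkeeping: the application counts "new events" by comparing the kernel-written CQ tail against its own cached head, and the unsigned subtraction stays correct across wraparound. A minimal userspace sketch of that comparison follows; the field names (khead, ktail, ring_mask) follow liburing's conventions rather than the kernel struct above, and the acquire/relaxed pairing is the usual convention, not lifted from any particular tree.

    #include <stdatomic.h>
    #include <stdint.h>

    struct cq_view {
        _Atomic uint32_t *khead;  /* consumer index, written by the app */
        _Atomic uint32_t *ktail;  /* producer index, written by the kernel */
        uint32_t ring_mask;       /* ring_entries - 1, for indexing cqes */
    };

    static uint32_t cq_ready(const struct cq_view *cq)
    {
        /* acquire pairs with the kernel's release store of the tail */
        uint32_t tail = atomic_load_explicit(cq->ktail, memory_order_acquire);
        uint32_t head = atomic_load_explicit(cq->khead, memory_order_relaxed);

        /* unsigned subtraction is wraparound-safe */
        return tail - head;
    }

Note that cq_ready() says nothing about lost completions; that is exactly what the overflow counter described at 174 reports, and, per 183, it is not ordered with the ring contents because completions arrive out of order.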
488 __poll_t events;
497 __poll_t events;
1554 * Assuming not more than 2^31-1 events have happened since,
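The comment at 1554 leans on the standard sequence-number trick: two u32 counters can be ordered by a signed interpretation of their difference, provided fewer than 2^31-1 events separate the snapshots, because only then is the difference guaranteed not to have crossed the signed midpoint. A self-contained illustration, with a hypothetical helper name:

    #include <stdbool.h>
    #include <stdint.h>

    /* true if sequence a is "behind" b; valid while the two are
     * fewer than 2^31 - 1 events apart */
    static bool seq_before(uint32_t a, uint32_t b)
    {
        return (int32_t)(a - b) < 0;
    }

For example, seq_before(0xfffffffe, 2) is true: the difference 0xfffffffc reads as -4 once cast, so wraparound at 2^32 is handled transparently.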
2593 * We can't just wait for polled events to come to us, we have to actively
2612 * in this case we need to ensure that we reap all events.
2636 * Don't enter poll loop if we already have events pending.
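Lines 2593-2636 come from the IORING_SETUP_IOPOLL path: completions are not delivered by interrupt, so the kernel has to loop and actively reap them, and it skips the loop when completions are already pending. A hedged skeleton of that control flow; the two stubs are illustrative stand-ins for what mainline does with io_cqring_events() and io_do_iopoll(), not real interfaces:

    #include <stdbool.h>

    /* illustrative stubs, not kernel APIs */
    static bool have_pending_cqes(void) { return false; }
    static int  reap_some(unsigned *done) { ++*done; return 0; }

    static int iopoll_wait(unsigned min_events)
    {
        unsigned done = 0;

        /* "Don't enter poll loop if we already have events pending." */
        if (have_pending_cqes())
            return 0;

        /* nothing will wake us; find and complete events ourselves */
        do {
            int ret = reap_some(&done);
            if (ret < 0)
                return ret;
        } while (done < min_events);

        return 0;
    }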
5420 static void io_init_poll_iocb(struct io_poll_iocb *poll, __poll_t events,
5425 /* mask in events that we always want/need */
5426 poll->events = events | IO_POLL_UNMASK;
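io_init_poll_iocb() at 5420-5426 ORs a constant into whatever the caller asked for, so that error and hang-up conditions can always be delivered. A userspace sketch of the same idea; the bit set behind MY_POLL_UNMASK is my assumption about what IO_POLL_UNMASK expands to, so check it against the tree being read:

    #define _GNU_SOURCE            /* for POLLRDHUP */
    #include <poll.h>

    /* assumed analogue of the kernel's IO_POLL_UNMASK */
    #define MY_POLL_UNMASK (POLLERR | POLLHUP | POLLNVAL | POLLRDHUP)

    static short init_poll_mask(short requested)
    {
        /* "mask in events that we always want/need" (5425) */
        return (short)(requested | MY_POLL_UNMASK);
    }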
5471 * All poll tw should go through this. Checks for poll events, manages
5497 * cqe.res contains only events of the first wake up
5506 * We won't find new events that came in between
5515 struct poll_table_struct pt = { ._key = poll->events };
5517 req->result = vfs_poll(req->file, &pt) & poll->events;
5521 if (req->result && !(poll->events & EPOLLONESHOT)) {
5522 __poll_t mask = mangle_poll(req->result & poll->events);
5560 req->result = mangle_poll(req->result & req->poll.events);
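Lines 5497-5560 show the re-check after a wakeup: vfs_poll() is keyed with the armed events, its result is masked against those events again, and mangle_poll() folds the kernel __poll_t into the 16-bit poll format that fits in cqe.res. A minimal userspace analogue built on poll(2) instead of vfs_poll(); recheck_poll() is a hypothetical helper:

    #include <poll.h>

    static int recheck_poll(int fd, short armed)
    {
        struct pollfd pfd = { .fd = fd, .events = armed };

        /* zero timeout: a non-blocking readiness re-check */
        if (poll(&pfd, 1, 0) < 0)
            return -1;

        /* report only the events that were actually armed */
        return pfd.revents & armed;
    }

The double masking mirrors 5517 and 5522: the key hints to the file's poll handler what is wanted, and the mask strips anything it reported beyond what was asked for.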
5652 if (mask && !(mask & poll->events))
5662 poll->events |= EPOLLONESHOT;
5699 io_init_poll_iocb(poll, first->events, first->wait.func);
5707 if (poll->events & EPOLLEXCLUSIVE)
5743 mask = vfs_poll(req->file, &ipt->pt) & poll->events;
5745 if (mask && (poll->events & EPOLLONESHOT)) {
5764 poll->events |= EPOLLONESHOT;
5851 mask, apoll->poll.events);
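The wake path at 5652-5764 applies two rules: a wakeup whose mask shares no bits with the armed events is ignored, and paths that cannot safely re-arm (for instance around the EPOLLEXCLUSIVE double-poll case at 5707) downgrade the request to one-shot. A compact sketch of both; poll_wake() and its can_rearm parameter are hypothetical names, while EPOLLONESHOT is the real userspace constant from <sys/epoll.h>:

    #include <stdbool.h>
    #include <sys/epoll.h>

    static bool poll_wake(unsigned mask, unsigned *armed, bool can_rearm)
    {
        /* ignore wakeups for events we did not arm (cf. 5652) */
        if (mask && !(mask & *armed))
            return false;

        /* paths that cannot re-arm safely fall back to one-shot
         * semantics (cf. 5662, 5764) */
        if (!can_rearm)
            *armed |= EPOLLONESHOT;
        return true;
    }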
5926 u32 events;
5928 events = READ_ONCE(sqe->poll32_events);
5930 events = swahw32(events);
5933 events |= EPOLLONESHOT;
5934 return demangle_poll(events) | (events & (EPOLLEXCLUSIVE|EPOLLONESHOT));
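One caveat the listing hides: only lines containing "events" matched, and in mainline trees of this vintage the swahw32() call at 5930 sits inside an elided #ifdef __BIG_ENDIAN block, keeping the 32-bit poll mask compatible with the original 16-bit poll_events layout. swahw32() swaps the two 16-bit halves of a word; a runnable userspace stand-in:

    #include <stdint.h>
    #include <stdio.h>

    /* userspace stand-in for the kernel's swahw32(): swap 16-bit halves */
    static uint32_t swap_halfwords(uint32_t x)
    {
        return (x >> 16) | (x << 16);
    }

    int main(void)
    {
        printf("%#010x\n", swap_halfwords(0x00040001u)); /* 0x00010004 */
        return 0;
    }

Similarly, 5933 only runs when an elided check above it finds no multishot flag, so the net effect of 5926-5934 is: read the raw mask, normalize endianness, default to one-shot, then translate to epoll bits while preserving EPOLLEXCLUSIVE and EPOLLONESHOT.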
5963 upd->events = io_poll_parse_events(sqe, flags);
5984 poll->events = io_poll_parse_events(sqe, flags);
5996 ret = __io_arm_poll_handler(req, &req->poll, &ipt, poll->events);
6025 preq->poll.events &= ~0xffff;
6026 preq->poll.events |= req->poll_update.events & 0xffff;
6027 preq->poll.events |= IO_POLL_UNMASK;
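The update path at 6025-6027 swaps only the low 16 bits of the armed mask (the classic poll bits), preserves the high internal flag bits, and then re-asserts the always-wanted bits. As a pure function, with update_events() as a hypothetical name:

    #include <stdint.h>

    static uint32_t update_events(uint32_t armed, uint32_t requested,
                                  uint32_t unmask)
    {
        armed &= ~0xffffu;            /* drop old poll bits, keep flags */
        armed |= requested & 0xffffu; /* install the new poll bits */
        armed |= unmask;              /* re-assert always-wanted bits */
        return armed;
    }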
6304 * sqe->off holds how many events that need to occur for this
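Per the comment at 6304, sqe->off on a timeout is a completion count: the request also completes once that many CQEs have been posted. A hedged liburing usage sketch; queue_counted_timeout() is a hypothetical helper, while io_uring_prep_timeout() is the real liburing call that stores the count in sqe->off:

    #include <liburing.h>

    static int queue_counted_timeout(struct io_uring *ring)
    {
        /* static: must stay valid at least until submission */
        static struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);

        if (!sqe)
            return -1;

        /* fires after 8 completions or one second, whichever is first */
        io_uring_prep_timeout(sqe, &ts, 8, 0);
        return 0;
    }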
7612 * Wake up if we have enough events, or if a timeout occurred since we
7685 * Wait until events become available, if we don't already have some. The
10134 * space applications don't need to do io completion events
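The closing fragments pair the waiter side (7612 and 7685: sleep until enough completions arrive or a timeout fires) with the SQPOLL note at 10134, where a kernel thread does the completion polling so the application does not have to. A minimal sketch of the wake predicate; the struct and its fields are illustrative, loosely modeled on the kernel's io_wait_queue:

    #include <stdbool.h>
    #include <stdint.h>

    struct cq_waiter {
        uint32_t to_wait;   /* completions the caller asked to wait for */
        bool     timed_out; /* set by a timer callback */
    };

    static bool should_wake(const struct cq_waiter *w, uint32_t ready)
    {
        /* wake if we have enough events, or if a timeout occurred */
        return ready >= w->to_wait || w->timed_out;
    }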