Lines matching refs: events
134 struct kevent events[1024];
175 if ((w->events & POLLIN) == 0 && (w->pevents & POLLIN) != 0) {
187 EV_SET(events + nevents, w->fd, filter, op, fflags, 0, 0);
189 if (++nevents == ARRAY_SIZE(events)) {
190 if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
196 if ((w->events & POLLOUT) == 0 && (w->pevents & POLLOUT) != 0) {
197 EV_SET(events + nevents, w->fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
199 if (++nevents == ARRAY_SIZE(events)) {
200 if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
206 if ((w->events & UV__POLLPRI) == 0 && (w->pevents & UV__POLLPRI) != 0) {
207 EV_SET(events + nevents, w->fd, EV_OOBAND, EV_ADD, 0, 0, 0);
209 if (++nevents == ARRAY_SIZE(events)) {
210 if (kevent(loop->backend_fd, events, nevents, NULL, 0, NULL))
216 w->events = w->pevents;
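These matches appear to come from libuv's kqueue backend (src/unix/kqueue.c). Lines 175-216 register new interest before polling: for each watcher whose requested bits (pevents) include something not yet active in events, a change is queued with EV_SET, and the batch is flushed to the kernel with kevent() whenever the local array fills up. Below is a minimal standalone sketch of that batching pattern, assuming a plain kqueue descriptor; the queue_change and flush_changes helpers and the NCHANGES size are illustrative names, not libuv API.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NCHANGES 64  /* the file above uses a 1024-entry array */

static struct kevent changes[NCHANGES];
static int nchanges;

/* Submit the queued changes to the kernel without collecting any events
 * (eventlist is NULL), then reset the batch. */
static void flush_changes(int kq) {
  if (nchanges > 0 && kevent(kq, changes, nchanges, NULL, 0, NULL) == -1) {
    perror("kevent");
    exit(1);
  }
  nchanges = 0;
}

/* Queue one filter registration; flush early when the batch array fills,
 * mirroring the `++nevents == ARRAY_SIZE(events)` checks above. */
static void queue_change(int kq, int fd, int filter) {
  EV_SET(changes + nchanges, fd, filter, EV_ADD, 0, 0, 0);
  if (++nchanges == NCHANGES)
    flush_changes(kq);
}

int main(void) {
  int fds[2];
  int kq = kqueue();

  if (kq == -1 || pipe(fds) == -1) {
    perror("setup");
    return 1;
  }

  /* Register read interest on the pipe's read end and write interest on its
   * write end, standing in for the POLLIN/POLLOUT watcher bits above. */
  queue_change(kq, fds[0], EVFILT_READ);
  queue_change(kq, fds[1], EVFILT_WRITE);
  flush_changes(kq);  /* push whatever is still queued before polling */

  close(kq);
  return 0;
}

Batching like this keeps the number of kevent() system calls roughly proportional to the number of changes divided by the array size, instead of one call per change.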
255 * of events in the callback were waiting when poll was called.
260 events,
262 events,
263 ARRAY_SIZE(events),
269 /* Unlimited timeout should only return with events or signal. */
306 loop->watchers[loop->nwatchers] = (void*) events;
309 ev = events + i;
326 /* Skip invalidated events, see uv__platform_invalidate_fd */
338 assert(w->events == POLLIN);
415 if (nfds == ARRAY_SIZE(events) && --count != 0) {
416 /* Poll for more events but don't block this time. */
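Lines 255-338 belong to the wait/dispatch loop: kevent() appears to be passed the same array as both changelist and eventlist, the returned entries are walked (ev = events + i), and entries invalidated via uv__platform_invalidate_fd are skipped. Lines 415-416 show the follow-up: if the output array came back completely full, the loop polls again with a zero timeout, up to a capped number of rounds, in case more events are pending. The following is a compilable sketch of that wait-and-re-poll pattern; drain_events, NEVENTS, and the pipe-based main are illustrative, not libuv code.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

#define NEVENTS 64  /* the file above uses a 1024-entry array */

/* Wait for events on `kq`, dispatch them, and keep re-polling with a zero
 * timeout while the output array comes back full (up to a small cap). */
static void drain_events(int kq) {
  struct kevent events[NEVENTS];
  struct timespec zero = { 0, 0 };
  const struct timespec *timeout = NULL;  /* first pass: block */
  int count = 8;                          /* cap on re-poll rounds */
  int nfds;
  int i;

  for (;;) {
    nfds = kevent(kq, NULL, 0, events, NEVENTS, timeout);
    if (nfds <= 0)
      break;  /* zero-timeout poll found nothing, or an error occurred */

    for (i = 0; i < nfds; i++) {
      /* A real dispatch loop would look up the watcher for events[i].ident,
       * skip entries invalidated while closing fds, and invoke the watcher's
       * callback based on events[i].filter (EVFILT_READ, EVFILT_WRITE, ...). */
      printf("fd %lu ready, filter %d\n",
             (unsigned long) events[i].ident, (int) events[i].filter);
    }

    if (nfds < NEVENTS || --count == 0)
      break;          /* array not full (or cap reached): nothing left */
    timeout = &zero;  /* array was full: poll again without blocking */
  }
}

int main(void) {
  struct kevent change;
  int fds[2];
  int kq = kqueue();

  if (kq == -1 || pipe(fds) == -1) {
    perror("setup");
    return 1;
  }

  EV_SET(&change, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
  if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) {
    perror("kevent");
    return 1;
  }

  /* Make the read end readable so drain_events() has something to report. */
  if (write(fds[1], "x", 1) != 1)
    return 1;

  drain_events(kq);
  return 0;
}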
442 struct kevent* events;
449 events = (struct kevent*) loop->watchers[loop->nwatchers];
451 if (events == NULL)
454 /* Invalidate events with same file descriptor */
456 if ((int) events[i].ident == fd && events[i].filter != EVFILT_PROC)
457 events[i].ident = -1;
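uv__platform_invalidate_fd (lines 442-457) covers the case where a file descriptor is closed while events referring to it still sit in the array being processed: each matching entry's ident is overwritten with -1 so the dispatch loop's invalidation check (line 326) skips it, and EVFILT_PROC entries are left untouched because their ident is a process id rather than an fd. A small sketch of the same idea, with a toy main that exercises it; invalidate_fd and pending are illustrative names.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <assert.h>
#include <stdint.h>

/* Overwrite the ident of every pending kevent that refers to `fd` so a later
 * dispatch pass can recognise and skip it.  EVFILT_PROC entries are left
 * alone because their ident is a process id, not a file descriptor. */
static void invalidate_fd(struct kevent *pending, int npending, int fd) {
  int i;
  for (i = 0; i < npending; i++)
    if ((int) pending[i].ident == fd && pending[i].filter != EVFILT_PROC)
      pending[i].ident = (uintptr_t) -1;  /* sentinel meaning "skip me" */
}

int main(void) {
  struct kevent pending[2];

  EV_SET(&pending[0], 7, EVFILT_READ, EV_ADD, 0, 0, 0);
  EV_SET(&pending[1], 9, EVFILT_READ, EV_ADD, 0, 0, 0);

  invalidate_fd(pending, 2, 7);  /* pretend fd 7 was just closed */

  assert(pending[0].ident == (uintptr_t) -1);
  assert(pending[1].ident == 9);
  return 0;
}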
464 int events;
474 events = UV_CHANGE;
476 events = UV_RENAME;
507 handle->cb(handle, path, events, 0);
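The last group (lines 464-507) is the file-watching path: the platform notification is reduced to UV_CHANGE or UV_RENAME and handed to handle->cb. On the consumer side that value arrives as the events bitmask of a callback registered through libuv's public uv_fs_event API. A minimal usage example watching the current directory, with error checking on the init/start calls omitted for brevity:

#include <stdio.h>
#include <uv.h>

/* The `events` value computed in the watcher path above arrives here as a
 * bitmask of UV_RENAME and UV_CHANGE. */
static void on_fs_event(uv_fs_event_t *handle,
                        const char *filename,
                        int events,
                        int status) {
  (void) handle;

  if (status < 0) {
    fprintf(stderr, "fs event error: %s\n", uv_strerror(status));
    return;
  }
  if (events & UV_RENAME)
    printf("renamed: %s\n", filename != NULL ? filename : "(unknown)");
  if (events & UV_CHANGE)
    printf("changed: %s\n", filename != NULL ? filename : "(unknown)");
}

int main(void) {
  uv_loop_t *loop = uv_default_loop();
  uv_fs_event_t fs_event;

  uv_fs_event_init(loop, &fs_event);
  uv_fs_event_start(&fs_event, on_fs_event, ".", 0);

  return uv_run(loop, UV_RUN_DEFAULT);
}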