Lines matching defs: loop

294 static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root);
295 static void uv__inotify_read(uv_loop_t* loop,
301 uv_loop_t* loop);
317 static struct watcher_root* uv__inotify_watchers(uv_loop_t* loop) {
323 return (struct watcher_root*) &loop->inotify_watchers;
660 int uv__platform_loop_init(uv_loop_t* loop) {
663 lfields = uv__get_internal_fields(loop);
667 loop->inotify_watchers = NULL;
668 loop->inotify_fd = -1;
669 loop->backend_fd = epoll_create1(O_CLOEXEC);
671 fdsan_exchange_owner_tag(loop->backend_fd, 0, uv__get_addr_tag((void *)&loop->backend_fd));
673 if (loop->backend_fd == -1)
676 uv__iou_init(loop->backend_fd, &lfields->iou, 64, UV__IORING_SETUP_SQPOLL);
677 uv__iou_init(loop->backend_fd, &lfields->ctl, 256, 0);
678 UV_LOGI("init:%{public}zu, backend_fd:%{public}d", (size_t)loop, loop->backend_fd);
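The init path at 660-678 creates the epoll backend fd, tags it for fdsan so a stray close() elsewhere in the process is caught, and then sets up two io_uring rings: one SQPOLL ring (64 entries) used by the fs requests listed further down, and a second ring (256 entries) that uv__epoll_ctl_prep at 757 batches epoll-ctl work through. A minimal standalone sketch of the epoll + fdsan step, using bionic's android_fdsan_* API directly as a stand-in for this tree's fdsan_exchange_owner_tag/uv__get_addr_tag wrappers (backend_init is an illustrative name, not libuv API):

    #include <stdint.h>
    #include <sys/epoll.h>
    #if defined(__ANDROID__)
    #include <android/fdsan.h>
    #endif

    /* Sketch: create the epoll backend fd and, on bionic-based systems, tag
     * it so an unexpected close() from unrelated code aborts instead of
     * silently stealing the fd. The tag reuses the address of the owning
     * field, mirroring the uv__get_addr_tag((void*)&loop->backend_fd) idiom
     * in the listing. */
    static int backend_init(int* backend_fd) {
      *backend_fd = epoll_create1(EPOLL_CLOEXEC);
      if (*backend_fd == -1)
        return -1;
    #if defined(__ANDROID__)
      android_fdsan_exchange_owner_tag(*backend_fd, /* expected_tag */ 0,
                                       (uint64_t) (uintptr_t) backend_fd);
    #endif
      return 0;
    }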
683 int uv__io_fork(uv_loop_t* loop) {
687 root = uv__inotify_watchers(loop)->rbh_root;
689 fdsan_close_with_tag(loop->backend_fd, uv__get_addr_tag((void *)&loop->backend_fd));
691 uv__close(loop->backend_fd);
693 loop->backend_fd = -1;
696 uv__platform_loop_delete(loop);
698 err = uv__platform_loop_init(loop);
702 return uv__inotify_fork(loop, root);
706 void uv__platform_loop_delete(uv_loop_t* loop) {
709 lfields = uv__get_internal_fields(loop);
713 if (loop->inotify_fd != -1) {
714 uv__io_stop(loop, &loop->inotify_read_watcher, POLLIN);
715 uv__close(loop->inotify_fd);
716 loop->inotify_fd = -1;
728 void uv__platform_invalidate_fd(uv_loop_t* loop, int fd) {
734 lfields = uv__get_internal_fields(loop);
755 uv__epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &dummy);
757 uv__epoll_ctl_prep(loop->backend_fd,
767 int uv__io_check_fd(uv_loop_t* loop, int fd) {
776 if (uv__epoll_ctl(loop->backend_fd, EPOLL_CTL_ADD, fd, &e))
781 if (uv__epoll_ctl(loop->backend_fd, EPOLL_CTL_DEL, fd, &e))
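Lines 728-781 cover two small epoll idioms: invalidating an fd passes a dummy (non-NULL) event to EPOLL_CTL_DEL because kernels before 2.6.9 reject a NULL pointer there, and uv__io_check_fd probes whether an fd is pollable by adding it and immediately removing it again (regular files fail the ADD with EPERM). A hedged sketch of both, with illustrative helper names:

    #include <string.h>
    #include <sys/epoll.h>

    /* Drop an fd from the backend. The event struct is ignored by modern
     * kernels but must be non-NULL for EPOLL_CTL_DEL on kernels < 2.6.9
     * (see BUGS in epoll_ctl(2)). */
    static void invalidate_fd(int epollfd, int fd) {
      struct epoll_event dummy;
      memset(&dummy, 0, sizeof(dummy));
      (void) epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &dummy);
    }

    /* Probe whether epoll accepts this fd: ADD it, then DEL it so the
     * probe leaves no watcher behind. */
    static int check_fd(int epollfd, int fd) {
      struct epoll_event e;
      memset(&e, 0, sizeof(e));
      e.events = EPOLLIN;
      e.data.fd = -1;
      if (epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &e))
        return -1;
      if (epoll_ctl(epollfd, EPOLL_CTL_DEL, fd, &e))
        return -1;
      return 0;
    }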
790 uv_loop_t* loop,
816 req->work_req.loop = loop;
821 uv__req_register(loop, req);
845 int uv__iou_fs_close(uv_loop_t* loop, uv_fs_t* req) {
869 iou = &uv__get_internal_fields(loop)->iou;
871 sqe = uv__iou_get_sqe(iou, loop, req);
884 int uv__iou_fs_fsync_or_fdatasync(uv_loop_t* loop,
890 iou = &uv__get_internal_fields(loop)->iou;
892 sqe = uv__iou_get_sqe(iou, loop, req);
909 int uv__iou_fs_link(uv_loop_t* loop, uv_fs_t* req) {
913 iou = &uv__get_internal_fields(loop)->iou;
918 sqe = uv__iou_get_sqe(iou, loop, req);
934 int uv__iou_fs_mkdir(uv_loop_t* loop, uv_fs_t* req) {
938 iou = &uv__get_internal_fields(loop)->iou;
943 sqe = uv__iou_get_sqe(iou, loop, req);
958 int uv__iou_fs_open(uv_loop_t* loop, uv_fs_t* req) {
962 iou = &uv__get_internal_fields(loop)->iou;
964 sqe = uv__iou_get_sqe(iou, loop, req);
980 int uv__iou_fs_rename(uv_loop_t* loop, uv_fs_t* req) {
984 iou = &uv__get_internal_fields(loop)->iou;
986 sqe = uv__iou_get_sqe(iou, loop, req);
1002 int uv__iou_fs_symlink(uv_loop_t* loop, uv_fs_t* req) {
1006 iou = &uv__get_internal_fields(loop)->iou;
1011 sqe = uv__iou_get_sqe(iou, loop, req);
1026 int uv__iou_fs_unlink(uv_loop_t* loop, uv_fs_t* req) {
1030 iou = &uv__get_internal_fields(loop)->iou;
1032 sqe = uv__iou_get_sqe(iou, loop, req);
1046 int uv__iou_fs_read_or_write(uv_loop_t* loop,
1061 iou = &uv__get_internal_fields(loop)->iou;
1063 sqe = uv__iou_get_sqe(iou, loop, req);
1079 int uv__iou_fs_statx(uv_loop_t* loop,
1091 iou = &uv__get_internal_fields(loop)->iou;
1093 sqe = uv__iou_get_sqe(iou, loop, req);
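Every uv__iou_fs_* entry between 845 and 1093 follows the same shape: look up the ring in the loop's internal fields, ask uv__iou_get_sqe for a submission-queue entry (falling back to the threadpool path when the ring cannot take the request), fill in the SQE, and return; the result arrives later through the completion queue. The listing drives its private uv__iou ring directly, but the same pattern can be shown self-contained with liburing (an assumption for illustration only; the names below are not libuv's):

    #include <fcntl.h>
    #include <liburing.h>

    /* Submit an openat through io_uring and wait for its completion.
     * libuv never waits here; it reaps the CQE later from its poll loop. */
    static int open_via_uring(struct io_uring* ring, const char* path) {
      struct io_uring_sqe* sqe;
      struct io_uring_cqe* cqe;
      int fd;

      sqe = io_uring_get_sqe(ring);
      if (sqe == NULL)
        return -1;  /* ring full: the listing falls back to other machinery */

      io_uring_prep_openat(sqe, AT_FDCWD, path, O_RDONLY, 0);
      io_uring_submit(ring);

      if (io_uring_wait_cqe(ring, &cqe) < 0)
        return -1;
      fd = cqe->res;              /* >= 0: new fd, < 0: negated errno */
      io_uring_cqe_seen(ring, cqe);
      return fd;
    }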
1164 static void uv__poll_io_uring(uv_loop_t* loop, struct uv__iou* iou) {
1189 uv__req_unregister(loop, req);
1194 uv__fs_post(loop, req);
1211 uv__metrics_update_idle_time(loop);
1221 * available. Don't grab them immediately but in the next loop iteration to
1222 * avoid loop starvation. */
1235 uv__metrics_inc_events(loop, nevents);
1236 if (uv__get_internal_fields(loop)->current_timeout == 0)
1237 uv__metrics_inc_events_waiting(loop, nevents);
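uv__poll_io_uring (1164 onward) is the reaping side of that pattern: it drains whatever completions have accumulated, unregisters each uv_fs_t from the loop, posts its callback via uv__fs_post, and bumps the event metrics. A liburing-flavoured drain loop, again only an analogue of the internal ring walk:

    #include <liburing.h>

    /* Drain pending completions without blocking. The pointer stored in
     * user_data at submission time (a uv_fs_t* in the listing) comes back
     * on the matching CQE. */
    static void drain_completions(struct io_uring* ring,
                                  void (*done)(void* req, int result)) {
      struct io_uring_cqe* cqe;

      while (io_uring_peek_cqe(ring, &cqe) == 0) {
        done(io_uring_cqe_get_data(cqe), cqe->res);
        io_uring_cqe_seen(ring, cqe);
      }
    }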
1381 void uv__io_poll(uv_loop_t* loop, int timeout) {
1408 lfields = uv__get_internal_fields(loop);
1413 if (loop->flags & UV_LOOP_BLOCK_SIGPROF) {
1420 base = loop->time;
1433 epollfd = loop->backend_fd;
1437 while (!uv__queue_empty(&loop->watcher_queue)) {
1438 q = uv__queue_head(&loop->watcher_queue);
1459 if (loop->nfds == 0)
1471 * will return early if the loop isn't configured with UV_METRICS_IDLE_TIME.
1474 uv__metrics_set_provider_entry_time(loop);
1491 /* Update loop->time unconditionally. It's tempting to skip the update when
1495 SAVE_ERRNO(uv__update_time(loop));
1531 uv__poll_io_uring(loop, iou);
1538 assert((unsigned) fd < loop->nwatchers);
1540 if (fd < 0 || (unsigned) fd >= loop->nwatchers)
1544 w = loop->watchers[fd];
1557 * callbacks when previous callback invocation in this loop has stopped
1564 * EPOLLERR or EPOLLHUP event. In order to force the event loop to
1586 if (w == &loop->signal_io_watcher) {
1589 uv__metrics_update_idle_time(loop);
1590 w->cb(loop, w, pe->events);
1597 uv__metrics_inc_events(loop, nevents);
1601 uv__metrics_inc_events_waiting(loop, nevents);
1605 uv__metrics_update_idle_time(loop);
1606 loop->signal_io_watcher.cb(loop, &loop->signal_io_watcher, POLLIN);
1612 break; /* Event loop should cycle now so don't poll again. */
1615 break; /* Event loop should cycle now so don't poll again. */
1635 real_timeout -= (loop->time - base);
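uv__io_poll (1381 onward) is the epoll_wait dispatch loop: flush pending watcher changes from loop->watcher_queue, wait up to the computed timeout, update loop->time, and invoke each ready watcher's callback, with special casing for the signal watcher and for io_uring completions surfacing on the backend fd. Stripped of the metrics, signal, and io_uring handling, the core is roughly the following (poll_once and the callback type are illustrative):

    #include <stdint.h>
    #include <sys/epoll.h>

    /* Wait up to `timeout` ms and dispatch one callback per ready fd.
     * libuv additionally maps fds back to uv__io_t watchers, skips events
     * for watchers stopped earlier in the same iteration, and feeds the
     * idle-time / events-waiting metrics shown in the listing. */
    static int poll_once(int epollfd, int timeout,
                         void (*cb)(int fd, uint32_t events)) {
      struct epoll_event events[1024];
      int i, n;

      n = epoll_wait(epollfd, events, 1024, timeout);
      for (i = 0; i < n; i++)
        cb(events[i].data.fd, events[i].events);
      return n;
    }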
2360 static int init_inotify(uv_loop_t* loop) {
2363 if (loop->inotify_fd != -1)
2370 loop->inotify_fd = fd;
2371 uv__io_init(&loop->inotify_read_watcher, uv__inotify_read, loop->inotify_fd);
2372 uv__io_start(loop, &loop->inotify_read_watcher, POLLIN);
2378 static int uv__inotify_fork(uv_loop_t* loop, struct watcher_list* root) {
2395 loop->inotify_watchers = root;
2404 uv__inotify_watchers(loop), tmp_watcher_list_iter) {
2424 maybe_free_watcher_list(watcher_list, loop);
2444 static struct watcher_list* find_watcher(uv_loop_t* loop, int wd) {
2447 return RB_FIND(watcher_root, uv__inotify_watchers(loop), &w);
2451 static void maybe_free_watcher_list(struct watcher_list* w, uv_loop_t* loop) {
2455 RB_REMOVE(watcher_root, uv__inotify_watchers(loop), w);
2456 inotify_rm_watch(loop->inotify_fd, w->wd);
2462 static void uv__inotify_read(uv_loop_t* loop,
2478 size = read(loop->inotify_fd, buf, sizeof(buf));
2498 w = find_watcher(loop, e->wd);
2532 maybe_free_watcher_list(w, loop);
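The inotify half (2360-2532) lazily creates one inotify fd per loop, registers it with the poll loop via uv__io_init/uv__io_start, and on readiness walks the variable-length inotify_event records in the read buffer, mapping each watch descriptor back to its watcher_list through the red-black tree (find_watcher) before fanning the event out to the handles on that list. A self-contained sketch of the read-and-walk step (the helper name and printf are illustrative; libuv dispatches to handle callbacks instead):

    #include <stdio.h>
    #include <string.h>
    #include <sys/inotify.h>
    #include <unistd.h>

    /* Drain the inotify fd and walk the variable-length event records.
     * The fd is non-blocking in libuv, so read() returning -1/EAGAIN ends
     * the loop once everything pending has been consumed. */
    static void read_inotify_events(int inotify_fd) {
      char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
      const struct inotify_event* e;
      const char* p;
      ssize_t size;

      while ((size = read(inotify_fd, buf, sizeof(buf))) > 0) {
        for (p = buf; p < buf + size; p += sizeof(*e) + e->len) {
          e = (const struct inotify_event*) p;
          printf("wd=%d mask=%#x name=%s\n",
                 e->wd, e->mask, e->len ? e->name : "");
        }
      }
    }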
2538 int uv_fs_event_init(uv_loop_t* loop, uv_fs_event_t* handle) {
2539 uv__handle_init(loop, (uv_handle_t*)handle, UV_FS_EVENT);
2549 uv_loop_t* loop;
2558 loop = handle->loop;
2560 err = init_inotify(loop);
2573 wd = inotify_add_watch(loop->inotify_fd, path, events);
2577 w = find_watcher(loop, wd);
2590 RB_INSERT(watcher_root, uv__inotify_watchers(loop), w);
2609 w = find_watcher(handle->loop, handle->wd);
2617 maybe_free_watcher_list(w, handle->loop);
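Finally, 2538-2617 are the public uv_fs_event_t entry points built on that machinery: init ties the handle to the loop, start lazily initializes inotify, adds (or reuses) a watch for the path and inserts it into the per-wd watcher tree, and stop looks the watch up again and releases it via maybe_free_watcher_list. A short usage sketch of that public API (the path, flags, and callback name are illustrative):

    #include <stdio.h>
    #include <uv.h>

    /* Print each change reported for the watched path. */
    static void on_change(uv_fs_event_t* handle, const char* filename,
                          int events, int status) {
      if (status < 0)
        return;
      printf("change in %s (events=%d)\n", filename ? filename : "?", events);
    }

    int main(void) {
      uv_loop_t* loop = uv_default_loop();
      uv_fs_event_t handle;

      uv_fs_event_init(loop, &handle);
      uv_fs_event_start(&handle, on_change, ".", 0);
      return uv_run(loop, UV_RUN_DEFAULT);
    }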